I am looking to upgrade our SonarQube instance from version 7.0 to 7.9.6 before moving on to 8.9.
I have copied across the configuration files wrapper.conf and sonar.properties, and I am using Oracle JDK 11. My database is PostgreSQL 9.6.8.
es.log
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [analysis-common]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [lang-painless]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [mapper-extras]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [parent-join]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [percolator]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [reindex]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [repository-url]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] loaded module [transport-netty4]
2021.10.05 16:59:40 INFO es[][o.e.p.PluginsService] no plugins loaded
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [force_merge], size [1], queue size [unbounded]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [fetch_shard_started], core [1], max [4], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [listener], size [1], queue size [unbounded]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [index], size [2], queue size [200]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [refresh], core [1], max [1], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [generic], core [4], max [128], keep alive [30s]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [warmer], core [1], max [1], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] thread pool [sonarqube/search] will adjust queue by [50] when determining automatic queue size
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [search], size [4], queue size [1k]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [flush], core [1], max [1], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [fetch_shard_store], core [1], max [4], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [management], core [1], max [5], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [get], size [2], queue size [1k]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [analyze], size [1], queue size [16]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [write], size [2], queue size [200]
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [snapshot], core [1], max [1], keep alive [5m]
2021.10.05 16:59:40 DEBUG es[][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] thread pool [sonarqube/search_throttled] will adjust queue by [50] when determining automatic queue size
2021.10.05 16:59:40 DEBUG es[][o.e.t.ThreadPool] created thread pool: name [search_throttled], size [1], queue size [100]
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent0] -Dio.netty.noUnsafe: true
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent0] sun.misc.Unsafe: unavailable (io.netty.noUnsafe)
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent0] Java version: 11
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent0] java.nio.DirectByteBuffer.<init>(long, int): unavailable
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent] maxDirectMemory: 519438336 bytes (maybe)
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent] -Dio.netty.tmpdir: /opt/sonarqube-7.9.6/temp (java.io.tmpdir)
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent] -Dio.netty.bitMode: 64 (sun.arch.data.model)
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent] -Dio.netty.maxDirectMemory: -1 bytes
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent] -Dio.netty.uninitializedArrayAllocationThreshold: -1
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.CleanerJava9] java.nio.ByteBuffer.cleaner(): unavailable
java.lang.UnsupportedOperationException: sun.misc.Unsafe unavailable
at io.netty.util.internal.CleanerJava9.<clinit>(CleanerJava9.java:68) [netty-common-4.1.32.Final.jar:4.1.32.Final]
at io.netty.util.internal.PlatformDependent.<clinit>(PlatformDependent.java:172) [netty-common-4.1.32.Final.jar:4.1.32.Final]
at io.netty.util.ConstantPool.<init>(ConstantPool.java:32) [netty-common-4.1.32.Final.jar:4.1.32.Final]
at io.netty.util.AttributeKey$1.<init>(AttributeKey.java:27) [netty-common-4.1.32.Final.jar:4.1.32.Final]
at io.netty.util.AttributeKey.<clinit>(AttributeKey.java:27) [netty-common-4.1.32.Final.jar:4.1.32.Final]
at org.elasticsearch.transport.netty4.Netty4Transport.<clinit>(Netty4Transport.java:219) [transport-netty4-client-6.8.0.jar:6.8.0]
at org.elasticsearch.transport.Netty4Plugin.getSettings(Netty4Plugin.java:57) [transport-netty4-client-6.8.0.jar:6.8.0]
at org.elasticsearch.plugins.PluginsService.lambda$getPluginSettings$0(PluginsService.java:89) [elasticsearch-6.8.0.jar:6.8.0]
at java.util.stream.ReferencePipeline$7$1.accept(ReferencePipeline.java:271) [?:?]
at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1655) [?:?]
at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484) [?:?]
at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474) [?:?]
at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:913) [?:?]
at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) [?:?]
at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:578) [?:?]
at org.elasticsearch.plugins.PluginsService.getPluginSettings(PluginsService.java:89) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.node.Node.<init>(Node.java:356) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.node.Node.<init>(Node.java:266) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:212) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:212) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:333) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:159) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:150) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:86) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:124) [elasticsearch-cli-6.8.0.jar:6.8.0]
at org.elasticsearch.cli.Command.main(Command.java:90) [elasticsearch-cli-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:116) [elasticsearch-6.8.0.jar:6.8.0]
at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:93) [elasticsearch-6.8.0.jar:6.8.0]
2021.10.05 16:59:40 DEBUG es[][i.n.u.i.PlatformDependent] -Dio.netty.noPreferDirect: true
2021.10.05 16:59:43 DEBUG es[][o.e.s.ScriptService] using script cache with max_size [100], expire [0s]
2021.10.05 16:59:43 WARN es[][o.e.d.c.s.Settings] [http.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.
2021.10.05 16:59:44 DEBUG es[][o.e.m.j.JvmGcMonitorService] enabled [true], interval [1s], gc_threshold [{default=GcThreshold{name='default', warnThreshold=10000, infoThreshold=5000, debugThreshold=2000}, young=GcThreshold{name='young', warnThreshold=1000, infoThreshold=700, debugThreshold=400}, old=GcThreshold{name='old', warnThreshold=10000, infoThreshold=5000, debugThreshold=2000}}], overhead [50, 25, 10]
2021.10.05 16:59:44 DEBUG es[][o.e.m.o.OsService] using refresh_interval [1s]
2021.10.05 16:59:44 DEBUG es[][o.e.m.p.ProcessService] using refresh_interval [1s]
2021.10.05 16:59:44 DEBUG es[][o.e.m.j.JvmService] using refresh_interval [1s]
2021.10.05 16:59:44 DEBUG es[][o.e.m.f.FsService] using refresh_interval [1s]
2021.10.05 16:59:44 DEBUG es[][o.e.c.r.a.d.ClusterRebalanceAllocationDecider] using [cluster.routing.allocation.allow_rebalance] with [indices_all_active]
2021.10.05 16:59:44 DEBUG es[][o.e.c.r.a.d.ConcurrentRebalanceAllocationDecider] using [cluster_concurrent_rebalance] with [2]
2021.10.05 16:59:44 DEBUG es[][o.e.c.r.a.d.ThrottlingAllocationDecider] using node_concurrent_outgoing_recoveries [2], node_concurrent_incoming_recoveries [2], node_initial_primaries_recoveries [4]
2021.10.05 16:59:45 DEBUG es[][o.e.i.IndicesQueryCache] using [node] query cache with size [49.5mb] max filter count [10000]
2021.10.05 16:59:45 DEBUG es[][o.e.i.IndexingMemoryController] using indexing buffer size [49.5mb] with indices.memory.shard_inactive_time [5m], indices.memory.interval [5s]
2021.10.05 16:59:45 DEBUG es[][o.e.g.GatewayMetaState] took 0s to load state
2021.10.05 16:59:45 DEBUG es[][o.e.d.z.SettingsBasedHostsProvider] using initial hosts [127.0.0.1, [::1]]
2021.10.05 16:59:45 INFO es[][o.e.d.DiscoveryModule] using discovery type [zen] and host providers [settings]
2021.10.05 16:59:45 DEBUG es[][o.e.d.z.UnicastZenPing] using concurrent_connects [10], resolve_timeout [5s]
2021.10.05 16:59:45 DEBUG es[][o.e.d.z.ElectMasterService] using minimum_master_nodes [1]
2021.10.05 16:59:45 DEBUG es[][o.e.d.z.ZenDiscovery] using ping_timeout [3s], join.timeout [1m], master_election.ignore_non_master [false]
2021.10.05 16:59:45 DEBUG es[][o.e.d.z.MasterFaultDetection] [master] uses ping_interval [1s], ping_timeout [30s], ping_retries [3]
2021.10.05 16:59:45 DEBUG es[][o.e.d.z.NodesFaultDetection] [node ] uses ping_interval [1s], ping_timeout [30s], ping_retries [3]
2021.10.05 16:59:46 DEBUG es[][o.e.i.r.RecoverySettings] using max_bytes_per_sec[40mb]
2021.10.05 16:59:46 INFO es[][o.e.n.Node] initialized
2021.10.05 16:59:46 INFO es[][o.e.n.Node] starting ...
2021.10.05 16:59:46 DEBUG es[][i.n.c.MultithreadEventLoopGroup] -Dio.netty.eventLoopThreads: 4
2021.10.05 16:59:47 DEBUG es[][i.n.c.n.NioEventLoop] -Dio.netty.noKeySetOptimization: true
2021.10.05 16:59:47 DEBUG es[][i.n.c.n.NioEventLoop] -Dio.netty.selectorAutoRebuildThreshold: 512
2021.10.05 16:59:47 DEBUG es[][i.n.u.i.PlatformDependent] org.jctools-core.MpscChunkedArrayQueue: unavailable
2021.10.05 16:59:47 DEBUG es[][o.e.t.n.Netty4Transport] using profile[default], worker_count[4], port[9001], bind_host[[127.0.0.1]], publish_host[[127.0.0.1]], receive_predictor[64kb->64kb]
2021.10.05 16:59:47 DEBUG es[][o.e.t.TcpTransport] binding server bootstrap to: [127.0.0.1]
2021.10.05 16:59:47 DEBUG es[][i.n.c.DefaultChannelId] -Dio.netty.processId: 15815 (auto-detected)
2021.10.05 16:59:47 DEBUG es[][i.n.u.NetUtil] -Djava.net.preferIPv4Stack: false
2021.10.05 16:59:47 DEBUG es[][i.n.u.NetUtil] -Djava.net.preferIPv6Addresses: false
2021.10.05 16:59:47 DEBUG es[][i.n.u.NetUtil] Loopback interface: lo (lo, 0:0:0:0:0:0:0:1%lo)
2021.10.05 16:59:47 DEBUG es[][i.n.u.NetUtil] /proc/sys/net/core/somaxconn: 128
2021.10.05 16:59:47 DEBUG es[][i.n.c.DefaultChannelId] -Dio.netty.machineId: 00:0d:3a:ff:fe:60:2a:70 (auto-detected)
2021.10.05 16:59:47 DEBUG es[][i.n.u.i.InternalThreadLocalMap] -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024
2021.10.05 16:59:47 DEBUG es[][i.n.u.i.InternalThreadLocalMap] -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096
2021.10.05 16:59:47 DEBUG es[][i.n.u.ResourceLeakDetector] -Dio.netty.leakDetection.level: simple
2021.10.05 16:59:47 DEBUG es[][i.n.u.ResourceLeakDetector] -Dio.netty.leakDetection.targetRecords: 4
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.numHeapArenas: 4
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.numDirectArenas: 4
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.pageSize: 8192
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.maxOrder: 11
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.chunkSize: 16777216
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.tinyCacheSize: 512
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.smallCacheSize: 256
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.normalCacheSize: 64
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.maxCachedBufferCapacity: 32768
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.cacheTrimInterval: 8192
2021.10.05 16:59:47 DEBUG es[][i.n.b.PooledByteBufAllocator] -Dio.netty.allocator.useCacheForAllThreads: true
2021.10.05 16:59:47 DEBUG es[][i.n.b.ByteBufUtil] -Dio.netty.allocator.type: pooled
2021.10.05 16:59:47 DEBUG es[][i.n.b.ByteBufUtil] -Dio.netty.threadLocalDirectBufferSize: 0
2021.10.05 16:59:47 DEBUG es[][i.n.b.ByteBufUtil] -Dio.netty.maxThreadLocalCharBufferSize: 16384
2021.10.05 16:59:47 DEBUG es[][o.e.t.TcpTransport] Bound profile [default] to address {127.0.0.1:9001}
2021.10.05 16:59:47 INFO es[][o.e.t.TransportService] publish_address {127.0.0.1:9001}, bound_addresses {127.0.0.1:9001}
2021.10.05 16:59:47 INFO es[][o.e.b.BootstrapChecks] explicitly enforcing bootstrap checks
2021.10.05 16:59:47 ERROR es[][o.e.b.Bootstrap] node validation exception
[1] bootstrap checks failed
[1]: max number of threads [2048] for user [sonar] is too low, increase to at least [4096]
2021.10.05 16:59:47 INFO es[][o.e.n.Node] stopping ...
2021.10.05 16:59:47 INFO es[][o.e.n.Node] stopped
2021.10.05 16:59:47 INFO es[][o.e.n.Node] closing ...
2021.10.05 16:59:47 INFO es[][o.e.n.Node] closed