MaskRay / ccls

C/C++/ObjC language server supporting cross references, hierarchies, completion and semantic highlighting
Apache License 2.0
3.76k stars 258 forks source link

LLVM ERROR: out of memory #320

Closed 3038922 closed 5 years ago

3038922 commented 5 years ago
Server CWD is d:\turningPointV5
11:10:07              ..\src\messages\initialize.cc:271 I initialize in directory D:/turningPointV5 with uri file:///d%3A/turningPointV5
11:10:07              ..\src\messages\initialize.cc:294 I initializationOptions: {"compilationDatabaseCommand":"","compilationDatabaseDirectory":"","cache":{"directory":"c:/ccls-cache","format":"binary","hierarchicalPath":true,"retainInMemory":1},"capabilities":{"documentOnTypeFormattingProvider":{"firstTriggerCharacter":"}","moreTriggerCharacter":[]},"foldingRangeProvider":true,"workspace":{"workspaceFolders":{"supported":true,"changeNotifications":true}}},"clang":{"excludeArgs":[],"extraArgs":[],"pathMappings":[],"resourceDir":""},"client":{"hierarchicalDocumentSymbolSupport":true,"linkSupport":true,"snippetSupport":true},"codeLens":{"localVariables":false},"completion":{"caseSensitivity":2,"detailedLabel":false,"dropOldRequests":true,"duplicateOptional":false,"filterAndSort":true,"include":{"blacklist":[],"maxPathSize":37,"suffixWhitelist":[".h",".hpp",".hh",".inc",".h",".hpp",".hh"],"whitelist":[]},"maxNum":100},"diagnostics":{"blacklist":[],"onChange":1000,"onOpen":0,"onSave":0,"spellChecking":true,"whitelist":[]},"highlight":{"largeFileSize":2097152,"lsRanges":true,"blacklist":[],"whitelist":[]},"index":{"blacklist":[],"comments":2,"initialBlacklist":[],"initialWhitelist":[],"maxInitializerLines":15,"multiVersion":0,"multiVersionBlacklist":[],"multiVersionWhitelist":[],"onChange":false,"threads":0,"trackDependency":2,"whitelist":[]},"request":{"timeout":5000},"session":{"maxNum":10},"workspaceSymbol":{"caseSensitivity":1,"maxNum":1000,"sort":true},"xref":{"maxNum":2000}}
11:10:07              ..\src\messages\initialize.cc:323 I use -resource-dir=C:\llvm\Release\lib\clang\9.0.0
11:10:07              ..\src\messages\initialize.cc:344 I add workspace folder turningPointV5: D:/turningPointV5/
11:10:07              ..\src\project.cc:397 I loaded D:/turningPointV5/compile_commands.json
11:10:07              ..\src\messages\initialize.cc:370 I start 32 indexers
11:10:07              ..\src\messages\initialize.cc:378 I dispatch initial index requests
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/xuanxuanAuto.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/birdAuto.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/bearAuto.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/sheepAuto.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/opcontrol.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/initialize.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/chassis/chassisAutoAiming.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/system/visionData.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/odomPage.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/chassis/chassis.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/chassis/chassisOdom.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/userDisplay.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/generic/misc.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/autonomous.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/device/motor.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/pid/pid.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/maintenanceInfo.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/generic/lift.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/setConfig.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/generic/head.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/visionPage.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/pid/klPid.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/sysInfo.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/system/sysBase.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/generic/generic.cpp
11:10:07              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/generic/intake.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/system/logger.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/util/odometry.cpp
11:10:08              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/pigAuto.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/device/vision.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/device/adi.cpp
11:10:08              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/userDisplay/pidPage.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/userDisplay/debug.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/userDisplay/startPage.cpp
11:10:08              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/fishAuto.cpp
11:10:08              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/userAuto/boboAuto.cpp
11:10:08              ..\src\pipeline.cc:292 I load cache for D:/turningPointV5/src/ncrapi/util/timer.cpp
11:10:08              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/generic/shoot.cpp
11:10:08              ..\src\pipeline.cc:337 I parse D:/turningPointV5/src/ncrapi/system/sysUser.cpp
LLVM ERROR: out of memory
11:10:11              ..\src\indexer.cc:1289 E clang crashed for D:/turningPointV5/src/ncrapi/device/motor.cpp
LLVM ERROR: out of memory
ccls server connection was closed
[Error - 上午11:10:11] Connection to server got closed. Server will not be restarted.
    "ccls.launch.command": "c:/ccls/Release/ccls.exe",
    "ccls.cache.directory": "c:/ccls-cache",
    "ccls.completion.enableSnippetInsertion": true, 
    "ccls.highlighting.enabled.types": true,
    "ccls.highlighting.enabled.freeStandingFunctions": true,
    "ccls.highlighting.enabled.memberFunctions": true,
    "ccls.highlighting.enabled.freeStandingVariables": true,
    "ccls.highlighting.enabled.memberVariables": true,
    "ccls.highlighting.enabled.namespaces": true,
    "ccls.highlighting.enabled.macros": true,
    "ccls.highlighting.enabled.enums": true,
    "ccls.highlighting.enabled.typeAliases": true,
    "ccls.highlighting.enabled.enumConstants": true,
    "ccls.highlighting.enabled.staticMemberFunctions": true,
    "ccls.highlighting.enabled.parameters": true,
    "ccls.highlighting.enabled.templateParameters": true,
    "ccls.highlighting.enabled.staticMemberVariables": true,
    "ccls.highlighting.enabled.globalVariables": true,

It was all right before. After updating LLVM and ccls, this started happening.

MaskRay commented 5 years ago

As you see (LLVM ERROR: out of memory), the ccls process uses too much memory.

This is not necessarily a llvm/clang/ccls problem. If your project is large, higher memory consumption is expected.

The peak memory usage happens when ccls is indexing your project with index.threads threads. You may set index.threads to a smaller positive number (default: 0, which means std::thread::hardware_concurrency()).

You can also change the initialization option cache.retainInMemory to 0 (default: 1) to save some memory.

3038922 commented 5 years ago

index.threads

my pc is "AMD 2950X 32G " This is LLVM's problem?

FirstLoveLife commented 5 years ago

I'm curious about msvc's performance on your project

rbresalier commented 5 years ago

I think ccls should be smarter about how it chooses the number of threads to use. I have this same issue in #316 , except I don't get a nice "Out of memory" error, ccls goes into uninterruptible sleep state.

ccls currently uses std::thread::hardware_concurrency() to determine the number of threads, but it could be smarter and look at how much memory is available in the system, limiting the number of threads in case using std::thread::hardware_concurrency() threads would exhaust the memory.

MaskRay commented 5 years ago

@rbresalier https://github.com/MaskRay/ccls/issues/316#issuecomment-472634405 You passed it wrong :(

ccls currently uses std::thread::hardware_concurrency() to determine the number of threads, but it could be smarter and look at how much memory is available in the system, limiting the number of threads in case using std::thread::hardware_concurrency() threads would exhaust the memory.

This is as difficult as determining the best -j number for make and ninja. Well, memory can be saved by reducing the parallelism, but it also harms performance. I don't think this can be smartly determined. The current choice (Ninja default) isn't bad. Users can change it if desired.

rbresalier commented 5 years ago

The current choice (Ninja default) isn't bad.

I disagree. It is bad because users get either an "Out of memory" error or uninterruptible sleep with the default behavior. On my machine with 72 threads, default ccls behavior will always cause uninterruptible sleep. I don't think it is ever acceptable for any program to crash like this.

The default behavior doesn't have to be perfect. Yes it is difficult to determine the very best number of threads to use - but I think the default behavior should strive to make a balance between the best choice and not crashing. If a user really wants to achieve better than the default then they can play with the number of threads. I truly believe the default should not cause crashes with no hint of what the problem is.

rbresalier commented 5 years ago

I also think the ccls algorithms are using too much memory. With the same code base, but using cquery instead, I observe cquery using up to 113 indexer threads and using 5.28GByte in my system. My system has 256GByte and 72 cores. So why does ccls run out of memory, 256GByte with 72 threads while cquery can live with 5.28GByte and 113 indexer threads?

MaskRay commented 5 years ago

I also think the ccls algorithms are using too much memory.

There could be other issues we haven't diagnosed. The filesystem can have problems. Clang may take significantly more memory parsing some code patterns. Check your cgroup memory.swappiness, etc. I don't believe ccls can use so much more memory than cquery. You may compare ccls/src/indexer.hh ccls/src/query.hh and cquery/src/indexer.h cquery/src/query.h.

Yes it is difficult to determine the very best number of threads to use - but I think the default behavior should strive to make a balance between the best choice and not crashing. If a user really wants to achieve better than the default then they can play with the number of threads.

They should tune the index.threads initialization option, just as they should tune -j when building. Many people working on large projects really want their projects to be indexed quicker and are fine with the ccls defaults. @3038922 and you made complaints. I appreciate that but I don't see it is a widely agreed opinion among users. And from my own experience this doesn't matter that much. There are users with 56 threads, too.

I disagree. It is bad because users get either "Out of memory" error or uinterruptible sleep with the default behavior. In my machine with 72 threads, default ccls behavior will always cause uninterruptible sleep. I don't think it is ever acceptable for any program to crash like this.

I disagree that uninterruptible sleep was ccls's fault.

ninja would also choke your system. The peak memory usage happens when indexers (ccls: direct clangIndex; cquery: indirect clangIndex via libclang) are parsing/analyzing/indexing the code. The retained memory (ccls: query.hh; cquery: query.h) isn't the dominating factor.

cache.retainInMemory: 1

Really appreciated if you can give some numbers about how much saving it makes by changing it to 0. I added this option and defaulted it to 1 to make this specific workflow more reliable: some users spawn several editors accessing the same project (thinking of multiple nvim processes). It increases some memory but most users don't deal with huge projects and they can surely live with the increase.

Don't get me wrong. I take memory usage seriously. If you read my old commits in cquery, you shall notice changes like: changing usr from std::string to Usr (uint64_t), adding Maybe to replace optional, deleting/merging fields, etc.

  const double grow = 1.3;
  size_t t;

  if ((t = funcs.size() + u->funcs_hint) > funcs.capacity()) {
    t = size_t(t * grow);
    funcs.reserve(t);
    func_usr.reserve(t);
  }

If you are really motivated, try some older versions (especially those releases before last October; some fields had to be added for new features).

src/indexer.cc:1266. Change some to false, etc. ccls uses clangIndex directly, which has less bookkeeping compared with libclang.

  index::IndexingOptions IndexOpts;
  IndexOpts.SystemSymbolFilter =
      index::IndexingOptions::SystemSymbolFilterKind::All;
  IndexOpts.IndexFunctionLocals = true;
  IndexOpts.IndexImplicitInstantiation = true;
#if LLVM_VERSION_MAJOR >= 9
  IndexOpts.IndexParametersInDeclarations = true;
  IndexOpts.IndexTemplateParameters = true;
#endif
rbresalier commented 5 years ago

There could be other issues we haven't diagnosed.

I agree and I think one of these issues should be reopened. With same code base, cquery does not have uninterruptible sleep with 113 threads, yet ccls does with default settings (72 threads on my system). Reducing threads in ccls makes the issue go away, but since cquery can spawn 113 threads, ccls should be able to spawn 72 threads on my system.

Maybe you can try to recreate the issue on your system by telling ccls to spawn 72 threads (even though you have less CPU cores) and see what happens? And then you can try to get cquery to spawn 113 threads like I have in my system.

I disagree that uninterruptible sleep was ccls's fault.

This is not about assigning fault. Most applications perform validation on user's input that would break the application and let user know. For example, if application asks me a question to answer yes/no, and I answer "maybe", application will tell me I made an error.

Similarly, if the user asks ccls for too many threads and that could cause a crash, it would be much more user friendly to let the user know instead of allowing the user's input to crash the system with no clue as to how to fix it. As a new user of ccls, my first experience was uninterruptible sleep and I had no idea why — I knew nothing about the number of threads. It would be nice to know why.

They should tune the index.threads initialization option

But how would a user even know to do this? I was a new user of ccls, I knew nothing about it. I tried it with its default settings and it went into uninterruptible sleep. To find out that I had to change # of threads, I had to raise an issue on github and have lots of discussion. If ccls just told me about a potential issue I would have saved so much time.

Would be much more user friendly to tell or warn the user instead of just crashing and the user, who knows nothing about ccls, has no clue why. It is not about assigning fault.

Anyway, this is probably a moot issue because I think there is an un-diagnosed problem and ccls should work with 72 threads on my system, just because cquery works with 113 threads. If that issue is solved, I wouldn't need to reduce the number of threads.

rbresalier commented 5 years ago

I tried retainInMemory: 0. Very quickly after I open gVim I see an error in gVim: "Language server cpp exited unexpectedly: failed to fill the whole buffer" and ccls is dead. With retainInMemory: 1, I get the uninterruptible sleep state. I had to move to the latest release, 0.20190308.1, in order to get the retainInMemory option; it was not available in the 0.20181225.8 release I was using earlier.

MaskRay commented 5 years ago

Again, I don't think the #316 OOM is a ccls issue. Answered in https://github.com/MaskRay/ccls/issues/316#issuecomment-473156229 but replied here for one question

retainInMemory: 0. Very quickly after I open gVim I see error in gVim: "Language server cpp exited unexpectedly: failed to fill the whole buffer" and ccls is dead

There might be some versions using cache.retainInMemory: boolean. It is int now. 0.20190308.1 is good. The language client should also tell you that ccls had emitted an error message before it exited. For type mismatch, it exits immediately.

MaskRay commented 5 years ago

The commit "Add initialization option index.initialNoLinkage: false" should decrease memory usage a lot.

HyperWinX commented 6 months ago

Same problem. I use an Arch Linux machine based on an FX-8350 and 16GB of RAM. I added "-fsanitize=address" to the CFLAGS/CXXFLAGS variables in /etc/makepkg.conf, and compiled the ccls-git AUR package. When I work in VSC, on (probably) every save ccls runs several more jobs. After some time I have 200-250 jobs already. The project has several hundred lines of code, but ccls uses 1.5GB of RAM and 30TB of virtual memory. Then it crashed, and I got a report from ccls and AddressSanitizer.

=================================================================
==21974==ERROR: AddressSanitizer: requested allocation size 0x7e750f672170 (0x7e750f673170 after adjustments for alignment, red zones etc.) exceeds maximum supported size of 0x10000000000 (thread T5)
    #0 0x5d678ae3dd89 in malloc (/usr/bin/ccls+0x126d89) (BuildId: 26bd40cd47bdaebbefcf51c149c7159af78c7241)
    #1 0x7e750fedec48 in llvm::SmallVectorBase<unsigned long>::grow_pod(void*, unsigned long, unsigned long) (/usr/lib/libLLVM-17.so+0x6dec48) (BuildId: 23cd725c53bbcb975bb284794c263d7ee44cbd7f)

==21974==HINT: if you don't care about these errors you may set allocator_may_return_null=1
SUMMARY: AddressSanitizer: allocation-size-too-big (/usr/bin/ccls+0x126d89) (BuildId: 26bd40cd47bdaebbefcf51c149c7159af78c7241) in malloc
Thread T5 created by T0 here:
    #0 0x5d678ae34fa8 in pthread_create (/usr/bin/ccls+0x11dfa8) (BuildId: 26bd40cd47bdaebbefcf51c149c7159af78c7241)
    #1 0x5d678af8ac5c in ccls::spawnThread(void* (*)(void*), void*) /usr/src/debug/ccls-git/ccls/src/platform_posix.cc:75:3
    #2 0x5d678affa7bd in ccls::SemaManager::SemaManager(ccls::Project*, ccls::WorkingFiles*, std::function<void (std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::vector<ccls::Diagnostic, std::allocator<ccls::Diagnostic>>)>, std::function<void (ccls::RequestId)>) /usr/src/debug/ccls-git/ccls/src/sema_manager.cc:762:3
    #3 0x5d678af70ea1 in ccls::pipeline::mainLoop() /usr/src/debug/ccls-git/ccls/src/pipeline.cc:625:15
    #4 0x5d678aea3610 in main /usr/src/debug/ccls-git/ccls/src/main.cc:146:7
    #5 0x7e750f241ccf  (/usr/lib/libc.so.6+0x25ccf) (BuildId: 6542915cee3354fbcf2b3ac5542201faec43b5c9)

==21974==ABORTING
ccls server connection was closed
[Error - 7:06:19 PM] Connection to server got closed. Server will not be restarted.
[Error - 7:06:19 PM] Request textDocument/completion failed.
Error: Connection got disposed.
    at Object.dispose (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/main.js:904:25)
    at Object.dispose (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/client.js:74:35)
    at LanguageClient.handleConnectionClosed (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/client.js:2309:42)
    at LanguageClient.handleConnectionClosed (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/main.js:155:15)
    at closeHandler (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/client.js:2296:18)
    at CallbackList.invoke (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:62:39)
    at Emitter.fire (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:121:36)
    at closeHandler (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/main.js:240:26)
    at CallbackList.invoke (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:62:39)
    at Emitter.fire (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:121:36)
    at StreamMessageWriter.fireClose (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/messageWriter.js:39:27)
    at Socket.<anonymous> (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/messageWriter.js:58:46)
    at Socket.emit (node:events:517:28)
    at Pipe.<anonymous> (node:net:350:12)

Yes, 2024, and this bug is still present

MaskRay commented 5 months ago

Same problem. I use an Arch Linux machine based on an FX-8350 and 16GB of RAM. I added "-fsanitize=address" to the CFLAGS/CXXFLAGS variables in /etc/makepkg.conf, and compiled the ccls-git AUR package. When I work in VSC, on (probably) every save ccls runs several more jobs. After some time I have 200-250 jobs already. The project has several hundred lines of code, but ccls uses 1.5GB of RAM and 30TB of virtual memory. Then it crashed, and I got a report from ccls and AddressSanitizer.

=================================================================
==21974==ERROR: AddressSanitizer: requested allocation size 0x7e750f672170 (0x7e750f673170 after adjustments for alignment, red zones etc.) exceeds maximum supported size of 0x10000000000 (thread T5)
    #0 0x5d678ae3dd89 in malloc (/usr/bin/ccls+0x126d89) (BuildId: 26bd40cd47bdaebbefcf51c149c7159af78c7241)
    #1 0x7e750fedec48 in llvm::SmallVectorBase<unsigned long>::grow_pod(void*, unsigned long, unsigned long) (/usr/lib/libLLVM-17.so+0x6dec48) (BuildId: 23cd725c53bbcb975bb284794c263d7ee44cbd7f)

==21974==HINT: if you don't care about these errors you may set allocator_may_return_null=1
SUMMARY: AddressSanitizer: allocation-size-too-big (/usr/bin/ccls+0x126d89) (BuildId: 26bd40cd47bdaebbefcf51c149c7159af78c7241) in malloc
Thread T5 created by T0 here:
    #0 0x5d678ae34fa8 in pthread_create (/usr/bin/ccls+0x11dfa8) (BuildId: 26bd40cd47bdaebbefcf51c149c7159af78c7241)
    #1 0x5d678af8ac5c in ccls::spawnThread(void* (*)(void*), void*) /usr/src/debug/ccls-git/ccls/src/platform_posix.cc:75:3
    #2 0x5d678affa7bd in ccls::SemaManager::SemaManager(ccls::Project*, ccls::WorkingFiles*, std::function<void (std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::vector<ccls::Diagnostic, std::allocator<ccls::Diagnostic>>)>, std::function<void (ccls::RequestId)>) /usr/src/debug/ccls-git/ccls/src/sema_manager.cc:762:3
    #3 0x5d678af70ea1 in ccls::pipeline::mainLoop() /usr/src/debug/ccls-git/ccls/src/pipeline.cc:625:15
    #4 0x5d678aea3610 in main /usr/src/debug/ccls-git/ccls/src/main.cc:146:7
    #5 0x7e750f241ccf  (/usr/lib/libc.so.6+0x25ccf) (BuildId: 6542915cee3354fbcf2b3ac5542201faec43b5c9)

==21974==ABORTING
ccls server connection was closed
[Error - 7:06:19 PM] Connection to server got closed. Server will not be restarted.
[Error - 7:06:19 PM] Request textDocument/completion failed.
Error: Connection got disposed.
  at Object.dispose (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/main.js:904:25)
  at Object.dispose (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/client.js:74:35)
  at LanguageClient.handleConnectionClosed (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/client.js:2309:42)
  at LanguageClient.handleConnectionClosed (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/main.js:155:15)
  at closeHandler (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-languageclient/lib/client.js:2296:18)
  at CallbackList.invoke (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:62:39)
  at Emitter.fire (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:121:36)
  at closeHandler (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/main.js:240:26)
  at CallbackList.invoke (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:62:39)
  at Emitter.fire (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/events.js:121:36)
  at StreamMessageWriter.fireClose (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/messageWriter.js:39:27)
  at Socket.<anonymous> (/home/hyperwin/.vscode-oss/extensions/ccls-project.ccls-0.1.29-universal/node_modules/vscode-jsonrpc/lib/messageWriter.js:58:46)
  at Socket.emit (node:events:517:28)
  at Pipe.<anonymous> (node:net:350:12)

Yes, 2024, and this bug is still present

I don't think this is a ccls bug. If you build ccls with -fsanitize=address, you'd need to build llvm/clang with -fsanitize=address as well to avoid ABI mismatches.