Open j2u123 opened 6 months ago
I think I ran into that. You should try setting the cmake option -DLLVM_TARGETS_TO_BUILD="host;NVPTX;AMDGPU"
. You can look at command ran by the buildbot:
https://github.com/openai/triton/blob/4650e0782bfa95c037a3d13ba44107e6979f3499/.github/workflows/llvm-build.yml#L96
If you find a solution and feel like updating the readme, feel free to send a PR :)
I'm running into a very similar issue after building LLVM (from source) and Triton (again, from source).
python -c "import triton as t"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "triton-dev/python/triton/__init__.py", line 8, in <module>
from .runtime import (
File "triton-dev/python/triton/runtime/__init__.py", line 1, in <module>
from .autotuner import (Autotuner, Config, Heuristics, autotune, heuristics)
File "triton-dev/python/triton/runtime/autotuner.py", line 9, in <module>
from ..testing import do_bench, do_bench_cudagraph
File "triton-dev/python/triton/testing.py", line 7, in <module>
from . import language as tl
File "triton-dev/python/triton/language/__init__.py", line 4, in <module>
from . import math
File "triton-dev/python/triton/language/math.py", line 1, in <module>
from . import core
File "triton-dev/python/triton/language/core.py", line 9, in <module>
from ..runtime.jit import jit
File "triton-dev/python/triton/runtime/jit.py", line 12, in <module>
from ..runtime.driver import driver
File "triton-dev/python/triton/runtime/driver.py", line 1, in <module>
from ..backends import backends
File "triton-dev/python/triton/backends/__init__.py", line 50, in <module>
backends = _discover_backends()
File "triton-dev/python/triton/backends/__init__.py", line 43, in _discover_backends
compiler = _load_module(name, os.path.join(root, name, 'compiler.py'))
File "triton-dev/python/triton/backends/__init__.py", line 12, in _load_module
spec.loader.exec_module(module)
File "triton-dev/python/triton/backends/nvidia/compiler.py", line 2, in <module>
from triton._C.libtriton import ir, passes, llvm, nvidia
ImportError: triton-dev/python/triton/_C/libtriton.so: undefined symbol: LLVMInitializeRISCVAsmParser
I checked my llvm build and the missing symbol is actually found in $LLVM_BUILD_DIR/lib. I'm wondering why Triton is unable to find it.
It appears that the issue stems from the python/src/llvm.cc, which includes llvm/Support/TargetSelect.h . This file brings in all of LLVM's targets, whereas the CMake configuration only adds the necessary targets as dependencies.
Here is a workaround.
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 32ef8af26..136277ac1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,6 +26,7 @@ option(TRITON_BUILD_TUTORIALS "Build C++ Triton tutorials" ON)
option(TRITON_BUILD_PYTHON_MODULE "Build Python Triton bindings" OFF)
option(TRITON_BUILD_PROTON "Build the Triton Proton profiler" ON)
option(TRITON_BUILD_UT "Build C++ Triton Unit Tests" ON)
+option(TRITON_BUILD_WITH_ALL_LLVM_COMPONENTS "Build C++ Triton with all llvm components" ON)
set(TRITON_CODEGEN_BACKENDS "" CACHE STRING "Enable different codegen backends")
# Ensure Python3 vars are set correctly
@@ -253,6 +254,12 @@ if(TRITON_BUILD_PYTHON_MODULE)
${PYTHON_SRC_PATH}/llvm.cc)
# Link triton with its dependencies
+ if (TRITON_BUILD_WITH_ALL_LLVM_COMPONENTS)
+ execute_process(COMMAND llvm-config --libfiles
+ OUTPUT_VARIABLE LLVM_LIBS OUTPUT_STRIP_TRAILING_WHITESPACE)
+ string(REPLACE " " ";" LLVM_LIBS "${LLVM_LIBS}")
+ list(APPEND TRITON_LIBRARIES ${LLVM_LIBS})
+ endif()
target_link_libraries(triton PUBLIC ${TRITON_LIBRARIES})
if(WIN32)
target_link_libraries(triton PRIVATE ${CMAKE_DL_LIBS})
After following the 'Building with a custom LLVM' instructions, I built and installed Triton successfully, and I could run a simple vector_add Triton program. Then I added one print statement in the Triton source code and built and installed again; but now, when I run this simple vector_add Triton program again, `import triton` causes an error.
Traceback (most recent call last):
File "triton/tutorial/add_ir/add.py", line 2, in <module>
import triton
File "workspace/triton/python/triton/__init__.py", line 8, in <module>
from .runtime import (
File "workspace/triton/python/triton/runtime/__init__.py", line 1, in <module>
from .autotuner import (Autotuner, Config, Heuristics, OutOfResources, autotune, heuristics)
File "workspace/triton/python/triton/runtime/autotuner.py", line 7, in <module>
from ..testing import do_bench
File "workspace/triton/python/triton/testing.py", line 7, in <module>
from . import language as tl
File "workspace/triton/python/triton/language/__init__.py", line 4, in <module>
from . import math
File "workspace/triton/python/triton/language/math.py", line 1, in <module>
from . import core
File "workspace/triton/python/triton/language/core.py", line 9, in <module>
from ..runtime.jit import jit
File "workspace/triton/python/triton/runtime/jit.py", line 11, in <module>
from ..runtime.driver import driver
File "workspace/triton/python/triton/runtime/driver.py", line 1, in <module>
from ..backends import backends
File "workspace/triton/python/triton/backends/__init__.py", line 50, in <module>
backends = _discover_backends()
^^^^^^^^^^^^^^^^^^^^
File "workspace/triton/python/triton/backends/__init__.py", line 43, in _discover_backends
compiler = _load_module(name, os.path.join(root, name, 'compiler.py'))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "workspace/triton/python/triton/backends/__init__.py", line 12, in _load_module
spec.loader.exec_module(module)
File "workspace/triton/python/triton/backends/nvidia/compiler.py", line 2, in <module>
from triton._C.libtriton import ir, passes, llvm, nvidia
ImportError: workspace/triton/python/triton/_C/libtriton.so: undefined symbol: LLVMInitializeSparcTarget
Am I rebuilding in the wrong way, or am I missing something important?