[package - main-i386-default][misc/llama-cpp] Failed for llama-cpp-3616 in build
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Sat, 24 Aug 2024 04:33:48 UTC
You are receiving this mail as a port that you maintain is failing to build on the FreeBSD package build server. Please investigate the failure and submit a PR to fix build. Maintainer: yuri@FreeBSD.org Log URL: https://pkg-status.freebsd.org/beefy17/data/main-i386-default/pa760f6876277_s5cbb98c8259/logs/llama-cpp-3616.log Build URL: https://pkg-status.freebsd.org/beefy17/build.html?mastername=main-i386-default&build=pa760f6876277_s5cbb98c8259 Log: =>> Building misc/llama-cpp build started at Sat Aug 24 04:31:23 UTC 2024 port directory: /usr/ports/misc/llama-cpp package name: llama-cpp-3616 building for: FreeBSD main-i386-default-job-06 15.0-CURRENT FreeBSD 15.0-CURRENT 1500023 i386 maintained by: yuri@FreeBSD.org Makefile datestamp: -rw-r--r-- 1 root wheel 1116 Aug 24 01:01 /usr/ports/misc/llama-cpp/Makefile Ports top last git commit: a760f687627 Ports top unclean checkout: no Port dir last git commit: 94655493eaa Port dir unclean checkout: no Poudriere version: poudriere-git-3.4.1-36-ge6fe6143 Host OSVERSION: 1500019 Jail OSVERSION: 1500023 Job Id: 06 !!! Jail is newer than host. (Jail: 1500023, Host: 1500019) !!! !!! This is not supported. !!! !!! Host kernel must be same or newer than jail. !!! !!! Expect build failures. !!! 
---Begin Environment--- SHELL=/bin/sh BLOCKSIZE=K MAIL=/var/mail/root MM_CHARSET=UTF-8 LANG=C.UTF-8 OSVERSION=1500023 STATUS=1 HOME=/root PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin MAKE_OBJDIR_CHECK_WRITABLE=0 UNAME_m=i386 UNAME_p=i386 UNAME_r=15.0-CURRENT LOCALBASE=/usr/local UNAME_v=FreeBSD 15.0-CURRENT 1500023 USER=root POUDRIERE_NAME=poudriere-git LIBEXECPREFIX=/usr/local/libexec/poudriere POUDRIERE_VERSION=3.4.1-36-ge6fe6143 MASTERMNT=/usr/local/poudriere/data/.m/main-i386-default/ref LC_COLLATE=C POUDRIERE_BUILD_TYPE=bulk PACKAGE_BUILDING=yes SAVED_TERM= OUTPUT_REDIRECTED_STDERR=4 OUTPUT_REDIRECTED=1 PWD=/usr/local/poudriere/data/.m/main-i386-default/06/.p OUTPUT_REDIRECTED_STDOUT=3 P_PORTS_FEATURES=FLAVORS SUBPACKAGES SELECTED_OPTIONS MASTERNAME=main-i386-default SCRIPTPREFIX=/usr/local/share/poudriere SCRIPTNAME=bulk.sh OLDPWD=/usr/local/poudriere/data/.m/main-i386-default/ref/.p/pool POUDRIERE_PKGNAME=poudriere-git-3.4.1-36-ge6fe6143 SCRIPTPATH=/usr/local/share/poudriere/bulk.sh POUDRIEREPATH=/usr/local/bin/poudriere ---End Environment--- ---Begin Poudriere Port Flags/Env--- PORT_FLAGS= PKGENV= FLAVOR= MAKE_ARGS= ---End Poudriere Port Flags/Env--- ---Begin OPTIONS List--- ===> The following configuration options are available for llama-cpp-3616: EXAMPLES=on: Build and/or install examples VULKAN=on: Vulkan GPU offload support ===> Use 'make config' to modify these settings ---End OPTIONS List--- --MAINTAINER-- yuri@FreeBSD.org --End MAINTAINER-- --CONFIGURE_ARGS-- --End CONFIGURE_ARGS-- --CONFIGURE_ENV-- PYTHON="/usr/local/bin/python3.11" XDG_DATA_HOME=/wrkdirs/usr/ports/misc/llama-cpp/work XDG_CONFIG_HOME=/wrkdirs/usr/ports/misc/llama-cpp/work XDG_CACHE_HOME=/wrkdirs/usr/ports/misc/llama-cpp/work/.cache HOME=/wrkdirs/usr/ports/misc/llama-cpp/work TMPDIR="/tmp" PATH=/wrkdirs/usr/ports/misc/llama-cpp/work/.bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin 
PKG_CONFIG_LIBDIR=/wrkdirs/usr/ports/misc/llama-cpp/work/.pkgconfig:/usr/local/libdata/pkgconfig:/usr/local/share/pkgconfig:/usr/libdata/pkgconfig SHELL=/bin/sh CONFIG_SHELL=/bin/sh --End CONFIGURE_ENV-- --MAKE_ENV-- NINJA_STATUS="[%p %s/%t] " XDG_DATA_HOME=/wrkdirs/usr/ports/misc/llama-cpp/work XDG_CONFIG_HOME=/wrkdirs/usr/ports/misc/llama-cpp/work XDG_CACHE_HOME=/wrkdirs/usr/ports/misc/llama-cpp/work/.cache HOME=/wrkdirs/usr/ports/misc/llama-cpp/work TMPDIR="/tmp" PATH=/wrkdirs/usr/ports/misc/llama-cpp/work/.bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin PKG_CONFIG_LIBDIR=/wrkdirs/usr/ports/misc/llama-cpp/work/.pkgconfig:/usr/local/libdata/pkgconfig:/usr/local/share/pkgconfig:/usr/libdata/pkgconfig MK_DEBUG_FILES=no MK_KERNEL_SYMBOLS=no SHELL=/bin/sh NO_LINT=YES DESTDIR=/wrkdirs/usr/ports/misc/llama-cpp/work/stage PREFIX=/usr/local LOCALBASE=/usr/local CC="cc" CFLAGS="-O2 -pipe -fstack-protector-strong -fno-strict-aliasing " CPP="cpp" CPPFLAGS="" LDFLAGS=" -fstack-protector-strong " LIBS="" CXX="c++" CXXFLAGS="-O2 -pipe -fstack-protector-strong -fno-strict-aliasing " BSD_INSTALL_PROGRAM="install -s -m 555" BSD_INSTALL_LIB="install -s -m 0644" BSD_INSTALL_SCRIPT="install -m 555" BSD_INSTALL_DATA="install -m 0644" BSD_INSTALL_MAN="install -m 444" --End MAKE_ENV-- --PLIST_SUB-- PORTEXAMPLES="" EXAMPLES="" NO_EXAMPLES="@comment " VULKAN="" NO_VULKAN="@comment " CMAKE_BUILD_TYPE="release" PYTHON_INCLUDEDIR=include/python3.11 PYTHON_LIBDIR=lib/python3.11 PYTHON_PLATFORM=freebsd15 PYTHON_SITELIBDIR=lib/python3.11/site-packages PYTHON_SUFFIX=311 PYTHON_EXT_SUFFIX=.cpython-311 PYTHON_VER=3.11 PYTHON_VERSION=python3.11 PYTHON2="@comment " PYTHON3="" OSREL=15.0 PREFIX=%D LOCALBASE=/usr/local RESETPREFIX=/usr/local LIB32DIR=lib DOCSDIR="share/doc/llama-cpp" EXAMPLESDIR="share/examples/llama-cpp" DATADIR="share/llama-cpp" WWWDIR="www/llama-cpp" ETCDIR="etc/llama-cpp" --End PLIST_SUB-- --SUB_LIST-- EXAMPLES="" NO_EXAMPLES="@comment " VULKAN="" 
NO_VULKAN="@comment " PYTHON_INCLUDEDIR=/usr/local/include/python3.11 PYTHON_LIBDIR=/usr/local/lib/python3.11 PYTHON_PLATFORM=freebsd15 PYTHON_SITELIBDIR=/usr/local/lib/python3.11/site-packages PYTHON_SUFFIX=311 PYTHON_EXT_SUFFIX=.cpython-311 PYTHON_VER=3.11 PYTHON_VERSION=python3.11 PYTHON2="@comment " PYTHON3="" PREFIX=/usr/local LOCALBASE=/usr/local DATADIR=/usr/local/share/llama-cpp DOCSDIR=/usr/local/share/doc/llama-cpp EXAMPLESDIR=/usr/local/share/examples/llama-cpp WWWDIR=/usr/local/www/llama-cpp ETCDIR=/usr/local/etc/llama-cpp --End SUB_LIST-- ---Begin make.conf--- # XXX: We really need this but cannot use it while 'make checksum' does not # try the next mirror on checksum failure. It currently retries the same # failed mirror and then fails rather then trying another. It *does* # try the next if the size is mismatched though. #MASTER_SITE_FREEBSD=yes # Build ALLOW_MAKE_JOBS_PACKAGES with 3 jobs MAKE_JOBS_NUMBER=3 USE_PACKAGE_DEPENDS=yes BATCH=yes WRKDIRPREFIX=/wrkdirs PORTSDIR=/usr/ports PACKAGES=/packages DISTDIR=/distfiles PACKAGE_BUILDING=yes PACKAGE_BUILDING_FLAVORS=yes MACHINE=i386 MACHINE_ARCH=i386 ARCH=${MACHINE_ARCH} #### #### # XXX: We really need this but cannot use it while 'make checksum' does not # try the next mirror on checksum failure. It currently retries the same # failed mirror and then fails rather then trying another. It *does* # try the next if the size is mismatched though. 
#MASTER_SITE_FREEBSD=yes # Build ALLOW_MAKE_JOBS_PACKAGES with 3 jobs MAKE_JOBS_NUMBER=3 #### Misc Poudriere #### .include "/etc/make.conf.ports_env" GID=0 UID=0 ---End make.conf--- --Resource limits-- cpu time (seconds, -t) unlimited file size (512-blocks, -f) unlimited data seg size (kbytes, -d) 524288 stack size (kbytes, -s) 65536 core file size (512-blocks, -c) unlimited max memory size (kbytes, -m) unlimited locked memory (kbytes, -l) unlimited max user processes (-u) 89999 open files (-n) 8192 virtual mem size (kbytes, -v) unlimited swap limit (kbytes, -w) unlimited socket buffer size (bytes, -b) unlimited pseudo-terminals (-p) unlimited kqueues (-k) unlimited umtx shared locks (-o) unlimited --End resource limits-- =======================<phase: check-sanity >============================ ===== env: NO_DEPENDS=yes USER=root UID=0 GID=0 ===> License MIT accepted by the user =========================================================================== =======================<phase: pkg-depends >============================ ===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0 ===> llama-cpp-3616 depends on file: /usr/local/sbin/pkg - not found ===> Installing existing package /packages/All/pkg-1.21.3.pkg [main-i386-default-job-06] Installing pkg-1.21.3... [main-i386-default-job-06] Extracting pkg-1.21.3: .......... done ===> llama-cpp-3616 depends on file: /usr/local/sbin/pkg - found ===> Returning to build of llama-cpp-3616 =========================================================================== =======================<phase: fetch-depends >============================ ===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0 =========================================================================== =======================<phase: fetch >============================ ===== env: NO_DEPENDS=yes USER=root UID=0 GID=0 ===> License MIT accepted by the user => ggerganov-llama.cpp-b3616_GH0.tar.gz doesn't seem to exist in /portdistfiles/. 
=> Attempting to fetch https://codeload.github.com/ggerganov/llama.cpp/tar.gz/b3616?dummy=/ggerganov-llama.cpp-b3616_GH0.tar.gz fetch: https://codeload.github.com/ggerganov/llama.cpp/tar.gz/b3616?dummy=/ggerganov-llama.cpp-b3616_GH0.tar.gz: size unknown fetch: https://codeload.github.com/ggerganov/llama.cpp/tar.gz/b3616?dummy=/ggerganov-llama.cpp-b3616_GH0.tar.gz: size of remote file is not known ggerganov-llama.cpp-b3616_GH0.tar.gz 18 MB 7972 kBps 02s ===> Fetching all distfiles required by llama-cpp-3616 for building =========================================================================== =======================<phase: checksum >============================ ===== env: NO_DEPENDS=yes USER=root UID=0 GID=0 ===> License MIT accepted by the user ===> Fetching all distfiles required by llama-cpp-3616 for building => SHA256 Checksum OK for ggerganov-llama.cpp-b3616_GH0.tar.gz. => SHA256 Checksum OK for nomic-ai-kompute-4565194_GH0.tar.gz. =========================================================================== =======================<phase: extract-depends>============================ ===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0 =========================================================================== =======================<phase: extract >============================ ===== env: NO_DEPENDS=yes USER=root UID=0 GID=0 ===> License MIT accepted by the user ===> Fetching all distfiles required by llama-cpp-3616 for building ===> Extracting for llama-cpp-3616 => SHA256 Checksum OK for ggerganov-llama.cpp-b3616_GH0.tar.gz. => SHA256 Checksum OK for nomic-ai-kompute-4565194_GH0.tar.gz. 
=========================================================================== =======================<phase: patch-depends >============================ ===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0 =========================================================================== <snip> ===> Returning to build of llama-cpp-3616 ===> llama-cpp-3616 depends on file: /usr/local/bin/cmake - not found ===> Installing existing package /packages/All/cmake-core-3.30.2.pkg [main-i386-default-job-06] Installing cmake-core-3.30.2... [main-i386-default-job-06] `-- Installing expat-2.6.2... [main-i386-default-job-06] `-- Extracting expat-2.6.2: .......... done [main-i386-default-job-06] `-- Installing jsoncpp-1.9.5... [main-i386-default-job-06] `-- Extracting jsoncpp-1.9.5: .......... done [main-i386-default-job-06] `-- Installing libuv-1.48.0... [main-i386-default-job-06] `-- Extracting libuv-1.48.0: .......... done [main-i386-default-job-06] `-- Installing rhash-1.4.4_1... [main-i386-default-job-06] | `-- Installing gettext-runtime-0.22.5... [main-i386-default-job-06] | | `-- Installing indexinfo-0.3.1... [main-i386-default-job-06] | | `-- Extracting indexinfo-0.3.1: .... done [main-i386-default-job-06] | `-- Extracting gettext-runtime-0.22.5: .......... done [main-i386-default-job-06] `-- Extracting rhash-1.4.4_1: .......... done [main-i386-default-job-06] Extracting cmake-core-3.30.2: .......... done ===> llama-cpp-3616 depends on file: /usr/local/bin/cmake - found ===> Returning to build of llama-cpp-3616 ===> llama-cpp-3616 depends on executable: ninja - not found ===> Installing existing package /packages/All/ninja-1.11.1,4.pkg [main-i386-default-job-06] Installing ninja-1.11.1,4... [main-i386-default-job-06] `-- Installing python311-3.11.9_1... [main-i386-default-job-06] | `-- Installing libffi-3.4.6... [main-i386-default-job-06] | `-- Extracting libffi-3.4.6: .......... done [main-i386-default-job-06] | `-- Installing mpdecimal-4.0.0... 
[main-i386-default-job-06] | `-- Extracting mpdecimal-4.0.0: .......... done [main-i386-default-job-06] | `-- Installing readline-8.2.10... [main-i386-default-job-06] | `-- Extracting readline-8.2.10: .......... done [main-i386-default-job-06] `-- Extracting python311-3.11.9_1: .......... done [main-i386-default-job-06] Extracting ninja-1.11.1,4: ........ done ===== Message from python311-3.11.9_1: -- Note that some standard Python modules are provided as separate ports as they require additional dependencies. They are available as: py311-gdbm databases/py-gdbm@py311 py311-sqlite3 databases/py-sqlite3@py311 py311-tkinter x11-toolkits/py-tkinter@py311 ===> llama-cpp-3616 depends on executable: ninja - found ===> Returning to build of llama-cpp-3616 =========================================================================== =======================<phase: lib-depends >============================ ===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0 ===> llama-cpp-3616 depends on shared library: libvulkan.so - not found ===> Installing existing package /packages/All/vulkan-loader-1.3.293.pkg [main-i386-default-job-06] Installing vulkan-loader-1.3.293... [main-i386-default-job-06] `-- Installing libX11-1.8.9,1... [main-i386-default-job-06] | `-- Installing libxcb-1.17.0... [main-i386-default-job-06] | | `-- Installing libXau-1.0.11... [main-i386-default-job-06] | | `-- Extracting libXau-1.0.11: .......... done [main-i386-default-job-06] | | `-- Installing libXdmcp-1.1.5... [main-i386-default-job-06] | | `-- Installing xorgproto-2024.1... [main-i386-default-job-06] | | `-- Extracting xorgproto-2024.1: .......... done [main-i386-default-job-06] | | `-- Extracting libXdmcp-1.1.5: ......... done [main-i386-default-job-06] | `-- Extracting libxcb-1.17.0: .......... done [main-i386-default-job-06] `-- Extracting libX11-1.8.9,1: .......... done [main-i386-default-job-06] `-- Installing libXrandr-1.5.4... [main-i386-default-job-06] | `-- Installing libXext-1.3.6,1... 
[main-i386-default-job-06] | `-- Extracting libXext-1.3.6,1: .......... done [main-i386-default-job-06] | `-- Installing libXrender-0.9.11... [main-i386-default-job-06] | `-- Extracting libXrender-0.9.11: .......... done [main-i386-default-job-06] `-- Extracting libXrandr-1.5.4: .......... done [main-i386-default-job-06] `-- Installing wayland-1.23.0... [main-i386-default-job-06] | `-- Installing libepoll-shim-0.0.20240608... [main-i386-default-job-06] | `-- Extracting libepoll-shim-0.0.20240608: .......... done [main-i386-default-job-06] | `-- Installing libxml2-2.11.8... [main-i386-default-job-06] | `-- Extracting libxml2-2.11.8: .......... done [main-i386-default-job-06] `-- Extracting wayland-1.23.0: .......... done [main-i386-default-job-06] Extracting vulkan-loader-1.3.293: .......... done ===== Message from wayland-1.23.0: -- Wayland requires XDG_RUNTIME_DIR to be defined to a path that will contain "wayland-%d" unix(4) sockets. This is usually handled by consolekit2 (via ck-launch-session) or pam_xdg (via login). 
===> llama-cpp-3616 depends on shared library: libvulkan.so - found (/usr/local/lib/libvulkan.so) ===> Returning to build of llama-cpp-3616 =========================================================================== =======================<phase: configure >============================ ===== env: NO_DEPENDS=yes USER=root UID=0 GID=0 ===> Configuring for llama-cpp-3616 ===> Performing out-of-source build /bin/mkdir -p /wrkdirs/usr/ports/misc/llama-cpp/work/.build -- The C compiler identification is Clang 18.1.6 -- The CXX compiler identification is Clang 18.1.6 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/cc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/c++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Found Git: /wrkdirs/usr/ports/misc/llama-cpp/work/.bin/git -- Performing Test CMAKE_HAVE_LIBC_PTHREAD -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed -- Found Threads: TRUE -- Found OpenMP_C: -fopenmp=libomp (found version "5.1") -- Found OpenMP_CXX: -fopenmp=libomp (found version "5.1") -- Found OpenMP: TRUE (found version "5.1") -- OpenMP found -- Using llamafile -- Found Vulkan: /usr/local/lib/libvulkan.so (found version "1.3.293") found components: glslc missing components: glslangValidator -- Vulkan found -- Warning: ccache not found - consider installing it for faster compilation or disable this warning with GGML_CCACHE=OFF -- CMAKE_SYSTEM_PROCESSOR: i386 -- Unknown architecture -- Looking for pthread_create in pthreads -- Looking for pthread_create in pthreads - not found -- Looking for pthread_create in pthread -- Looking for pthread_create in pthread - found CMake Warning at common/CMakeLists.txt:30 (message): Git repository not found; to enable automatic generation of build info, make sure 
Git is installed and the project is a Git repository. -- Configuring done (2.4s) -- Generating done (0.2s) CMake Warning: Manually-specified variables were not used by the project: BOOST_PYTHON_SUFFIX CMAKE_COLOR_MAKEFILE CMAKE_MODULE_LINKER_FLAGS CMAKE_VERBOSE_MAKEFILE FETCHCONTENT_FULLY_DISCONNECTED Python3_EXECUTABLE Python_ADDITIONAL_VERSIONS Python_EXECUTABLE -- Build files have been written to: /wrkdirs/usr/ports/misc/llama-cpp/work/.build =========================================================================== =======================<phase: build >============================ ===== env: NO_DEPENDS=yes USER=root UID=0 GID=0 ===> Building for llama-cpp-3616 [ 0% 3/127] cd /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616 && /usr/local/bin/cmake -DMSVC= -DCMAKE_C_COMPILER_VERSION=18.1.6 -DCMAKE_C_COMPILER_ID=Clang -DCMAKE_VS_PLATFORM_NAME= -DCMAKE_C_COMPILER=/usr/bin/cc -P /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/common/cmake/build-info-gen-cpp.cmake -- Found Git: /wrkdirs/usr/ports/misc/llama-cpp/work/.bin/git [ 1% 4/127] /usr/bin/c++ -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -fPIC -MD -MT common/CMakeFiles/build_info.dir/build-info.cpp.o -MF common/CMakeFiles/build_info.dir/build-info.cpp.o.d -o common/CMakeFiles/build_info.dir/build-info.cpp.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/common/build-info.cpp [ 2% 5/127] /usr/bin/cc -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples/gguf-hash/deps -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -MD -MT examples/gguf-hash/CMakeFiles/sha1.dir/deps/sha1/sha1.c.o -MF examples/gguf-hash/CMakeFiles/sha1.dir/deps/sha1/sha1.c.o.d -o examples/gguf-hash/CMakeFiles/sha1.dir/deps/sha1/sha1.c.o -c 
/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples/gguf-hash/deps/sha1/sha1.c [ 3% 6/127] /usr/bin/cc -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples/gguf-hash/deps -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -MD -MT examples/gguf-hash/CMakeFiles/sha256.dir/deps/sha256/sha256.c.o -MF examples/gguf-hash/CMakeFiles/sha256.dir/deps/sha256/sha256.c.o.d -o examples/gguf-hash/CMakeFiles/sha256.dir/deps/sha256/sha256.c.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples/gguf-hash/deps/sha256/sha256.c [ 3% 6/127] /usr/bin/cc -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples/gguf-hash/deps -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -MD -MT examples/gguf-hash/CMakeFiles/xxhash.dir/deps/xxhash/xxhash.c.o -MF examples/gguf-hash/CMakeFiles/xxhash.dir/deps/xxhash/xxhash.c.o.d -o examples/gguf-hash/CMakeFiles/xxhash.dir/deps/xxhash/xxhash.c.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/examples/gguf-hash/deps/xxhash/xxhash.c [ 4% 6/127] /usr/bin/c++ -DGGML_SCHED_MAX_COPIES=4 -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu++11 -pthread -MD -MT ggml/src/vulkan-shaders/CMakeFiles/vulkan-shaders-gen.dir/vulkan-shaders-gen.cpp.o -MF ggml/src/vulkan-shaders/CMakeFiles/vulkan-shaders-gen.dir/vulkan-shaders-gen.cpp.o.d -o ggml/src/vulkan-shaders/CMakeFiles/vulkan-shaders-gen.dir/vulkan-shaders-gen.cpp.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp [ 5% 7/127] : && /usr/bin/c++ -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe 
-fstack-protector-strong -fno-strict-aliasing -DNDEBUG -fstack-protector-strong ggml/src/vulkan-shaders/CMakeFiles/vulkan-shaders-gen.dir/vulkan-shaders-gen.cpp.o -o bin/vulkan-shaders-gen -pthread && : [ 6% 8/127] cd /wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src && /wrkdirs/usr/ports/misc/llama-cpp/work/.build/bin/vulkan-shaders-gen --glslc /usr/local/bin/glslc --input-dir /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/vulkan-shaders --output-dir /wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src/vulkan-shaders.spv --target-hpp /wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src/ggml-vulkan-shaders.hpp --target-cpp /wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src/ggml-vulkan-shaders.cpp --no-clean ggml_vulkan: Generating and compiling shaders to SPIR-V [ 7% 11/127] /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/. 
-I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml-alloc.c.o -MF ggml/src/CMakeFiles/ggml.dir/ggml-alloc.c.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml-alloc.c.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-alloc.c [ 7% 12/127] /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/. -I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml-backend.c.o -MF ggml/src/CMakeFiles/ggml.dir/ggml-backend.c.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml-backend.c.o
-c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-backend.c [ 8% 13/127] /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/. -I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml-quants.c.o -MF ggml/src/CMakeFiles/ggml.dir/ggml-quants.c.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml-quants.c.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-quants.c [ 9% 14/127] /usr/bin/c++ -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/.
-I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu++11 -fPIC -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o -MF ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-vulkan.cpp FAILED: ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o /usr/bin/c++ -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/.
-I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu++11 -fPIC -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o -MF ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml-vulkan.cpp.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-vulkan.cpp /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-vulkan.cpp:2622:5: error: no matching function for call to 'vkCmdCopyBuffer' 2622 | vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy); | ^~~~~~~~~~~~~~~ /usr/local/include/vulkan/vulkan_core.h:4735:28: note: candidate function not viable: no known conversion from 'vk::Buffer' to 'VkBuffer' (aka 'unsigned long long') for 2nd argument 4735 | VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( | ^ 4736 | VkCommandBuffer commandBuffer, 4737 | VkBuffer srcBuffer, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-vulkan.cpp:2697:5: error: no matching function for call to 'vkCmdCopyBuffer' 2697 | vkCmdCopyBuffer(subctx->s->buffer, staging_buffer->buffer, dst->buffer, 1, &buf_copy); | ^~~~~~~~~~~~~~~ /usr/local/include/vulkan/vulkan_core.h:4735:28: note: candidate function not viable: no known conversion from 'vk::Buffer' to 'VkBuffer' (aka 'unsigned long long') for 2nd argument 4735 | VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( | ^ 4736 | VkCommandBuffer commandBuffer, 4737 | VkBuffer srcBuffer, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-vulkan.cpp:2829:5: error: no 
matching function for call to 'vkCmdCopyBuffer' 2829 | vkCmdCopyBuffer(ctx->s->buffer, src->buffer, dst->buffer, 1, &bc); | ^~~~~~~~~~~~~~~ /usr/local/include/vulkan/vulkan_core.h:4735:28: note: candidate function not viable: no known conversion from 'vk::Buffer' to 'VkBuffer' (aka 'unsigned long long') for 2nd argument 4735 | VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( | ^ 4736 | VkCommandBuffer commandBuffer, 4737 | VkBuffer srcBuffer, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml-vulkan.cpp:6710:61: warning: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat] 6710 | snprintf(name, sizeof(name), "%s%ld", GGML_VK_NAME, i); | ~~~ ^ | %zu 1 warning and 3 errors generated. [ 10% 14/127] /usr/bin/c++ -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/. 
-I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu++11 -fPIC -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml-vulkan-shaders.cpp.o -MF ggml/src/CMakeFiles/ggml.dir/ggml-vulkan-shaders.cpp.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml-vulkan-shaders.cpp.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src/ggml-vulkan-shaders.cpp [ 11% 14/127] /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_LLAMAFILE -DGGML_USE_OPENMP -DGGML_USE_VULKAN -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_EXPORTS -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/../include -I/wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/. -I/wrkdirs/usr/ports/misc/llama-cpp/work/.build/ggml/src -isystem /usr/local/include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -fopenmp=libomp -MD -MT ggml/src/CMakeFiles/ggml.dir/ggml.c.o -MF ggml/src/CMakeFiles/ggml.dir/ggml.c.o.d -o ggml/src/CMakeFiles/ggml.dir/ggml.c.o -c /wrkdirs/usr/ports/misc/llama-cpp/work/llama.cpp-b3616/ggml/src/ggml.c ninja: build stopped: subcommand failed. ===> Compilation failed unexpectedly. Try to set MAKE_JOBS_UNSAFE=yes and rebuild before reporting the failure to the maintainer. *** Error code 1 Stop. make: stopped making "build" in /usr/ports/misc/llama-cpp