[package - 133releng-armv7-default][misc/py-llama-cpp-python] Failed for py311-llama-cpp-python-0.3.2 in build

From: <pkg-fallout@FreeBSD.org>
Date: Tue, 03 Dec 2024 16:57:42 UTC
You are receiving this mail because a port that you maintain
is failing to build on the FreeBSD package build server.
Please investigate the failure and submit a PR to fix the
build.

Maintainer:     yuri@FreeBSD.org
Log URL:        https://pkg-status.freebsd.org/ampere3/data/133releng-armv7-default/884c365e2398/logs/py311-llama-cpp-python-0.3.2.log
Build URL:      https://pkg-status.freebsd.org/ampere3/build.html?mastername=133releng-armv7-default&build=884c365e2398
Log:

=>> Building misc/py-llama-cpp-python
build started at Tue Dec  3 16:56:35 UTC 2024
port directory: /usr/ports/misc/py-llama-cpp-python
package name: py311-llama-cpp-python-0.3.2
building for: FreeBSD 133releng-armv7-default-job-13 13.3-RELEASE-p8 FreeBSD 13.3-RELEASE-p8 1303001 arm
maintained by: yuri@FreeBSD.org
Makefile datestamp: -rw-r--r--  1 root  wheel  1736 Nov 28 01:06 /usr/ports/misc/py-llama-cpp-python/Makefile
Ports top last git commit: 884c365e239
Ports top unclean checkout: no
Port dir last git commit: f6f441f6827
Port dir unclean checkout: no
Poudriere version: poudriere-git-3.4.2
Host OSVERSION: 1500023
Jail OSVERSION: 1303001
Job Id: 13

---Begin Environment---
SHELL=/bin/csh
OSVERSION=1303001
UNAME_v=FreeBSD 13.3-RELEASE-p8 1303001
UNAME_r=13.3-RELEASE-p8
BLOCKSIZE=K
MAIL=/var/mail/root
MM_CHARSET=UTF-8
LANG=C.UTF-8
STATUS=1
HOME=/root
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin
MAKE_OBJDIR_CHECK_WRITABLE=0
LOCALBASE=/usr/local
USER=root
POUDRIERE_NAME=poudriere-git
LIBEXECPREFIX=/usr/local/libexec/poudriere
POUDRIERE_VERSION=3.4.2
MASTERMNT=/usr/local/poudriere/data/.m/133releng-armv7-default/ref
LC_COLLATE=C
POUDRIERE_BUILD_TYPE=bulk
PACKAGE_BUILDING=yes
SAVED_TERM=
OUTPUT_REDIRECTED_STDERR=4
OUTPUT_REDIRECTED=1
PWD=/usr/local/poudriere/data/.m/133releng-armv7-default/13/.p
OUTPUT_REDIRECTED_STDOUT=3
P_PORTS_FEATURES=FLAVORS SUBPACKAGES SELECTED_OPTIONS
MASTERNAME=133releng-armv7-default
SCRIPTPREFIX=/usr/local/share/poudriere
SCRIPTNAME=bulk.sh
OLDPWD=/usr/local/poudriere/data/.m/133releng-armv7-default/ref/.p/pool
POUDRIERE_PKGNAME=poudriere-git-3.4.2
SCRIPTPATH=/usr/local/share/poudriere/bulk.sh
POUDRIEREPATH=/usr/local/bin/poudriere
---End Environment---

---Begin Poudriere Port Flags/Env---
PORT_FLAGS=
PKGENV=
FLAVOR=py311
MAKE_ARGS= FLAVOR=py311
---End Poudriere Port Flags/Env---

---Begin OPTIONS List---
---End OPTIONS List---

--MAINTAINER--
yuri@FreeBSD.org
--End MAINTAINER--

--CONFIGURE_ARGS--

--End CONFIGURE_ARGS--

--CONFIGURE_ENV--
PYTHON="/usr/local/bin/python3.11" XDG_DATA_HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311  XDG_CONFIG_HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311  XDG_CACHE_HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/.cache  HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311 TMPDIR="/tmp" PATH=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/.bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin PKG_CONFIG_LIBDIR=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/.pkgconfig:/usr/local/libdata/pkgconfig:/usr/local/share/pkgconfig:/usr/libdata/pkgconfig SHELL=/bin/sh CONFIG_SHELL=/bin/sh
--End CONFIGURE_ENV--

--MAKE_ENV--
XDG_DATA_HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311  XDG_CONFIG_HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311  XDG_CACHE_HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/.cache  HOME=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311 TMPDIR="/tmp" PATH=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/.bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin PKG_CONFIG_LIBDIR=/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/.pkgconfig:/usr/local/libdata/pkgconfig:/usr/local/share/pkgconfig:/usr/libdata/pkgconfig MK_DEBUG_FILES=no MK_KERNEL_SYMBOLS=no SHELL=/bin/sh NO_LINT=YES PREFIX=/usr/local  LOCALBASE=/usr/local  CC="cc" CFLAGS="-O2 -pipe  -fstack-protector-strong -fno-strict-aliasing "  CPP="cpp" CPPFLAGS=""  LDFLAGS=" -fstack-protector-strong " LIBS=""  CXX="c++" CXXFLAGS="-O2 -pipe -fstack-protector-strong -fno-strict-aliasing  " BSD_INSTALL_PROGRAM="install  -s -m 555"  BSD_INSTALL_LIB="install  -s -m 0644"  BSD_INSTALL_SCRIPT="install  -m 555"  BSD_INSTALL_DATA="install  -m 0644"  BSD_INSTALL_MAN="install  -m 444"
--End MAKE_ENV--

--PLIST_SUB--
PYTHON_INCLUDEDIR=include/python3.11  PYTHON_LIBDIR=lib/python3.11  PYTHON_PLATFORM=freebsd13  PYTHON_SITELIBDIR=lib/python3.11/site-packages  PYTHON_SUFFIX=311  PYTHON_EXT_SUFFIX=.cpython-311  PYTHON_VER=3.11  PYTHON_VERSION=python3.11 PYTHON2="@comment " PYTHON3="" OSREL=13.3 PREFIX=%D LOCALBASE=/usr/local  RESETPREFIX=/usr/local LIB32DIR=lib DOCSDIR="share/doc/llama-cpp-python"  EXAMPLESDIR="share/examples/llama-cpp-python"  DATADIR="share/llama-cpp-python"  WWWDIR="www/llama-cpp-python"  ETCDIR="etc/llama-cpp-python"
--End PLIST_SUB--

--SUB_LIST--
PYTHON_INCLUDEDIR=/usr/local/include/python3.11  PYTHON_LIBDIR=/usr/local/lib/python3.11  PYTHON_PLATFORM=freebsd13  PYTHON_SITELIBDIR=/usr/local/lib/python3.11/site-packages  PYTHON_SUFFIX=311  PYTHON_EXT_SUFFIX=.cpython-311  PYTHON_VER=3.11  PYTHON_VERSION=python3.11 PYTHON2="@comment " PYTHON3="" PREFIX=/usr/local LOCALBASE=/usr/local  DATADIR=/usr/local/share/llama-cpp-python DOCSDIR=/usr/local/share/doc/llama-cpp-python EXAMPLESDIR=/usr/local/share/examples/llama-cpp-python  WWWDIR=/usr/local/www/llama-cpp-python ETCDIR=/usr/local/etc/llama-cpp-python
--End SUB_LIST--

---Begin make.conf---
# XXX: We really need this but cannot use it while 'make checksum' does not
# try the next mirror on checksum failure.  It currently retries the same
# failed mirror and then fails rather than trying another.  It *does*
# try the next if the size is mismatched though.
#MASTER_SITE_FREEBSD=yes
# Build ALLOW_MAKE_JOBS_PACKAGES with 3 jobs
MAKE_JOBS_NUMBER=3
USE_PACKAGE_DEPENDS=yes
BATCH=yes
WRKDIRPREFIX=/wrkdirs
PORTSDIR=/usr/ports
PACKAGES=/packages
DISTDIR=/distfiles
PACKAGE_BUILDING=yes
PACKAGE_BUILDING_FLAVORS=yes
#### Misc Poudriere ####
.include "/etc/make.conf.ports_env"
GID=0
UID=0
---End make.conf---
--Resource limits--
cpu time               (seconds, -t)  unlimited
file size           (512-blocks, -f)  unlimited
data seg size           (kbytes, -d)  524288
stack size              (kbytes, -s)  65536
core file size      (512-blocks, -c)  unlimited
max memory size         (kbytes, -m)  unlimited
locked memory           (kbytes, -l)  unlimited
max user processes              (-u)  89999
open files                      (-n)  8192
virtual mem size        (kbytes, -v)  unlimited
swap limit              (kbytes, -w)  unlimited
socket buffer size       (bytes, -b)  unlimited
pseudo-terminals                (-p)  unlimited
kqueues                         (-k)  unlimited
umtx shared locks               (-o)  unlimited
--End resource limits--
=======================<phase: check-sanity   >============================
===== env: NO_DEPENDS=yes USER=root UID=0 GID=0
===>  License MIT accepted by the user
===========================================================================
=======================<phase: pkg-depends    >============================
===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0
===>   py311-llama-cpp-python-0.3.2 depends on file: /usr/local/sbin/pkg - not found
===>   Installing existing package /packages/All/pkg-1.21.3.pkg
[133releng-armv7-default-job-13] Installing pkg-1.21.3...
[133releng-armv7-default-job-13] Extracting pkg-1.21.3: .......... done
===>   py311-llama-cpp-python-0.3.2 depends on file: /usr/local/sbin/pkg - found
===>   Returning to build of py311-llama-cpp-python-0.3.2
===========================================================================
=======================<phase: fetch-depends  >============================
===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0
===========================================================================
=======================<phase: fetch          >============================
===== env: NO_DEPENDS=yes USER=root UID=0 GID=0
===>  License MIT accepted by the user
===> Fetching all distfiles required by py311-llama-cpp-python-0.3.2 for building
===========================================================================
=======================<phase: checksum       >============================
===== env: NO_DEPENDS=yes USER=root UID=0 GID=0
===>  License MIT accepted by the user
===> Fetching all distfiles required by py311-llama-cpp-python-0.3.2 for building
=> SHA256 Checksum OK for abetlen-llama-cpp-python-v0.3.2_GH0.tar.gz.
=> SHA256 Checksum OK for ggerganov-llama.cpp-74d73dc_GH0.tar.gz.
===========================================================================
=======================<phase: extract-depends>============================
===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0
===========================================================================
=======================<phase: extract        >============================
===== env: NO_DEPENDS=yes USER=root UID=0 GID=0
===>  License MIT accepted by the user
===> Fetching all distfiles required by py311-llama-cpp-python-0.3.2 for building
===>  Extracting for py311-llama-cpp-python-0.3.2
=> SHA256 Checksum OK for abetlen-llama-cpp-python-v0.3.2_GH0.tar.gz.
=> SHA256 Checksum OK for ggerganov-llama.cpp-74d73dc_GH0.tar.gz.
===========================================================================
=======================<phase: patch-depends  >============================
===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0
===========================================================================
=======================<phase: patch          >============================
===== env: NO_DEPENDS=yes USER=root UID=0 GID=0
===>  Patching for py311-llama-cpp-python-0.3.2
===========================================================================
=======================<phase: build-depends  >============================
===== env: USE_PACKAGE_DEPENDS_ONLY=1 USER=root UID=0 GID=0
===>   py311-llama-cpp-python-0.3.2 depends on package: py311-scikit-build-core>0 - not found
===>   Installing existing package /packages/All/py311-scikit-build-core-0.10.7.pkg
[133releng-armv7-default-job-13] Installing py311-scikit-build-core-0.10.7...
[133releng-armv7-default-job-13] `-- Installing py311-packaging-24.2...
[133releng-armv7-default-job-13] |   `-- Installing python311-3.11.10...
[133releng-armv7-default-job-13] |   | `-- Installing gettext-runtime-0.22.5...
[133releng-armv7-default-job-13] |   |   `-- Installing indexinfo-0.3.1...
[133releng-armv7-default-job-13] |   |   `-- Extracting indexinfo-0.3.1: .... done
[133releng-armv7-default-job-13] |   | `-- Extracting gettext-runtime-0.22.5: .......... done
[133releng-armv7-default-job-13] |   | `-- Installing libffi-3.4.6...
[133releng-armv7-default-job-13] |   | `-- Extracting libffi-3.4.6: .......... done
[133releng-armv7-default-job-13] |   | `-- Installing mpdecimal-4.0.0...
[133releng-armv7-default-job-13] |   | `-- Extracting mpdecimal-4.0.0: .......... done
[133releng-armv7-default-job-13] |   | `-- Installing readline-8.2.13_2...
[133releng-armv7-default-job-13] |   | `-- Extracting readline-8.2.13_2: .......... done
[133releng-armv7-default-job-13] |   `-- Extracting python311-3.11.10: .......... done
[133releng-armv7-default-job-13] `-- Extracting py311-packaging-24.2: .......... done
[133releng-armv7-default-job-13] `-- Installing py311-pathspec-0.12.1...
<snip>
-- ARM detected
-- Performing Test COMPILER_SUPPORTS_FP16_FORMAT_I3E
-- Performing Test COMPILER_SUPPORTS_FP16_FORMAT_I3E - Failed
-- Using runtime weight conversion of Q4_0 to Q4_0_x_x to enable optimized GEMM/GEMV kernels
-- Including CPU backend
CMake Warning at vendor/llama.cpp/ggml/src/ggml-amx/CMakeLists.txt:106 (message):
  AMX requires x86 and gcc version > 11.0.  Turning off GGML_AMX.


-- Looking for pthread_create in pthreads
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - found
CMake Warning at vendor/llama.cpp/common/CMakeLists.txt:30 (message):
  Git repository not found; to enable automatic generation of build info,
  make sure Git is installed and the project is a Git repository.


CMake Warning (dev) at CMakeLists.txt:13 (install):
  Target llama has PUBLIC_HEADER files but no PUBLIC_HEADER DESTINATION.
Call Stack (most recent call first):
  CMakeLists.txt:80 (llama_cpp_python_install_target)
This warning is for project developers.  Use -Wno-dev to suppress it.

CMake Warning (dev) at CMakeLists.txt:21 (install):
  Target llama has PUBLIC_HEADER files but no PUBLIC_HEADER DESTINATION.
Call Stack (most recent call first):
  CMakeLists.txt:80 (llama_cpp_python_install_target)
This warning is for project developers.  Use -Wno-dev to suppress it.

CMake Warning (dev) at CMakeLists.txt:13 (install):
  Target ggml has PUBLIC_HEADER files but no PUBLIC_HEADER DESTINATION.
Call Stack (most recent call first):
  CMakeLists.txt:81 (llama_cpp_python_install_target)
This warning is for project developers.  Use -Wno-dev to suppress it.

CMake Warning (dev) at CMakeLists.txt:21 (install):
  Target ggml has PUBLIC_HEADER files but no PUBLIC_HEADER DESTINATION.
Call Stack (most recent call first):
  CMakeLists.txt:81 (llama_cpp_python_install_target)
This warning is for project developers.  Use -Wno-dev to suppress it.

-- Configuring done (4.2s)
-- Generating done (0.1s)
-- Build files have been written to: /tmp/tmpaz5rnqxo/build
*** Building project with Unix Makefiles...
Change Dir: '/tmp/tmpaz5rnqxo/build'

Run Build Command(s): /usr/local/bin/cmake -E env VERBOSE=1 /usr/bin/make -f Makefile
/usr/local/bin/cmake -S/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2 -B/tmp/tmpaz5rnqxo/build --check-build-system CMakeFiles/Makefile.cmake 0
/usr/local/bin/cmake -E cmake_progress_start /tmp/tmpaz5rnqxo/build/CMakeFiles /tmp/tmpaz5rnqxo/build//CMakeFiles/progress.marks
/usr/bin/make  -f CMakeFiles/Makefile2 all
/usr/bin/make  -f vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/build.make vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/depend
cd /tmp/tmpaz5rnqxo/build && /usr/local/bin/cmake -E cmake_depends "Unix Makefiles" /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2 /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src /tmp/tmpaz5rnqxo/build /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/DependInfo.cmake "--color="
/usr/bin/make  -f vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/build.make vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/build
[  2%] Building C object vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml.c.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_base_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe  -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -MD -MT vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml.c.o -MF CMakeFiles/ggml-base.dir/ggml.c.o.d -o CMakeFiles/ggml-base.dir/ggml.c.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml.c
[  5%] Building C object vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-alloc.c.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_base_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe  -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -MD -MT vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-alloc.c.o -MF CMakeFiles/ggml-base.dir/ggml-alloc.c.o.d -o CMakeFiles/ggml-base.dir/ggml-alloc.c.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-alloc.c
[  7%] Building CXX object vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-backend.cpp.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/bin/c++ -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_base_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu++11 -fPIC -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi -pthread -MD -MT vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-backend.cpp.o -MF CMakeFiles/ggml-base.dir/ggml-backend.cpp.o.d -o CMakeFiles/ggml-base.dir/ggml-backend.cpp.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-backend.cpp
[ 10%] Building CXX object vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-threading.cpp.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/bin/c++ -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_base_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu++11 -fPIC -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi -pthread -MD -MT vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-threading.cpp.o -MF CMakeFiles/ggml-base.dir/ggml-threading.cpp.o.d -o CMakeFiles/ggml-base.dir/ggml-threading.cpp.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-threading.cpp
[ 12%] Building C object vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-quants.c.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_base_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe  -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -MD -MT vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-quants.c.o -MF CMakeFiles/ggml-base.dir/ggml-quants.c.o.d -o CMakeFiles/ggml-base.dir/ggml-quants.c.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-quants.c
[ 15%] Building C object vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-aarch64.c.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/bin/cc -DGGML_BUILD -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_base_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe  -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -pthread -MD -MT vendor/llama.cpp/ggml/src/CMakeFiles/ggml-base.dir/ggml-aarch64.c.o -MF CMakeFiles/ggml-base.dir/ggml-aarch64.c.o.d -o CMakeFiles/ggml-base.dir/ggml-aarch64.c.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-aarch64.c
[ 17%] Linking CXX shared library libggml-base.so
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src && /usr/local/bin/cmake -E cmake_link_script CMakeFiles/ggml-base.dir/link.txt --verbose=1
/usr/bin/c++ -fPIC -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -Xlinker --dependency-file -Xlinker CMakeFiles/ggml-base.dir/link.d -fstack-protector-strong -shared -Wl,-soname,libggml-base.so -o libggml-base.so "CMakeFiles/ggml-base.dir/ggml.c.o" "CMakeFiles/ggml-base.dir/ggml-alloc.c.o" "CMakeFiles/ggml-base.dir/ggml-backend.cpp.o" "CMakeFiles/ggml-base.dir/ggml-threading.cpp.o" "CMakeFiles/ggml-base.dir/ggml-quants.c.o" "CMakeFiles/ggml-base.dir/ggml-aarch64.c.o"  -Wl,-rpath,"\$ORIGIN" -lm -pthread
[ 17%] Built target ggml-base
/usr/bin/make  -f vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/build.make vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/depend
cd /tmp/tmpaz5rnqxo/build && /usr/local/bin/cmake -E cmake_depends "Unix Makefiles" /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2 /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu /tmp/tmpaz5rnqxo/build /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src/ggml-cpu /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/DependInfo.cmake "--color="
/usr/bin/make  -f vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/build.make vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/build
[ 20%] Building C object vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o
cd /tmp/tmpaz5rnqxo/build/vendor/llama.cpp/ggml/src/ggml-cpu && /usr/bin/cc -DGGML_BACKEND_BUILD -DGGML_BACKEND_SHARED -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_CPU_AARCH64 -DGGML_USE_LLAMAFILE -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_cpu_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/.. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe  -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations -MD -MT vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o -MF CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o.d -o CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:245:9: warning: 'CACHE_LINE_SIZE' macro redefined [-Wmacro-redefined]
  245 | #define CACHE_LINE_SIZE 64
      |         ^
/usr/include/machine/param.h:102:9: note: previous definition is here
  102 | #define CACHE_LINE_SIZE         (1 << CACHE_LINE_SHIFT)
      |         ^
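
The redefinition warning above is cosmetic: ggml-cpu.c defines CACHE_LINE_SIZE unconditionally, and FreeBSD's <machine/param.h> already provides a definition. A minimal sketch of the usual guard pattern (illustrative only; not necessarily how the vendored source handles it):

    /* Let a platform-provided CACHE_LINE_SIZE win; fall back to 64 otherwise. */
    #ifndef CACHE_LINE_SIZE
    #define CACHE_LINE_SIZE 64
    #endif
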
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:1534:5: warning: implicit conversion increases floating-point precision: 'float' to 'ggml_float' (aka 'double') [-Wdouble-promotion]
 1534 |     GGML_F16_VEC_REDUCE(sumf, sum);
      |     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:582:41: note: expanded from macro 'GGML_F16_VEC_REDUCE'
  582 |     #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
      |                                         ^
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:572:38: note: expanded from macro 'GGML_F32Cx4_REDUCE'
  572 |     #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE
      |                                      ^
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:502:13: note: expanded from macro 'GGML_F32x4_REDUCE'
  502 |     (res) = GGML_F32x4_REDUCE_ONE((x)[0]);         \
      |           ~ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:487:34: note: expanded from macro 'GGML_F32x4_REDUCE_ONE'
  487 | #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
      |                                  ^~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:1582:9: warning: implicit conversion increases floating-point precision: 'float' to 'ggml_float' (aka 'double') [-Wdouble-promotion]
 1582 |         GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
      |         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:582:41: note: expanded from macro 'GGML_F16_VEC_REDUCE'
  582 |     #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
      |                                         ^
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:572:38: note: expanded from macro 'GGML_F32Cx4_REDUCE'
  572 |     #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE
      |                                      ^
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:502:13: note: expanded from macro 'GGML_F32x4_REDUCE'
  502 |     (res) = GGML_F32x4_REDUCE_ONE((x)[0]);         \
      |           ~ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:487:34: note: expanded from macro 'GGML_F32x4_REDUCE_ONE'
  487 | #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
      |                                  ^~~~~~~~~~~~~
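
Both -Wdouble-promotion warnings point at the same pattern: the NEON reduce macro chain bottoms out in a float value (vaddvq_f32), which is implicitly widened when assigned into the double-typed accumulator (ggml_float is double). A stripped-down illustration, with reduce_one as a hypothetical stand-in for GGML_F32x4_REDUCE_ONE:

    typedef double ggml_float;

    static float reduce_one(void) { return 1.0f; }  /* stands in for vaddvq_f32() */

    void demo(void) {
        ggml_float sumf;
        sumf = reduce_one();  /* float -> double: exactly what -Wdouble-promotion flags */
        (void)sumf;
    }
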
fatal error: error in backend: Cannot select: 0x270f5aa0: v4f32 = fmaxnum 0x27a93190, 0x269f01e0
  0x27a93190: v4f32,i32,ch = ARMISD::VLD1_UPD<(load (s128) from %ir.16869, align 4)> 0x269ad6fc, 0x269f0640, Constant:i32<16>, Constant:i32<1>
    0x269f0640: i32,ch = CopyFromReg 0x269ad6fc, Register:i32 %6764
      0x270f5be0: i32 = Register %6764
    0x27a93b40: i32 = Constant<16>
    0x26974f50: i32 = Constant<1>
  0x269f01e0: v4f32 = bitcast 0x270f55f0
    0x270f55f0: v4i32 = ARMISD::VMOVIMM TargetConstant:i32<0>
      0x27a933c0: i32 = TargetConstant<0>
In function: ggml_graph_compute_thread
PLEASE submit a bug report to https://bugs.freebsd.org/submit/ and include the crash backtrace, preprocessed source, and associated run script.
Stack dump:
0.	Program arguments: /usr/bin/cc -DGGML_BACKEND_BUILD -DGGML_BACKEND_SHARED -DGGML_SCHED_MAX_COPIES=4 -DGGML_SHARED -DGGML_USE_CPU_AARCH64 -DGGML_USE_LLAMAFILE -D_XOPEN_SOURCE=600 -D__BSD_VISIBLE -Dggml_cpu_EXPORTS -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/.. -I/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/../include -O2 -pipe -fstack-protector-strong -fno-strict-aliasing -O3 -DNDEBUG -std=gnu11 -fPIC -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wunreachable-code-break -Wunreachable-code-return -Wdouble-promotion -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations -MD -MT vendor/llama.cpp/ggml/src/ggml-cpu/CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o -MF CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o.d -o CMakeFiles/ggml-cpu.dir/ggml-cpu.c.o -c /wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c
1.	<eof> parser at end of file
2.	Code generation
3.	Running pass 'Function Pass Manager' on module '/wrkdirs/usr/ports/misc/py-llama-cpp-python/work-py311/llama-cpp-python-0.3.2/vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c'.
4.	Running pass 'ARM Instruction Selection' on function '@ggml_graph_compute_thread'
 #0 0x046f3e28 (/usr/bin/cc+0x46f3e28)
 #1 0x046f1cf0 (/usr/bin/cc+0x46f1cf0)
 #2 0x046b81a4 (/usr/bin/cc+0x46b81a4)
 #3 0x046b8160 (/usr/bin/cc+0x46b8160)
 #4 0x046e2ab0 (/usr/bin/cc+0x46e2ab0)
 #5 0x019cc89c (/usr/bin/cc+0x19cc89c)
 #6 0x046bf248 (/usr/bin/cc+0x46bf248)
 #7 0x04a2df4c (/usr/bin/cc+0x4a2df4c)
 #8 0x04a2d4d0 (/usr/bin/cc+0x4a2d4d0)
 #9 0x04ee11cc (/usr/bin/cc+0x4ee11cc)
#10 0x04a25330 (/usr/bin/cc+0x4a25330)
#11 0x04a24b84 (/usr/bin/cc+0x4a24b84)
#12 0x04a247d8 (/usr/bin/cc+0x4a247d8)
#13 0x04a24048 (/usr/bin/cc+0x4a24048)
#14 0x04a223fc (/usr/bin/cc+0x4a223fc)
#15 0x04edc618 (/usr/bin/cc+0x4edc618)
#16 0x03eab248 (/usr/bin/cc+0x3eab248)
#17 0x0431ae08 (/usr/bin/cc+0x431ae08)
#18 0x04320e04 (/usr/bin/cc+0x4320e04)
#19 0x0431b3e0 (/usr/bin/cc+0x431b3e0)
#20 0x022968bc (/usr/bin/cc+0x22968bc)
#21 0x025a7d6c (/usr/bin/cc+0x25a7d6c)
#22 0x02aefc2c (/usr/bin/cc+0x2aefc2c)
#23 0x024e61e4 (/usr/bin/cc+0x24e61e4)
#24 0x0246ade4 (/usr/bin/cc+0x246ade4)
#25 0x025a13e8 (/usr/bin/cc+0x25a13e8)
#26 0x019cc240 (/usr/bin/cc+0x19cc240)
#27 0x019d9774 (/usr/bin/cc+0x19d9774)
#28 0x02314fc4 (/usr/bin/cc+0x2314fc4)
#29 0x046b813c (/usr/bin/cc+0x46b813c)
#30 0x02314780 (/usr/bin/cc+0x2314780)
#31 0x022df1a4 (/usr/bin/cc+0x22df1a4)
#32 0x022df430 (/usr/bin/cc+0x22df430)
#33 0x022f7640 (/usr/bin/cc+0x22f7640)
#34 0x019d9020 (/usr/bin/cc+0x19d9020)
cc: error: clang frontend command failed with exit code 70 (use -v to see invocation)
FreeBSD clang version 17.0.6 (https://github.com/llvm/llvm-project.git llvmorg-17.0.6-0-g6009708b4367)
Target: armv7-unknown-freebsd13.3-gnueabihf
Thread model: posix
InstalledDir: /usr/bin
cc: note: diagnostic msg: 
********************

PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:
Preprocessed source(s) and associated run script(s) are located at:
cc: note: diagnostic msg: /tmp/ggml-cpu-6ef35f.c
cc: note: diagnostic msg: /tmp/ggml-cpu-6ef35f.sh
cc: note: diagnostic msg: 

********************
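
The failure is a compiler crash, not a source error: clang 17's ARMv7 backend aborts during instruction selection on a vector (v4f32) fmaxnum node inside ggml_graph_compute_thread. The ISel dump shows fmaxnum against a VMOVIMM zero vector, consistent with a vectorized float-max-against-zero loop; -funsafe-math-optimizations is what permits fmaxnum to be formed here. A hedged guess at a minimal reproducer (untested; clamp_to_zero is a hypothetical name derived from the dump, not from the preprocessed source):

    /* Compile with something like:
     *   cc -O3 -mfpu=neon-fp-armv8 -funsafe-math-optimizations -c repro.c
     * targeting armv7-unknown-freebsd13.3-gnueabihf. */
    #include <math.h>

    void clamp_to_zero(float *x, int n) {
        for (int i = 0; i < n; i++)
            x[i] = fmaxf(x[i], 0.0f);  /* may vectorize to a v4f32 fmaxnum node */
    }

The files /tmp/ggml-cpu-6ef35f.c and /tmp/ggml-cpu-6ef35f.sh named above should reproduce the crash as-is and are what the requested bug report asks for. Dropping -funsafe-math-optimizations from the armv7 flags in the vendored ggml build files is one plausible workaround, though untested here.
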
*** Error code 1

Stop.
make[2]: stopped in /tmp/tmpaz5rnqxo/build
*** Error code 1

Stop.
make[1]: stopped in /tmp/tmpaz5rnqxo/build
*** Error code 1

Stop.
make: stopped in /tmp/tmpaz5rnqxo/build


*** CMake build failed

ERROR Backend subprocess exited when trying to invoke build_wheel
*** Error code 1

Stop.
make: stopped in /usr/ports/misc/py-llama-cpp-python