git: 8d4f6872f4ab - main - misc/py-opt-einsum: New port: Optimized Einsum: A tensor contraction order optimizer

From: Yuri Victorovich <yuri_at_FreeBSD.org>
Date: Mon, 18 Aug 2025 07:31:09 UTC
The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=8d4f6872f4ab415d384f83a769a1deabdf2a441c

commit 8d4f6872f4ab415d384f83a769a1deabdf2a441c
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2025-08-18 05:15:25 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2025-08-18 07:31:06 +0000

    misc/py-opt-einsum: New port: Optimized Einsum: A tensor contraction order optimizer
---
 misc/Makefile                |  1 +
 misc/py-opt-einsum/Makefile  | 26 ++++++++++++++++++++++++++
 misc/py-opt-einsum/distinfo  |  3 +++
 misc/py-opt-einsum/pkg-descr |  9 +++++++++
 4 files changed, 39 insertions(+)

diff --git a/misc/Makefile b/misc/Makefile
index 4d3466e4b671..a2353c07faf5 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -498,6 +498,7 @@
     SUBDIR += py-openpaperwork-core
     SUBDIR += py-openpaperwork-gtk
     SUBDIR += py-openvdb
+    SUBDIR += py-opt-einsum
     SUBDIR += py-optuna
     SUBDIR += py-oterm
     SUBDIR += py-palettable
diff --git a/misc/py-opt-einsum/Makefile b/misc/py-opt-einsum/Makefile
new file mode 100644
index 000000000000..afc1a532cd9d
--- /dev/null
+++ b/misc/py-opt-einsum/Makefile
@@ -0,0 +1,26 @@
+PORTNAME=	opt-einsum
+DISTVERSION=	3.4.0
+CATEGORIES=	misc python # machine-learning
+MASTER_SITES=	PYPI
+PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
+DISTNAME=	${PORTNAME:S/-/_/}-${PORTVERSION}
+
+MAINTAINER=	yuri@FreeBSD.org
+COMMENT=	Optimized Einsum: A tensor contraction order optimizer
+WWW=		https://github.com/dgasmith/opt_einsum
+
+LICENSE=	MIT
+LICENSE_FILE=	${WRKSRC}/LICENSE
+
+BUILD_DEPENDS=	${PYTHON_PKGNAMEPREFIX}hatch-fancy-pypi-readme>=0:devel/py-hatch-fancy-pypi-readme@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}hatch-vcs>0:devel/py-hatch-vcs@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}hatchling>0:devel/py-hatchling@${PY_FLAVOR}
+
+USES=		python
+USE_PYTHON=	pep517 autoplist pytest
+
+NO_ARCH=	yes
+
+# tests as of 3.4.0: 7736 passed, 155 skipped in 76.00s (0:01:16)
+
+.include <bsd.port.mk>
diff --git a/misc/py-opt-einsum/distinfo b/misc/py-opt-einsum/distinfo
new file mode 100644
index 000000000000..856c1d93e171
--- /dev/null
+++ b/misc/py-opt-einsum/distinfo
@@ -0,0 +1,3 @@
+TIMESTAMP = 1755493435
+SHA256 (opt_einsum-3.4.0.tar.gz) = 96ca72f1b886d148241348783498194c577fa30a8faac108586b14f1ba4473ac
+SIZE (opt_einsum-3.4.0.tar.gz) = 63004
diff --git a/misc/py-opt-einsum/pkg-descr b/misc/py-opt-einsum/pkg-descr
new file mode 100644
index 000000000000..7e1e65cc14a1
--- /dev/null
+++ b/misc/py-opt-einsum/pkg-descr
@@ -0,0 +1,9 @@
+Optimized einsum can significantly reduce the overall execution time of
+einsum-like expressions (e.g., np.einsum, dask.array.einsum, pytorch.einsum,
+tensorflow.einsum) by optimizing the expression's contraction order and
+dispatching many operations to canonical BLAS, cuBLAS, or other specialized
+routines.
+
+Optimized einsum is agnostic to the backend and can handle NumPy, Dask, PyTorch,
+TensorFlow, CuPy, Sparse, Theano, JAX, and Autograd arrays as well as
+potentially any library which conforms to a standard API.
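For context (not part of the commit diff), a minimal usage sketch of the library being ported, based on opt_einsum's documented contract()/contract_path() API; the operand shapes below are purely illustrative:

    import numpy as np
    import opt_einsum as oe

    # Three operands whose pairwise contraction order affects the cost.
    a = np.random.rand(50, 60)
    b = np.random.rand(60, 70)
    c = np.random.rand(70, 40)

    # contract() chooses an efficient contraction order and dispatches
    # the intermediate products to BLAS-backed routines where possible.
    result = oe.contract('ij,jk,kl->il', a, b, c)

    # contract_path() reports the chosen order and its estimated cost
    # without performing the contraction.
    path, info = oe.contract_path('ij,jk,kl->il', a, b, c)
    print(info)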