git: d80a1a385f1a - main - misc/py-comfy-kitchen: New port: ComfyUI: Fast kernel library for Diffusion inference
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Thu, 19 Mar 2026 06:47:42 UTC
The branch main has been updated by yuri:
URL: https://cgit.FreeBSD.org/ports/commit/?id=d80a1a385f1af5e0dd1e55712f0bdd5e45d72bdb
commit d80a1a385f1af5e0dd1e55712f0bdd5e45d72bdb
Author: Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2026-03-19 01:26:01 +0000
Commit: Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2026-03-19 06:47:38 +0000
misc/py-comfy-kitchen: New port: ComfyUI: Fast kernel library for Diffusion inference
---
misc/Makefile | 1 +
misc/py-comfy-kitchen/Makefile | 41 ++++++++++++++++++++++++++++++
misc/py-comfy-kitchen/distinfo | 3 +++
misc/py-comfy-kitchen/files/patch-setup.py | 11 ++++++++
misc/py-comfy-kitchen/pkg-descr | 17 +++++++++++++
5 files changed, 73 insertions(+)
diff --git a/misc/Makefile b/misc/Makefile
index 4066b73e7e1d..e2265de6bf12 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -446,6 +446,7 @@
SUBDIR += py-colored
SUBDIR += py-colorzero
SUBDIR += py-comfy-aimdo
+ SUBDIR += py-comfy-kitchen
SUBDIR += py-comfyui-embedded-docs
SUBDIR += py-comfyui-frontend-package
SUBDIR += py-comfyui-workflow-templates
diff --git a/misc/py-comfy-kitchen/Makefile b/misc/py-comfy-kitchen/Makefile
new file mode 100644
index 000000000000..c774b1deaf04
--- /dev/null
+++ b/misc/py-comfy-kitchen/Makefile
@@ -0,0 +1,41 @@
+PORTNAME=	comfy-kitchen
+DISTVERSIONPREFIX=	v
+DISTVERSION=	0.2.8
+CATEGORIES=	misc python
+PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
+
+MAINTAINER=	yuri@FreeBSD.org
+COMMENT=	ComfyUI: Fast kernel library for Diffusion inference
+WWW=		https://github.com/Comfy-Org/comfy-kitchen
+
+LICENSE=	APACHE20
+
+BUILD_DEPENDS=	${PYTHON_PKGNAMEPREFIX}nanobind>0:devel/py-nanobind@${PY_FLAVOR} \
+		${PY_SETUPTOOLS} \
+		${PYTHON_PKGNAMEPREFIX}wheel>0:devel/py-wheel@${PY_FLAVOR}
+RUN_DEPENDS=	${PYTHON_PKGNAMEPREFIX}packaging>0:devel/py-packaging@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}pytorch>=2.0.0:misc/py-pytorch@${PY_FLAVOR}
+
+USES=		python
+USE_PYTHON=	pep517 autoplist pytest
+
+USE_GITHUB=	yes
+GH_ACCOUNT=	Comfy-Org
+
+NO_ARCH=	yes
+
+# CUDA is not available on FreeBSD; files/patch-setup.py honors this knob
+MAKE_ENV+=	COMFY_KITCHEN_NO_CUDA=1
+
+# NOTE: the package is only NO_ARCH while CUDA is disabled; it would contain binaries if CUDA were enabled
+
+do-test:
+	@cd ${WRKSRC} && ${SETENV} ${TEST_ENV} ${PYTHON_CMD} -m pytest -v -rs
+
+# tests as of 0.2.8:
+# Results (14.34s):
+#       574 passed
+#       203 skipped
+#       1 warning
+
+.include <bsd.port.mk>
diff --git a/misc/py-comfy-kitchen/distinfo b/misc/py-comfy-kitchen/distinfo
new file mode 100644
index 000000000000..0bb634471796
--- /dev/null
+++ b/misc/py-comfy-kitchen/distinfo
@@ -0,0 +1,3 @@
+TIMESTAMP = 1773845240
+SHA256 (Comfy-Org-comfy-kitchen-v0.2.8_GH0.tar.gz) = 0fd1ac41e48a4e2bb8e3458ddebbfc5b019288501701b862363a8742bb4486f2
+SIZE (Comfy-Org-comfy-kitchen-v0.2.8_GH0.tar.gz) = 96322
diff --git a/misc/py-comfy-kitchen/files/patch-setup.py b/misc/py-comfy-kitchen/files/patch-setup.py
new file mode 100644
index 000000000000..0acdc5fd821d
--- /dev/null
+++ b/misc/py-comfy-kitchen/files/patch-setup.py
@@ -0,0 +1,11 @@
+--- setup.py.orig 2025-03-01 00:00:00 UTC
++++ setup.py
+@@ -19,7 +19,7 @@ from setuptools.command.build_ext import build_ext
+ # This needs to happen before get_extensions() is called
+ # Usage: python setup.py install --no-cuda
+ # or: pip install . --no-cuda
+-BUILD_NO_CUDA = False
++BUILD_NO_CUDA = os.getenv("COMFY_KITCHEN_NO_CUDA", "0") == "1"
+ if "--no-cuda" in sys.argv:
+ BUILD_NO_CUDA = True
+ sys.argv.remove("--no-cuda") # Remove so setuptools doesn't complain
diff --git a/misc/py-comfy-kitchen/pkg-descr b/misc/py-comfy-kitchen/pkg-descr
new file mode 100644
index 000000000000..f3e8b0a78e13
--- /dev/null
+++ b/misc/py-comfy-kitchen/pkg-descr
@@ -0,0 +1,17 @@
+Comfy Kitchen is a high-performance kernel library designed for Diffusion model
+inference. It provides optimized implementations for critical operations,
+including various quantization formats and Rotary Positional Embeddings (RoPE).
+The library features a flexible dispatch system that automatically selects the
+most efficient compute backend—CUDA, Triton, or eager PyTorch—based on available
+hardware and input constraints.
+
+Key features include:
+* Optimized kernels specifically tuned for Diffusion inference workloads.
+* Support for multiple compute backends (CUDA C, Triton JIT, and pure PyTorch).
+* Transparent quantization via a QuantizedTensor subclass that intercepts
+ PyTorch operations.
+* Support for advanced quantization formats including FP8, NVFP4, and MXFP8.
+* Automatic backend selection and constraint validation for hardware-specific
+ optimizations.
+* Implementation of performance-critical functions like RoPE and scaled matrix
+ multiplication.