Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions modules/nf-core/huggingface/download/environment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json
# NOTE: keep this version in sync with the container tag in main.nf
# (community.wave.seqera.io/library/huggingface_hub:1.6.0--...)
channels:
  - conda-forge
dependencies:
  - conda-forge::huggingface_hub=1.6.0
30 changes: 30 additions & 0 deletions modules/nf-core/huggingface/download/main.nf
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
process HF_DOWNLOAD {
    tag "$meta.id"
    label 'process_medium'

    conda "${moduleDir}/environment.yml"
    container "community.wave.seqera.io/library/huggingface_hub:1.6.0--c106a7f9664ca39b"

    input:
    // meta     : Groovy map with sample information, e.g. [ id:'sample1' ]
    // hf_repo  : HuggingFace repository (e.g. "ggml-org/gemma-3-1b-it-GGUF")
    // hf_file  : file to fetch from the repository (also the emitted output path)
    // hf_home  : HuggingFace cache directory; empty/null falls back to a project-local cache
    tuple val(meta), val(hf_repo), val(hf_file), val(hf_home)

    output:
    tuple val(meta), path(hf_file), emit: output
    tuple val("${task.process}"), val("huggingface_hub"), eval("hf --version 2>&1 | tail -n1 | awk '{print \$NF}'"), topic: versions, emit: versions_huggingface_hub

    when:
    task.ext.when == null || task.ext.when

    script:
    // nf-core convention: allow extra CLI flags (e.g. --revision) via task.ext.args
    def args = task.ext.args ?: ''
    // Default the cache next to the pipeline so repeated runs reuse downloads
    def hf_home_resolved = hf_home ?: "${workflow.projectDir}/hf_cache"
    """
    export HF_HOME="${hf_home_resolved}"
    export HF_HUB_CACHE="\$HF_HOME"
    # Quote repo/file so names containing spaces or shell metacharacters survive
    hf download '${hf_repo}' '${hf_file}' --local-dir \$PWD ${args}
    """

    stub:
    """
    touch '${hf_file}'
    """
}
66 changes: 66 additions & 0 deletions modules/nf-core/huggingface/download/meta.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/meta-schema.json
name: hf_download
description: Tool for downloading models from HuggingFace
keywords:
  - llm
  - llama
  - ai
tools:
  - huggingface_hub:
      description: "HuggingFace Hub CLI interface"
      homepage: "https://huggingface.co/docs/huggingface_hub/guides/cli"
      licence:
        - "MIT"
      identifier: ""
input:
  - - meta:
        type: map
        description: |
          Groovy Map containing sample information
          e.g. `[ id:'sample1' ]`
    - hf_repo:
        type: string
        description: HuggingFace repository
    - hf_file:
        type: string
        description: HuggingFace GGUF file
    - hf_home:
        type: string
        description: HuggingFace default cache directory
output:
  output:
    - - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. `[ id:'sample1' ]`
      - hf_file:
          type: file
          description: Downloaded HuggingFace GGUF file
          ontologies: []
  versions_huggingface_hub:
    - - "${task.process}":
          type: string
          description: The name of the process
      - huggingface_hub:
          type: string
          description: The name of the tool
      - hf --version 2>&1 | tail -n1 | awk '{print \$NF}':
          type: eval
          description: The expression to obtain the version of the tool
topics:
  versions:
    - - "${task.process}":
          type: string
          description: The name of the process
      - huggingface_hub:
          type: string
          description: The name of the tool
      - hf --version 2>&1 | tail -n1 | awk '{print \$NF}':
          type: eval
          description: The expression to obtain the version of the tool
authors:
  - "@toniher"
  - "@lucacozzuto"
maintainers:
  - "@toniher"
  - "@lucacozzuto"
66 changes: 66 additions & 0 deletions modules/nf-core/huggingface/download/tests/main.nf.test
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
nextflow_process {

    name "Test Process HF_DOWNLOAD"
    script "../main.nf"
    process "HF_DOWNLOAD"

    tag "modules"
    tag "modules_nfcore"
    tag "huggingface"
    tag "huggingface/download"

    // Real download of a small GGUF model from the HuggingFace Hub.
    // NOTE(review): requires network access at test time — confirm this is
    // acceptable for CI, or gate it behind a tag if runs must be offline.
    test("download gguf file") {

        when {
            process {
                """
                input[0] = [
                    [ id:'test_model' ],
                    "ggml-org/gemma-3-1b-it-GGUF",
                    "gemma-3-1b-it-Q4_K_M.gguf",
                    "./hf_cache"
                ]
                """
            }
        }

        then {
            assertAll(
                { assert process.success },
                { assert process.out.output.size() == 1 },
                { assert process.out.output[0][0] == [ id:'test_model' ] },
                // File content is not snapshotted (model binaries are large);
                // only name and non-emptiness are asserted.
                { assert file(process.out.output[0][1]).name == "gemma-3-1b-it-Q4_K_M.gguf" },
                { assert file(process.out.output[0][1]).size() > 0 },
                // Snapshot only the version channels, which are stable across runs
                { assert snapshot(process.out.findAll { key, val -> key.startsWith('versions') }).match() }
            )
        }
    }

    // Stub run: the process only touches the output file, so no size check here.
    test("stub - download gguf file") {

        options "-stub"

        when {
            process {
                """
                input[0] = [
                    [ id:'test_model' ],
                    "ggml-org/gemma-3-1b-it-GGUF",
                    "gemma-3-1b-it-Q4_K_M.gguf",
                    "./hf_cache"
                ]
                """
            }
        }

        then {
            assertAll(
                { assert process.success },
                { assert process.out.output.size() == 1 },
                { assert process.out.output[0][0] == [ id:'test_model' ] },
                { assert file(process.out.output[0][1]).name == "gemma-3-1b-it-Q4_K_M.gguf" },
                { assert snapshot(process.out.findAll { key, val -> key.startsWith('versions') }).match() }
            )
        }
    }
}
38 changes: 38 additions & 0 deletions modules/nf-core/huggingface/download/tests/main.nf.test.snap
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
{
"stub - download gguf file": {
"content": [
{
"versions_huggingface_hub": [
[
"HF_DOWNLOAD",
"huggingface_hub",
"1.6.0"
]
]
}
],
"timestamp": "2026-03-26T08:39:57.919278809",
"meta": {
"nf-test": "0.9.5",
"nextflow": "25.10.4"
}
},
"download gguf file": {
"content": [
{
"versions_huggingface_hub": [
[
"HF_DOWNLOAD",
"huggingface_hub",
"1.6.0"
]
]
}
],
"timestamp": "2026-03-26T08:38:24.630341776",
"meta": {
"nf-test": "0.9.5",
"nextflow": "25.10.4"
}
}
}
7 changes: 7 additions & 0 deletions modules/nf-core/huggingface/download/tests/nextflow.config
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
// Allow executables bundled in the module's bin/ directory to be used by processes
nextflow.enable.moduleBinaries = true

process {
    withName: 'HF_DOWNLOAD' {
        // When running under a docker profile, bind-mount the project-local
        // HuggingFace cache into the container at the same path so the process
        // can read/write it; other profiles need no extra container options.
        containerOptions = { workflow.profile.contains('docker') ? "--volume ${projectDir}/hf_cache:${projectDir}/hf_cache" : '' }
    }
}
5 changes: 5 additions & 0 deletions modules/nf-core/llamacpp-python/run/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# CUDA-enabled image for llama-cpp-python; the cu124 wheel index below must
# match the CUDA version of this base image.
FROM nvidia/cuda:12.4.1-devel-ubuntu22.04

# Single layer: install Python without recommended extras, then remove the apt
# package lists so they are not baked into the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends python3 python3-pip \
    && rm -rf /var/lib/apt/lists/*

# --no-cache-dir keeps pip's wheel cache out of the image.
# TODO(review): consider pinning llama-cpp-python to the version used in
# environment.yml (0.3.16) if the cu124 index provides that release.
RUN pip3 install --no-cache-dir llama-cpp-python \
    --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124
7 changes: 7 additions & 0 deletions modules/nf-core/llamacpp-python/run/environment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json
# NOTE: CPU-only conda build; the GPU path uses the Dockerfile/quay image instead.
# Keep this version in sync with the non-GPU container tag in main.nf (0.3.16).
channels:
  - conda-forge
  - bioconda
dependencies:
  - conda-forge::llama-cpp-python=0.3.16
36 changes: 36 additions & 0 deletions modules/nf-core/llamacpp-python/run/main.nf
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
process LLAMACPP_PYTHON_RUN {
    tag "$meta.id"
    label 'process_medium'
    label 'process_gpu'

    conda "${moduleDir}/environment.yml"
    // GPU runs use the CUDA image built from this module's Dockerfile;
    // CPU runs use the conda-derived Wave container.
    container "${ task.ext.use_gpu ? 'quay.io/nf-core/llama-cpp-python:0.1.9' : 'community.wave.seqera.io/library/llama-cpp-python:0.3.16--b351398cd0ea7fc5' }"

    input:
    // meta        : Groovy map with sample information, e.g. [ id:'sample1' ]
    // prompt_file : messages/prompt file passed to the wrapper script
    // gguf_model  : GGUF model file to load for inference
    tuple val(meta), path(prompt_file), path(gguf_model)

    output:
    tuple val(meta), path("output.txt"), emit: output
    tuple val("${task.process}"), val("llama-cpp-python"), eval("python3 -c 'import llama_cpp; print(llama_cpp.__version__)'"), topic: versions, emit: versions_llama_cpp_python

    when:
    task.ext.when == null || task.ext.when

    script:
    // Extra CLI flags (e.g. sampling options) via task.ext.args
    def args = task.ext.args ?: ''
    // Removed unused 'prefix' assignment: the output name is fixed to
    // output.txt by the output declaration above.
    """
    llamacpp-python.py \
        --model ${gguf_model} \
        --messages ${prompt_file} \
        --output output.txt \
        ${args}
    """

    stub:
    """
    touch output.txt
    """
}
69 changes: 69 additions & 0 deletions modules/nf-core/llamacpp-python/run/meta.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/meta-schema.json
name: llamacpp_python_run
description: Python wrapper for running locally-hosted LLM with llama.cpp
keywords:
  - llm
  - llama
  - ai
tools:
  - llama-cpp-python:
      description: "Python wrapper for llama.cpp LLM inference tool"
      homepage: "https://llama-cpp-python.readthedocs.io/en/latest/"
      licence:
        - "MIT"
      identifier: ""
input:
  - - meta:
        type: map
        description: |
          Groovy Map containing sample information
          e.g. `[ id:'sample1' ]`
    - prompt_file:
        type: file
        description: |
          Prompt file
          Structure: [ val(meta), path(prompt_file) ]
        ontologies: []
    - gguf_model:
        type: file
        description: |
          GGUF model
          Structure: [ val(meta), path(gguf_model) ]
        ontologies: []
output:
  output:
    - - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. `[ id:'sample1' ]`
      - "output.txt":
          type: file
          description: File with the output of LLM inference request
          ontologies: []
  versions_llama_cpp_python:
    - - "${task.process}":
          type: string
          description: The name of the process
      - llama-cpp-python:
          type: string
          description: The name of the tool
      - python3 -c 'import llama_cpp; print(llama_cpp.__version__)':
          type: eval
          description: The expression to obtain the version of the tool
topics:
  versions:
    - - "${task.process}":
          type: string
          description: The name of the process
      - llama-cpp-python:
          type: string
          description: The name of the tool
      - python3 -c 'import llama_cpp; print(llama_cpp.__version__)':
          type: eval
          description: The expression to obtain the version of the tool
authors:
  - "@toniher"
  - "@lucacozzuto"
maintainers:
  - "@toniher"
  - "@lucacozzuto"
Loading
Loading