Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion modules/nf-core/limma/differential/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ process LIMMA_DIFFERENTIAL {
tuple val(meta), path("*.limma.model.txt") , emit: model
tuple val(meta), path("*.R_sessionInfo.log") , emit: session_info
tuple val(meta), path("*.normalised_counts.tsv") , emit: normalised_counts, optional: true
path "versions.yml" , emit: versions
path "versions.yml" , emit: versions_limma, topic: versions

when:
task.ext.when == null || task.ext.when
Expand Down
12 changes: 12 additions & 0 deletions modules/nf-core/limma/differential/meta.yml
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,14 @@ output:
pattern: "*.normalised_counts.tsv"
ontologies:
- edam: http://edamontology.org/format_3475 # TSV
versions_limma:
- versions.yml:
type: file
description: File containing software versions
pattern: "versions.yml"
ontologies:
- edam: http://edamontology.org/format_3750 # YAML
topics:
versions:
- versions.yml:
type: file
Expand All @@ -155,3 +163,7 @@ authors:
- "@pinin4fjords"
maintainers:
- "@pinin4fjords"
versions:
- versions.yml:
type: file
description: YAML file containing versions of tools used in the module
105 changes: 97 additions & 8 deletions modules/nf-core/limma/differential/templates/limma_de.R
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,80 @@ nullify <- function(x) {
if (is.character(x) && (tolower(x) == "null" || x == "")) NULL else x
}

#' Test whether a value is a usable, non-blank string
#'
#' Returns TRUE only when `input` is non-NULL and still contains at least one
#' character after leading/trailing whitespace is stripped. Used to decide
#' whether an optional CLI parameter was really supplied.
#'
#' @param input Value to test (typically a character scalar or NULL).
#' @return Logical scalar: TRUE for a non-empty, non-whitespace string,
#'   FALSE otherwise.
#' @examples
#' is_valid_string("Hello World") # TRUE
#' is_valid_string("   ")         # FALSE
#' is_valid_string(NULL)          # FALSE
is_valid_string <- function(input) {
  not_null <- !is.null(input)
  # Short-circuit keeps trimws() from ever seeing NULL.
  not_null && nzchar(trimws(input))
}

#' Rewrite a contrast expression using sanitised design column names
#'
#' `makeContrasts()` only accepts syntactically valid coefficient names, while
#' users may write contrasts with the original design-matrix names (which can
#' contain spaces etc.). The design matrix itself is sanitised with
#' `make.names()`, so this helper rewrites every exact occurrence of an
#' original design name into its sanitised form.
#'
#' @param contrast_string User-provided contrast expression.
#' @param design_names Original (pre-`make.names()`) design matrix column names.
#' @return A contrast expression compatible with `makeContrasts()`. Blank or
#'   NULL input is returned unchanged.
normalise_contrast_string <- function(contrast_string, design_names) {
  if (!is_valid_string(contrast_string)) {
    return(contrast_string)
  }

  safe_names <- make.names(design_names)
  result <- contrast_string

  # Substitute longer names first so a design name that is a substring of
  # another cannot be replaced before the longer match is handled.
  for (i in order(nchar(design_names), decreasing = TRUE)) {
    result <- gsub(design_names[[i]], safe_names[[i]], result, fixed = TRUE)
  }

  result
}

#' Resolve a user-supplied column name against the loaded metadata
#'
#' Metadata is read with `check.names = TRUE`, so R sanitises column names
#' containing spaces or other invalid characters. This helper accepts either
#' the original name or its sanitised form and returns whichever is actually
#' present in `metadata`.
#'
#' @param column_name Column name provided by the user.
#' @param metadata Loaded metadata data frame.
#' @return The column name as it exists in `metadata`; if neither the raw nor
#'   the sanitised name matches (or input is blank/NULL), the input is
#'   returned unchanged so the caller can report the error.
resolve_metadata_column <- function(column_name, metadata) {
  if (!is_valid_string(column_name)) {
    return(column_name)
  }

  available <- colnames(metadata)

  # Prefer an exact match on the name the user typed.
  if (column_name %in% available) {
    return(column_name)
  }

  # Fall back to the make.names()-sanitised form produced at read time.
  sanitised <- make.names(column_name)
  if (sanitised %in% available) sanitised else column_name
}

################################################
################################################
## PARSE PARAMETERS FROM NEXTFLOW ##
Expand Down Expand Up @@ -135,7 +209,11 @@ if ( ! is.null(opt\$seed)){
}

# If there is no option supplied, convert string "null" to NULL
keys <- c("formula", "contrast_string", "contrast_variable", "reference_level", "target_level")
keys <- c(
"formula", "contrast_string", "contrast_variable", "reference_level",
"target_level", "blocking_variables", "exclude_samples_col",
"exclude_samples_values", "block"
)
opt[keys] <- lapply(opt[keys], nullify)

# Check if required parameters have been provided
Expand Down Expand Up @@ -225,10 +303,13 @@ if (length(missing_samples) > 0) {
################################################
################################################

contrast_variable <- make.names(opt\$contrast_variable)
contrast_variable <- NULL
blocking.vars <- c()
original_design_names <- NULL

if (!is.null(opt\$contrast_variable)) {
contrast_variable <- resolve_metadata_column(opt\$contrast_variable, sample.sheet)

if (!contrast_variable %in% colnames(sample.sheet)) {
stop(
paste0(
Expand All @@ -245,8 +326,13 @@ if (!is.null(opt\$contrast_variable)) {
'column of the sample sheet'
)
)
} else if (!is.null(opt\$blocking_variables)) {
blocking.vars = make.names(unlist(strsplit(opt\$blocking_variables, split = ';')))
} else if (is_valid_string(opt\$blocking_variables)) {
blocking.vars = vapply(
unlist(strsplit(opt\$blocking_variables, split = ';')),
resolve_metadata_column,
character(1),
metadata = sample.sheet
)
if (!all(blocking.vars %in% colnames(sample.sheet))) {
missing_block <- paste(blocking.vars[! blocking.vars %in% colnames(sample.sheet)], collapse = ',')
stop(
Expand All @@ -259,7 +345,7 @@ if (!is.null(opt\$contrast_variable)) {
}

# Handle conflicts between blocking variables and block
if (!is.null(opt\$block) && !is.null(opt\$blocking_variables)) {
if (is_valid_string(opt\$block) && is_valid_string(opt\$blocking_variables)) {
if (opt\$block %in% blocking.vars) {
warning(paste("Variable", opt\$block, "is specified both as a fixed effect and a random effect. It will be treated as a random effect only."))
blocking.vars <- setdiff(blocking.vars, opt\$block)
Expand Down Expand Up @@ -304,11 +390,12 @@ if ((! is.null(opt\$exclude_samples_col)) && (! is.null(opt\$exclude_samples_val
################################################
################################################

if (!is.null(opt\$formula)) {
if (is_valid_string(opt\$formula)) {
model <- opt\$formula
model_formula <- as.formula(model)
cat("Using user-specified formula:\n ", deparse(model_formula), "\n")
design <- model.matrix(model_formula, data = sample.sheet)
original_design_names <- colnames(design)
colnames(design) <- make.names(colnames(design))
cat("Column names after make.names():\n ", paste(colnames(design), collapse = ", "), "\n")

Expand Down Expand Up @@ -338,6 +425,7 @@ if (!is.null(opt\$formula)) {
as.formula(model),
data=sample.sheet
)
original_design_names <- colnames(design)

# Adjust column names for the contrast variable
colnames(design) <- sub(
Expand Down Expand Up @@ -420,9 +508,10 @@ fit <- do.call(lmFit, lmfit_args)
# Contrasts bit

# Create the contrast string for the specified comparison
if (!is.null(opt\$contrast_string)) {
if (is_valid_string(opt\$contrast_string)) {
cat("Using contrast string:", opt\$contrast_string, "\n")
contrast_string <- as.character(opt\$contrast_string)
contrast_string <- normalise_contrast_string(as.character(opt\$contrast_string), original_design_names)
cat("Normalised contrast string:", contrast_string, "\n")
contrast.matrix <- makeContrasts(contrasts=contrast_string, levels=colnames(design))

} else {
Expand Down
20 changes: 10 additions & 10 deletions modules/nf-core/limma/differential/tests/main.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() }
{ assert snapshot(process.out.model, process.out.versions_limma).match() }
)
}
}
Expand Down Expand Up @@ -97,7 +97,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.session_info[0][1]).getText().contains("limma_3.58.1") },
{ assert path(process.out.results[0][1]).getText().contains("1007_s_at\t-0.2775254") },
{ assert path(process.out.results[0][1]).getText().contains("1053_at\t-0.071547786") }
Expand Down Expand Up @@ -159,7 +159,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.session_info[0][1]).getText().contains("limma_3.58.1") },
{ assert path(process.out.results[0][1]).getText().contains("1007_s_at\t-0.27752") },
{ assert path(process.out.results[0][1]).getText().contains("1053_at\t-0.0715477") }
Expand Down Expand Up @@ -221,7 +221,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.session_info[0][1]).getText().contains("limma_3.58.1") },
{ assert path(process.out.results[0][1]).getText().contains("1007_s_at\t-0.4778282") },
{ assert path(process.out.results[0][1]).getText().contains("1053_at\t-0.08958278") }
Expand Down Expand Up @@ -284,7 +284,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.session_info[0][1]).getText().contains("limma_3.58.1") },
{ assert path(process.out.results[0][1]).getText().contains("1007_s_at\t-0.2775254") },
{ assert path(process.out.results[0][1]).getText().contains("1053_at\t-0.071547786") }
Expand Down Expand Up @@ -312,7 +312,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000023978\t-4.89014922224241") },
{ assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.77922") },
{ assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000023978\t6.11247620232167") },
Expand Down Expand Up @@ -341,7 +341,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000023978\t-2.84055986312942") },
{ assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.7055") },
{ assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000023978\t6.11247620232167") },
Expand Down Expand Up @@ -370,7 +370,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.model, process.out.versions).match() },
{ assert snapshot(process.out.model, process.out.versions_limma).match() },
{ assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000023978\t-2.8363444336503") },
{ assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.7360") },
{ assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000023978\t6.11247620232167") },
Expand Down Expand Up @@ -399,7 +399,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot(process.out.results, process.out.model, process.out.normalised_counts, process.out.versions).match() }
{ assert snapshot(process.out.results, process.out.model, process.out.normalised_counts, process.out.versions_limma).match() }
)
}

Expand All @@ -423,7 +423,7 @@ nextflow_process {
then {
assertAll(
{ assert process.success },
{ assert snapshot([process.out.results, process.out.model, process.out.normalised_counts, process.out.versions]).match() } )
{ assert snapshot([process.out.results, process.out.model, process.out.normalised_counts, process.out.versions_limma]).match() } )
}
}
}
4 changes: 0 additions & 4 deletions subworkflows/nf-core/abundance_differential_filter/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -94,15 +94,11 @@ workflow ABUNDANCE_DIFFERENTIAL_FILTER {
norm_inputs.samples_and_matrix.filter{index -> index[0].differential_method == 'limma'}
)

ch_versions = ch_versions.mix(LIMMA_NORM.out.versions.first())

LIMMA_DIFFERENTIAL(
inputs.contrasts_for_diff_with_formula.filter{index -> index[0].differential_method == 'limma' },
inputs.samples_and_matrix.filter{index -> index[0].differential_method == 'limma' }
)

ch_versions = ch_versions.mix(LIMMA_DIFFERENTIAL.out.versions.first())

// ----------------------------------------------------
// Run DESeq2
// ----------------------------------------------------
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,15 +52,14 @@
"diagnosis_normal_uremia_test_limma.R_sessionInfo.log"
],
[
"versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690",
"versions.yml:md5,1ddaab440e2528c688c05a02dd066f12"
"versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690"
]
],
"meta": {
"nf-test": "0.9.2",
"nextflow": "25.10.4"
"nf-test": "0.9.3",
"nextflow": "25.10.3"
},
"timestamp": "2026-03-24T15:43:26.461974792"
"timestamp": "2026-03-27T16:48:42.788489896"
},
"deseq2 and limma - mouse - basic": {
"content": [
Expand Down Expand Up @@ -264,16 +263,14 @@
[
"versions.yml:md5,1a6a400c49aa4dda7ec5c4ed0cc56340",
"versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690",
"versions.yml:md5,1ddaab440e2528c688c05a02dd066f12",
"versions.yml:md5,2c0576aefff8da32c7c0cfd8529aa4b5",
"versions.yml:md5,b80e2c320ea0429466a7b7c3c3ac78fa"
"versions.yml:md5,2c0576aefff8da32c7c0cfd8529aa4b5"
]
],
"meta": {
"nf-test": "0.9.2",
"nextflow": "25.10.4"
"nf-test": "0.9.3",
"nextflow": "25.10.3"
},
"timestamp": "2026-03-24T15:47:16.379223242"
"timestamp": "2026-03-27T16:50:55.990696876"
},
"limma - voom": {
"content": [
Expand Down Expand Up @@ -352,16 +349,14 @@
"treatment_mCherry_hND6_sample_number_test_limma_voom.R_sessionInfo.log"
],
[
"versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690",
"versions.yml:md5,1ddaab440e2528c688c05a02dd066f12",
"versions.yml:md5,b80e2c320ea0429466a7b7c3c3ac78fa"
"versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690"
]
],
"meta": {
"nf-test": "0.9.2",
"nextflow": "25.10.4"
"nf-test": "0.9.3",
"nextflow": "25.10.3"
},
"timestamp": "2026-03-24T15:45:01.909392414"
"timestamp": "2026-03-27T16:49:04.43670094"
},
"dream": {
"content": [
Expand Down Expand Up @@ -674,17 +669,15 @@
[
"versions.yml:md5,1a6a400c49aa4dda7ec5c4ed0cc56340",
"versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690",
"versions.yml:md5,1ddaab440e2528c688c05a02dd066f12",
"versions.yml:md5,2c0576aefff8da32c7c0cfd8529aa4b5",
"versions.yml:md5,b80e2c320ea0429466a7b7c3c3ac78fa",
"versions.yml:md5,ff5b7c1d83470f6f548f3643bb37a830"
]
],
"meta": {
"nf-test": "0.9.2",
"nextflow": "25.10.4"
"nf-test": "0.9.3",
"nextflow": "25.10.3"
},
"timestamp": "2026-03-24T15:48:06.008809772"
"timestamp": "2026-03-27T16:51:32.308588161"
},
"stub": {
"content": [
Expand Down
Loading