-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathjob_script.sh
More file actions
27 lines (20 loc) · 964 Bytes
/
job_script.sh
File metadata and controls
27 lines (20 loc) · 964 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
#!/bin/bash
# SLURM batch job: run the miRNA-curator LLM pipeline on a single GPU node.
# Submit with: sbatch job_script.sh
# For details on each parameter, see the SLURM sbatch documentation:
# https://slurm.schedmd.com/sbatch.html
#
# NOTE: all #SBATCH directives must appear before the first executable
# command, so keep them grouped here at the top.
#SBATCH --time=7:00:00                 # walltime
#SBATCH --ntasks=1                     # number of tasks
#SBATCH --cpus-per-task=24             # CPUs per task (for multi-threaded code)
#SBATCH --nodes=1                      # number of nodes
#SBATCH --gres=gpu:a100:1              # one NVIDIA A100 GPU
#SBATCH --mem=48G                      # memory per node
#SBATCH -J "go_flow_llm_one_node"      # job name
#SBATCH -o "out_gfllm_one_node"        # job stdout file
#SBATCH -e "err_gfllm_one_node"        # job stderr file
#SBATCH --mail-user=agreen@ebi.ac.uk   # notification email address
#SBATCH --mail-type=END                # email when the job ends

# Fail fast: abort on any command failure, unset variable, or pipeline error.
# Without this, a failed pyenv setup would let the job silently run under the
# wrong Python environment for the full walltime.
set -euo pipefail

# LOAD MODULES, INSERT CODE, AND RUN YOUR PROGRAMS HERE
source ~/.pyenv_setup                  # pyenv shell integration (defines 'pyenv activate')
export HF_HUB_ENABLE_HF_TRANSFER=1     # enable accelerated Hugging Face Hub downloads
pyenv activate mirna-curator           # project virtualenv

# Alternative config kept for reference:
#python src/mirna_curator/main.py --config configs/curation_config_QwQ.json
python src/mirna_curator/main.py --config configs/one_node_QwQ.json