-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path eval_LLaDA.sh
More file actions
81 lines (66 loc) · 2.23 KB
/
eval_LLaDA.sh
File metadata and controls
81 lines (66 loc) · 2.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
#!/usr/bin/env bash
# eval_LLaDA.sh — run the LLaDA model through four benchmarks
# (humaneval, gsm8k, mbpp, math500) and score each one.
# Required edit before running: set MODEL_PATH to a real checkpoint.
set -euo pipefail

# Pin the evaluation to a single GPU.
export CUDA_VISIBLE_DEVICES=2

readonly BASE_OUTPUT_PATH="./results/llada"
MODEL_PATH="your model path"   # TODO: replace with the actual model checkpoint path
readonly MODEL_PATH

# Generation settings forwarded to --gen_kwargs for every task.
# NOTE(review): presumably length = generated tokens, steps = sampling steps,
# block_length = per-block span — confirm against the LLaDA eval harness docs.
length=512
steps=512
block_length=32
#######################################
# Evaluate one benchmark task, then run its metric script.
# Globals:   BASE_OUTPUT_PATH, MODEL_PATH, length, steps, block_length (read)
# Arguments: $1 - task name; must also match a metrics/<task>.py script
# Outputs:   writes evaluation results under ${BASE_OUTPUT_PATH}/<task>_<length>
# Returns:   non-zero if the launch or the metric script fails
#######################################
run_task() {
  local task=$1
  local output_path="${BASE_OUTPUT_PATH}/${task}_${length}"

  accelerate launch evaluation_script.py \
    -m dllm_eval \
    --model LLaDA \
    --tasks "${task}" \
    --batch_size 1 \
    --model_args "pretrained=${MODEL_PATH},assistant_prefix=<reasoning> " \
    --gen_kwargs "block_length=${block_length},gen_length=${length},steps=${steps},temperature=0" \
    --num_fewshot 0 \
    --output_path "${output_path}" \
    --log_samples \
    --confirm_run_unsafe_code

  # Each task's scorer lives at metrics/<task>.py (holds for all four tasks).
  python "metrics/${task}.py" \
    --res_path "${output_path}"
}

# Previously four verbatim copies of the same invocation; a single loop
# keeps the flags in one place so they cannot drift between tasks.
for task in humaneval gsm8k mbpp math500; do
  run_task "${task}"
done