forked from graphcode2vec/graphcode2vec
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpreNode_training.sh
More file actions
109 lines (102 loc) · 2.27 KB
/
preNode_training.sh
File metadata and controls
109 lines (102 loc) · 2.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/bin/bash
# Pre-train GNN encoders on the node-classification task via node_vgage_pretrain.py.
# Four sweeps follow: {attention, mean} graph pooling, with and without --check_all.
# Outputs (checkpoints + log.txt) land in <prefix>_<pooling>/<gnn_type>/.
#
# Usage: preNode_training.sh <device>
#   device - device id forwarded to the trainer's --device flag

device=$1                  # trainer device id (e.g. a CUDA index)
i=node_class_pretraining   # output-directory prefix for this sweep

# Sweep 1: attention pooling, full dataset check disabled.
for gnn_type in gat gin gcn graphsage
do
    gp=attention           # graph pooling strategy
    output=${i}_${gp}/${gnn_type}
    sw=lstm                # subword embedding model
    jk=sum                 # jumping-knowledge aggregation
    lstm_emb_dim=150
    # Quote all expansions (SC2086) so paths/values survive word splitting.
    mkdir -p "$output"
    python node_vgage_pretrain.py --batch_size 500 --num_workers 5 --epochs 10 --num_layer 5 \
        --subword_embedding "$sw" \
        --lstm_emb_dim "$lstm_emb_dim" \
        --graph_pooling "$gp" \
        --JK "$jk" \
        --saved_model_path "$output" \
        --log_file "$output/log.txt" \
        --gnn_type "$gnn_type" \
        --sub_token_path ./tokens/jars \
        --emb_file emb_100.txt \
        --dataset DV_PDG \
        --task node_class \
        --device "$device"
done
# Sweep 2: mean pooling, full dataset check disabled.
i=node_class_pretraining   # output-directory prefix for this sweep
for gnn_type in gat gin gcn graphsage
do
    sw=lstm                # subword embedding model
    gp=mean                # graph pooling strategy
    output=${i}_${gp}/${gnn_type}
    jk=sum                 # jumping-knowledge aggregation
    lstm_emb_dim=150
    # Quote all expansions (SC2086) so paths/values survive word splitting.
    mkdir -p "$output"
    python node_vgage_pretrain.py --batch_size 500 --num_workers 5 --epochs 10 --num_layer 5 \
        --subword_embedding "$sw" \
        --lstm_emb_dim "$lstm_emb_dim" \
        --graph_pooling "$gp" \
        --JK "$jk" \
        --saved_model_path "$output" \
        --log_file "$output/log.txt" \
        --gnn_type "$gnn_type" \
        --sub_token_path ./tokens/jars \
        --emb_file emb_100.txt \
        --dataset DV_PDG \
        --task node_class \
        --device "$device"
done
# Sweep 3: attention pooling, with --check_all enabled.
i=node_class_pretraining_all   # output-directory prefix for this sweep
for gnn_type in gat gin gcn graphsage
do
    gp=attention           # graph pooling strategy
    output=${i}_${gp}/${gnn_type}
    sw=lstm                # subword embedding model
    jk=sum                 # jumping-knowledge aggregation
    lstm_emb_dim=150
    # Quote all expansions (SC2086) so paths/values survive word splitting.
    mkdir -p "$output"
    python node_vgage_pretrain.py --batch_size 500 --num_workers 5 --epochs 10 --num_layer 5 \
        --subword_embedding "$sw" \
        --lstm_emb_dim "$lstm_emb_dim" \
        --graph_pooling "$gp" \
        --JK "$jk" \
        --saved_model_path "$output" \
        --log_file "$output/log.txt" \
        --gnn_type "$gnn_type" \
        --sub_token_path ./tokens/jars \
        --emb_file emb_100.txt \
        --dataset DV_PDG \
        --task node_class \
        --device "$device" \
        --check_all
done
# Sweep 4: mean pooling, with --check_all enabled.
i=node_class_pretraining_all   # output-directory prefix for this sweep
for gnn_type in gat gin gcn graphsage
do
    sw=lstm                # subword embedding model
    gp=mean                # graph pooling strategy
    output=${i}_${gp}/${gnn_type}
    jk=sum                 # jumping-knowledge aggregation
    lstm_emb_dim=150
    # Quote all expansions (SC2086) so paths/values survive word splitting.
    mkdir -p "$output"
    python node_vgage_pretrain.py --batch_size 500 --num_workers 5 --epochs 10 --num_layer 5 \
        --subword_embedding "$sw" \
        --lstm_emb_dim "$lstm_emb_dim" \
        --graph_pooling "$gp" \
        --JK "$jk" \
        --saved_model_path "$output" \
        --log_file "$output/log.txt" \
        --gnn_type "$gnn_type" \
        --sub_token_path ./tokens/jars \
        --emb_file emb_100.txt \
        --dataset DV_PDG \
        --task node_class \
        --device "$device" \
        --check_all
done