// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ---------------------------------------------------------------------
//
// Word2vec C code commented by Chandler May. Run
//
// git diff original
//
// to see the complete set of changes to the original code.
//
// Preliminaries:
// * There are a lot of global variables. They are important.
// * a, b, and c are used for loop counters and lots of other things;
// it's not uncommon for one of them to be set to one thing and then
// re-initialized and used for something completely different.
// * Random number generation is through a custom linear congruential
// generator. This in particular facilitates decoupled random number
// generation across training threads.
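//   The recurrence, inlined at each call site throughout this file,
//   is
//
//     next_random = next_random * (unsigned long long)25214903917 + 11;
//
//   (a 64-bit LCG with the same multiplier and increment as
//   java.util.Random). Draws are then taken from 16-bit slices of
//   the state: (next_random >> 16) % n for an integer in [0, n), or
//   (next_random & 0xFFFF) / (real)65536 for a real in [0, 1).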
//
// Notation:
// * Backticks are used to denote variables (`foo`)
// * The skip-gram model learns co-occurrence information between a
// word and a fixed-length window of context words on either side of
// it. The embedding of the central word is called the "input"
// representation and the embeddings of the left and right context
// words are "output" representations. This distinction (and the
// terminology) is quite important for understanding the code, so here
// is an example, assuming a context window of two words on either
// side:
//
// The quick brown fox jumped over the lazy dog.
// ^ ^ ^ ^ ^
// | | | | |
// output output input output output
//
// When looking at the input word "jumped" in this sentence, the
// skip-gram with negative sampling (SGNS) model learns that it
// co-occurs with "brown" and "fox" and "over" and "the" and does not
// co-occur with a selection of negative-sample words (perhaps
// "apple", "of", and "slithered").
//
// Partial call graph:
//
// main
// |
// L- TrainModel
// |
// |- ReadVocab
// | |- ReadWord
// | |- AddWordToVocab
// | L- SortVocab
// |
// |- LearnVocabFromTrainFile
// | |- ReadWord
// | |- AddWordToVocab
// | |- SearchVocab
// | |- ReduceVocab
// | L- SortVocab
// |
// |- SaveVocab
// |- InitNet
// |- InitUnigramTable
// |
// L- TrainModelThread
// L- ReadWordIndex
// |- ReadWord
// L- SearchVocab
//
// ---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <pthread.h>
// max length of filenames, vocabulary words (including null terminator)
#define MAX_STRING 100
// size of pre-computed e^x / (e^x + 1) table
#define EXP_TABLE_SIZE 1000
// max exponent x for which to pre-compute e^x / (e^x + 1)
#define MAX_EXP 6
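// (The table itself is filled in main(), outside this excerpt: entry
// i stores sigma(x) = e^x / (e^x + 1) for
// x = (i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP. During training,
// a dot product f in (-MAX_EXP, MAX_EXP) is mapped back to an index
// via (int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2)), as seen
// below.)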
// max length (in words) of a sequence of overlapping word contexts or
// "sentences" (any run of words in the training data longer than this
// that is not already broken up into multiple "sentences" will be
// split during training into "sentences" of at most
// `MAX_SENTENCE_LENGTH` words each)
#define MAX_SENTENCE_LENGTH 1000
// max length of Huffman codes used by hierarchical softmax
#define MAX_CODE_LENGTH 40
// Maximum 30M * 0.7 = 21M words in the vocabulary
// (where 0.7 is the magical load factor beyond which hash table
// performance degrades)
const int vocab_hash_size = 30000000;
// Negative sampling distribution represented by 1e8-element discrete
// sample from smoothed empirical unigram distribution.
const int table_size = 1e8;
// Set precision of real numbers
typedef float real;
// Representation of a word in the vocabulary, including (optional,
// for hierarchical softmax only) Huffman coding
struct vocab_word {
  long long cn;  // count of word in training data
  int *point;    // Huffman path: internal-node indices from root to
                 // leaf (hierarchical softmax only)
  char *word,    // null-terminated word string
       *code,    // Huffman code: branch bits along the path
                 // (hierarchical softmax only)
       codelen;  // length of `code`
};
struct vocab_word *vocab; // vocabulary
char
train_file[MAX_STRING], // training data (text) input file
output_file[MAX_STRING], // word vector (or word vector cluster)
// (binary/text) output file
save_vocab_file[MAX_STRING], // vocabulary (text) output file
read_vocab_file[MAX_STRING]; // vocabulary (text) input file
int
binary = 0, // 0 for text output, 1 for binary
cbow = 1, // 0 for skip-gram, 1 for CBOW
debug_mode = 2, // 1 for extra terminal output, 2 for
// extra extra terminal output
window = 5, // context window radius (use `window`
// output words on left and `window`
// output words on right as context
// of central input word)
min_count = 5, // min count of words in vocabulary when
// pruning to mitigate sparsity
num_threads = 12, // number of threads to use for training
min_reduce = 1, // initial min count of words to keep in
// vocabulary if pruning for space
// (will be incremented as necessary)
// (do not change)
hs = 0, // 1 for hierarchical softmax
negative = 5; // number of negative samples to draw
// per word
int *vocab_hash, // hash table of words to positions in
// vocabulary
*table; // discrete sample of words used as
// negative sampling distribution
long long
vocab_max_size = 1000, // capacity of vocabulary
// (will be incremented as necessary)
vocab_size = 0, // number of words in vocabulary
// (do not change)
layer1_size = 100, // size of embeddings
train_words = 0, // number of word tokens in training data
// (do not change)
word_count_actual = 0, // number of word tokens seen so far
// during training, over all
// iterations; updated infrequently
// (used for terminal output and
// learning rate)
// (do not change)
iter = 5, // number of passes to take through
// training data
file_size = 0, // size (in bytes) of training data file
classes = 0; // number of k-means clusters to learn
// of word vectors and write to output
// file (0 to write word vectors to
// output file, no clustering)
real
alpha = 0.025, // linear-decay learning rate
starting_alpha, // initial learning rate
// (do not change; initialized at
// beginning of training)
sample = 1e-3; // word subsampling threshold
real
*syn0, // input word embeddings
*syn1, // (used by hierarchical softmax)
*syn1neg, // output word embeddings
*expTable; // precomputed table of
// e^x / (e^x + 1) for x in
// [-MAX_EXP, MAX_EXP)
clock_t start; // start time of training algorithm
// Allocate and populate negative-sampling data structure `table`, an
// array of `table_size` words distributed approximately according to
// the empirical unigram distribution (smoothed by raising all
// probabilities to the power of 0.75 and re-normalizing), from `vocab`,
// an array of `vocab_size` words represented as `vocab_word` structs.
void InitUnigramTable() {
int a, i;
double train_words_pow = 0;
double d1, power = 0.75;
// allocate memory
table = (int *)malloc(table_size * sizeof(int));
// compute normalizer, `train_words_pow`
for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
// initialize vocab position `i` and cumulative probability mass `d1`
i = 0;
d1 = pow(vocab[i].cn, power) / train_words_pow;
for (a = 0; a < table_size; a++) {
table[a] = i;
if (a / (double)table_size > d1) {
i++;
d1 += pow(vocab[i].cn, power) / train_words_pow;
}
if (i >= vocab_size) i = vocab_size - 1;
}
}
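// (Worked example for InitUnigramTable, with made-up counts: if the
// vocab counts are {"the": 9, "cat": 1}, the smoothed weights are
// 9^0.75 ~= 5.20 and 1^0.75 = 1.00, so "the" fills ~5.20 / 6.20 ~= 84%
// of the table rather than its raw 90%; the 0.75 power shifts
// negative-sample mass toward rarer words.)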
// Read a single word from file `fin` into length `MAX_STRING` array
// `word`, terminating with a null byte, treating space, tab, and
// newline as word boundaries. Ignore carriage returns. If the first
// character read (excluding carriage returns) is a newline, set `word`
// to "</s>" and return. If a newline is encountered after reading one
// or more non-boundary characters, put that newline back into the
// stream and leave word as the non-boundary characters up to that point
// (so that the next time this function is called the newline will be
// read and `word` will be set to "</s>"). If the number of
// non-boundary characters is `MAX_STRING` - 1 or greater, truncate
// the word to `MAX_STRING` - 2 characters, reading and discarding the
// excess. If we reach end of file,
// set `eof` to 1.
void ReadWord(char *word, FILE *fin, char *eof) {
int a = 0, ch;
while (1) {
ch = fgetc_unlocked(fin);
if (ch == EOF) {
*eof = 1;
break;
}
if (ch == '\r') continue; // skip carriage returns
if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
if (a > 0) {
if (ch == '\n') ungetc(ch, fin);
break;
}
if (ch == '\n') {
strcpy(word, (char *)"</s>");
return;
} else continue;
}
word[a] = ch;
a++;
if (a >= MAX_STRING - 1) a--; // Truncate too-long words
}
word[a] = 0;
}
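// (Example for ReadWord: reading "the cat\n" yields "the", then "cat"
// with the newline pushed back, then "</s>" on the next call.)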
// Return hash (integer between 0, inclusive, and `vocab_hash_size`,
// exclusive) of `word`
int GetWordHash(char *word) {
unsigned long long a, hash = 0;
for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
hash = hash % vocab_hash_size;
return hash;
}
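// (Example for GetWordHash: for "ab", hash = ((0 * 257 + 'a') * 257
// + 'b') % vocab_hash_size = (97 * 257 + 98) % 30000000 = 25027.)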
// Return position of `word` in vocabulary `vocab` using `vocab_hash`,
// a linear-probing hash table; if the word is not found, return -1.
int SearchVocab(char *word) {
// compute initial hash
unsigned int hash = GetWordHash(word);
while (1) {
// return -1 if cell is empty
if (vocab_hash[hash] == -1) return -1;
// return position at current hash if word is a match
if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
// no match, increment hash
hash = (hash + 1) % vocab_hash_size;
}
return -1;
}
// Read a word from file `fin` and return its position in vocabulary
// `vocab`. If the next thing in the file is a newline, return 0 (the
// index of "</s>"). If the word is not in the vocabulary, return -1.
// If the end of file is reached, set `eof` to 1 and return -1. TODO:
// if the file is not newline-terminated, the last word will be
// swallowed (-1 will be returned because we have reached EOF, even if
// a word was read).
int ReadWordIndex(FILE *fin, char *eof) {
char word[MAX_STRING], eof_l = 0;
ReadWord(word, fin, &eof_l);
if (eof_l) {
*eof = 1;
return -1;
}
return SearchVocab(word);
}
// Add word `word` to first empty slot in vocabulary `vocab`, increment
// vocabulary length `vocab_size`, increase `vocab_max_size` by
// increments of 1000 (increasing `vocab` allocation) as needed, and set
// position of that slot in vocabulary hash table `vocab_hash`.
// Return index of `word` in `vocab`.
int AddWordToVocab(char *word) {
unsigned int hash, length = strlen(word) + 1;
if (length > MAX_STRING) length = MAX_STRING;
// add word to `vocab` and increment `vocab_size`
vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
// TODO: this may blow up if strlen(word) + 1 > MAX_STRING
strcpy(vocab[vocab_size].word, word);
vocab[vocab_size].cn = 0;
vocab_size++;
// Reallocate memory if needed
if (vocab_size + 2 >= vocab_max_size) {
vocab_max_size += 1000;
vocab = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
}
// add word vocabulary position to `vocab_hash`
hash = GetWordHash(word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = vocab_size - 1;
return vocab_size - 1;
}
// Given pointers `a` and `b` to `vocab_word` structs, return 1 if the
// stored count `cn` of b is greater than that of a, -1 if less than,
// and 0 if equal. (Comparator for a reverse sort by word count.)
int VocabCompare(const void *a, const void *b) {
long long l = ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
if (l > 0) return 1;
if (l < 0) return -1;
return 0;
}
// Sort vocabulary `vocab` by word count, decreasing, while removing
// words that have count less than `min_count`; re-compute `vocab_hash`
// accordingly; shrink vocab memory allocation to minimal size.
void SortVocab() {
int a, size;
unsigned int hash;
// Sort the vocabulary but keep "</s>" at the first position
qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
// clear `vocab_hash` cells
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
// save initial `vocab_size` (we will decrease `vocab_size` as we
// prune infrequent words)
size = vocab_size;
// initialize total word count
train_words = 0;
for (a = 0; a < size; a++) {
if ((vocab[a].cn < min_count) && (a != 0)) {
// word is infrequent and not "</s>", discard it
vocab_size--;
free(vocab[a].word);
} else {
// word is frequent or "</s>", add to hash table
      hash = GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
train_words += vocab[a].cn;
}
}
// shrink vocab memory allocation to minimal size
// TODO: to be safe we should probably update vocab_max_size which
// seems to be interpreted as the allocation size
vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
// Allocate memory for the binary tree construction
for (a = 0; a < vocab_size; a++) {
vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
}
}
// Reduce vocabulary `vocab` size by removing words with count equal to
// `min_reduce` or less, in order to make room in hash table (not for
// mitigating data sparsity). Increment `min_reduce` by one, so that
// this function can be called in a loop until there is enough space.
void ReduceVocab() {
int a, b = 0;
unsigned int hash;
for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
vocab[b].cn = vocab[a].cn;
vocab[b].word = vocab[a].word;
b++;
} else free(vocab[a].word);
vocab_size = b;
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
// recompute `vocab_hash` as we have removed some items and it may
// now be broken
for (a = 0; a < vocab_size; a++) {
hash = GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
}
fflush(stdout);
min_reduce++;
}
// Create binary Huffman tree from word counts in `vocab`, storing
// codes in `vocab`; frequent words will have short unique binary codes.
// Used by hierarchical softmax.
void CreateBinaryTree() {
long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
char code[MAX_CODE_LENGTH];
long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
pos1 = vocab_size - 1;
pos2 = vocab_size;
// The following algorithm constructs the Huffman tree by adding one node at a time
for (a = 0; a < vocab_size - 1; a++) {
// First, find two smallest nodes 'min1, min2'
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min1i = pos1;
pos1--;
} else {
min1i = pos2;
pos2++;
}
} else {
min1i = pos2;
pos2++;
}
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min2i = pos1;
pos1--;
} else {
min2i = pos2;
pos2++;
}
} else {
min2i = pos2;
pos2++;
}
count[vocab_size + a] = count[min1i] + count[min2i];
parent_node[min1i] = vocab_size + a;
parent_node[min2i] = vocab_size + a;
binary[min2i] = 1;
}
// Now assign binary code to each vocabulary word
for (a = 0; a < vocab_size; a++) {
b = a;
i = 0;
while (1) {
code[i] = binary[b];
point[i] = b;
i++;
b = parent_node[b];
if (b == vocab_size * 2 - 2) break;
}
vocab[a].codelen = i;
vocab[a].point[0] = vocab_size - 2;
for (b = 0; b < i; b++) {
vocab[a].code[i - b - 1] = code[b];
vocab[a].point[i - b] = point[b] - vocab_size;
}
}
free(count);
free(binary);
free(parent_node);
}
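// (Worked example for CreateBinaryTree, with made-up counts
// [5, 2, 1, 1]: the two count-1 leaves merge first into an internal
// node of count 2, that merges with the count-2 leaf into a node of
// count 4, and that merges with the count-5 leaf at the root
// (count 9). Reading the recorded branch bits from each leaf back to
// the root, reversed, gives codes 1, 01, 001, 000: the most frequent
// word gets the shortest code.)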
// Compute vocabulary `vocab` and corresponding hash table `vocab_hash`
// from text in `train_file`. Insert </s> as vocab item 0. Prune vocab
// incrementally (as needed to keep number of items below effective hash
// table capacity). After reading file, sort vocabulary by word count,
// decreasing.
void LearnVocabFromTrainFile() {
char word[MAX_STRING], eof = 0;
FILE *fin;
long long a, i, wc = 0;
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
fin = fopen(train_file, "rb");
if (fin == NULL) {
printf("ERROR: training data file not found!\n");
exit(1);
}
vocab_size = 0;
AddWordToVocab((char *)"</s>");
while (1) {
// TODO: if file is not newline-terminated, last word may be
// swallowed
ReadWord(word, fin, &eof);
if (eof) break;
train_words++;
wc++;
if ((debug_mode > 1) && (wc >= 1000000)) {
printf("%lldM%c", train_words / 1000000, 13);
fflush(stdout);
wc = 0;
}
i = SearchVocab(word);
if (i == -1) {
a = AddWordToVocab(word);
vocab[a].cn = 1;
} else vocab[i].cn++;
if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
}
SortVocab();
if (debug_mode > 0) {
printf("Vocab size: %lld\n", vocab_size);
printf("Words in train file: %lld\n", train_words);
}
file_size = ftell(fin);
fclose(fin);
}
// Write vocabulary `vocab` to file `save_vocab_file`, one word per
// line, with each line containing a word, a space, the word count, and
// a newline.
void SaveVocab() {
long long i;
FILE *fo = fopen(save_vocab_file, "wb");
for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
fclose(fo);
}
// Read vocabulary from file `read_vocab_file` that has one word per
// line, where each line contains a word, a space, the word count, and a
// newline. Store vocabulary in `vocab` and update `vocab_hash`
// accordingly. After reading, sort vocabulary by word count,
// decreasing.
void ReadVocab() {
long long a, i = 0;
char c, eof = 0;
char word[MAX_STRING];
FILE *fin = fopen(read_vocab_file, "rb");
if (fin == NULL) {
printf("Vocabulary file not found\n");
exit(1);
}
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
vocab_size = 0;
while (1) {
// TODO: if file is not newline-terminated, last word may be
// swallowed
ReadWord(word, fin, &eof);
if (eof) break;
a = AddWordToVocab(word);
fscanf(fin, "%lld%c", &vocab[a].cn, &c);
i++;
}
SortVocab();
if (debug_mode > 0) {
printf("Vocab size: %lld\n", vocab_size);
printf("Words in train file: %lld\n", train_words);
}
fin = fopen(train_file, "rb");
if (fin == NULL) {
printf("ERROR: training data file not found!\n");
exit(1);
}
fseek(fin, 0, SEEK_END);
file_size = ftell(fin);
fclose(fin);
}
// Allocate memory for and initialize neural network parameters. Each
// array has size `vocab_size` x `layer1_size`.
//
// syn0: input word embeddings, initialized uniformly on
// [-0.5/`layer1_size`, 0.5/`layer1_size`)
// syn1: only used by hierarchical softmax; initialized to 0
// syn1neg: output word embeddings; initialized to zero
void InitNet() {
long long a, b;
unsigned long long next_random = 1;
a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
if (hs) {
a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
syn1[a * layer1_size + b] = 0;
}
  if (negative > 0) {
a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
syn1neg[a * layer1_size + b] = 0;
}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
next_random = next_random * (unsigned long long)25214903917 + 11;
syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
}
CreateBinaryTree();
}
// Given allocated and initialized vocabulary `vocab`, corresponding
// hash `vocab_hash`, and neural network parameters `syn0`, `syn1`, and
// `syn1neg`, train word2vec model on 1 / `num_threads` fraction of text
// in `train_file` (storing learned parameters in `syn0`, `syn1`, and
// `syn1neg`). When cast to long long, `id` should be an integer
// between 0 (inclusive) and `num_threads` (exclusive) representing
// which training thread this is.
//
// Note the main loop is broken into separate code blocks for
// hierarchical softmax versus negative sampling and for continuous
// BOW versus skip-gram; make sure you are looking at the right block
// for your purposes! The added comments focus on the SGNS case.
void *TrainModelThread(void *id) {
long long
a, // loop counter among other things
b, // offset of dynamic window in max window
// (if 0, use all `window` output
// words on each side of input word;
// otherwise use `window - b` words
// on each side)
d, // loop counter among other things
cw, // (used by CBOW)
word, // index of output word in vocabulary
last_word, // index of input word in vocabulary
sentence_length = 0, // number of words read into the current
// sentence
sentence_position = 0, // position of current output word in
// sentence
word_count = 0, // number of words seen so far in this
// iteration
last_word_count = 0, // number of words seen so far in this
// iteration, as of the most recent
// update of terminal output and learning
// rate
l1, // input word row offset in syn0
l2, // output word row offset in syn1, syn1neg
c, // loop counter among other things
target, // index of output word or negatively-sampled
// word in vocabulary
label, // switch between output word (1) and
// negatively-sampled word (0)
local_iter = iter; // iterations over this thread's chunk of the
// data set left
long long
sen[MAX_SENTENCE_LENGTH + 1]; // index of word in vocabulary for
// each word in current sentence
unsigned long long
next_random = (long long)id; // thread-specific RNG state
char eof = 0; // 1 if end of file has been reached
real f, g; // work space (values of sub-expressions in
// gradient computation)
clock_t now; // current time during training
// allocate memory for gradients
real
*neu1 = (real *)calloc(layer1_size, sizeof(real)),
*neu1e = (real *)calloc(layer1_size, sizeof(real));
FILE *fi = fopen(train_file, "rb");
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
// iteratively read a sentence and train (update gradients) over it;
// read over all sentences in this thread's chunk of the training
// data `iter` times, then break
while (1) {
// every 10k words, update progress in terminal and update learning
// rate
if (word_count - last_word_count > 10000) {
word_count_actual += word_count - last_word_count;
last_word_count = word_count;
    if (debug_mode > 1) {
      now = clock();
printf("%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk ", 13, alpha,
word_count_actual / (real)(iter * train_words + 1) * 100,
word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000));
fflush(stdout);
}
      // linearly decay the learning rate from `starting_alpha`
      // toward zero as a function of the number of words seen so far,
      // but threshold it below at one ten-thousandth of the initial
      // learning rate
      // (each word in the data is to be seen `iter` times)
alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
}
// if we have finished training on the most recently-read sentence
// (or we are just starting training), read a new sentence;
// truncate each sentence at `MAX_SENTENCE_LENGTH` words (sentences
// longer than that will be broken up into smaller sentences)
if (sentence_length == 0) {
// iteratively read word and add to sentence
while (1) {
word = ReadWordIndex(fi, &eof);
if (eof) break;
// skip OOV
if (word == -1) continue;
word_count++;
// if EOS, we're done reading this sentence
if (word == 0) break;
        // The subsampling randomly discards frequent words while
        // keeping the frequency ranking the same
if (sample > 0) {
          // discard w.p. 1 - [ sqrt(t / p_{word}) + t / p_{word} ]
          // (t is the subsampling threshold `sample`; p_{word} is the
          // ML estimate of the probability of `word` under a unigram
          // LM, i.e. its normalized frequency).
          // TODO: why is this not merely 1 - sqrt(t / p_{word}) as in
          // the paper?
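          // Worked example with the default t = 1e-3: for a word with
          // normalized frequency p = 0.01, `ran` below is
          // sqrt(0.1) + 0.1 ~= 0.42, so the word is kept w.p. ~0.42;
          // for p <= t, `ran` >= 1 and the word is never discarded.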
real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
next_random = next_random * (unsigned long long)25214903917 + 11;
if (ran < (next_random & 0xFFFF) / (real)65536) continue;
}
sen[sentence_length] = word;
sentence_length++;
// truncate long sentences
if (sentence_length >= MAX_SENTENCE_LENGTH) break;
}
// set output word position to first word in sentence
sentence_position = 0;
}
// if we are at the end of this iteration (sweep over the data),
// restart and decrement `local_iter`
if (eof || (word_count > train_words / num_threads)) {
word_count_actual += word_count - last_word_count;
local_iter--;
if (local_iter == 0) break;
word_count = 0;
last_word_count = 0;
// signal to read new sentence
sentence_length = 0;
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
continue;
}
// get index of output word
word = sen[sentence_position];
    // skip OOV (TODO: OOV words were already skipped when reading the
    // sentence, so this should never fire)
if (word == -1) continue;
// reset gradients to zero
for (c = 0; c < layer1_size; c++) neu1[c] = 0;
for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
// pick dynamic window offset (uniformly at random, between 0
// (inclusive) and max window size `window` (exclusive))
next_random = next_random * (unsigned long long)25214903917 + 11;
b = next_random % window;
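    // (e.g. with window = 5 and b = 2, the loops below range over
    // a in {2, ..., 8} excluding 5, i.e. 3 context words on each side
    // of the input word)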
// CBOW
if (cbow) {
// in -> hidden
cw = 0;
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c];
if (last_word == -1) continue;
for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size];
cw++;
}
if (cw) {
for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
// CBOW HIERARCHICAL SOFTMAX
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the gradient multiplied by the learning rate
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
}
// CBOW NEGATIVE SAMPLING
if (negative > 0) for (d = 0; d < negative + 1; d++) {
if (d == 0) {
target = word;
label = 1;
} else {
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
l2 = target * layer1_size;
f = 0;
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
}
// hidden -> in
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c];
if (last_word == -1) continue;
for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
}
}
// SKIP-GRAM
} else {
// loop over offsets within dynamic window
// (relative to max window size)
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
// compute position in sentence of input word
// (output word pos - max window size + rel offset)
c = sentence_position - window + a;
// skip if input word position OOB
// (note this is our main constraint on word position w.r.t. sentence
// bounds)
if (c < 0) continue;
if (c >= sentence_length) continue;
// compute input word index
last_word = sen[c];
// skip OOV (TODO checked already, should never fire)
if (last_word == -1) continue;
// compute input word row offset
l1 = last_word * layer1_size;
// initialize gradient for input word (work space)
for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
// SKIP-GRAM HIERARCHICAL SOFTMAX
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the gradient multiplied by the learning rate
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
}
// SKIP-GRAM NEGATIVE SAMPLING
if (negative > 0) for (d = 0; d < negative + 1; d++) {
if (d == 0) {
// fetch output word
target = word;
label = 1;
} else {
// fetch negative-sampled word
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
l2 = target * layer1_size; // output/neg-sample word row offset
// compute f = < v_{w_I}', v_{w_O} >
// (inner product for neg sample)
f = 0;
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
// compute gradient coeff g = alpha * (label - 1 / (e^-f + 1))
// (alpha is learning rate, label is 1 for output and 0 for neg)
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
// contribute to gradient for input word
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
// perform gradient step for output/neg-sample word
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
}
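        // (Where g comes from: for a single (input, target) pair with
        // label y, SGNS maximizes
        //   y * log(sigma(f)) + (1 - y) * log(1 - sigma(f)),
        // whose derivative w.r.t. f is y - sigma(f); `g` is that
        // derivative scaled by the learning rate, and the two loops
        // above push it through the chain rule to the input and
        // output vectors.)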
// now that we've taken gradient step for output and all neg sample
// words, take gradient step for input word
for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
}
}
// update to next output word; if we are at end of sentence, signal
// to read new sentence
sentence_position++;
if (sentence_position >= sentence_length) {
sentence_length = 0;
continue;
}
}
// clean up
fclose(fi);
free(neu1);
free(neu1e);
pthread_exit(NULL);
}
// Train word embeddings on text in `train_file` using one or more
// threads, either learning vocabulary from that training data (in a
// separate pass over the data) or loading the vocabulary from a file
// `read_vocab_file`. Optionally save vocabulary to a file
// `save_vocab_file`; either save word embeddings to a file
// `output_file` or, if `classes` is greater than zero, run k-means
// clustering and save those clusters to `output_file`.
//
// If `output_file` is empty (first byte is null), do not train; this
// can be used to learn the vocabulary only from a training text file.
void TrainModel() {
long a, b, c, d; // loop counters among other things
FILE *fo; // output file
pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
printf("Starting training using file %s\n", train_file);
// initialize learning rate
starting_alpha = alpha;
// read vocab from file or learn from training data
if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
// save vocab to file
if (save_vocab_file[0] != 0) SaveVocab();
// if no `output_file` is specified, exit (do not train)
if (output_file[0] == 0) return;
// initialize network parameters
InitNet();
// initialize negative sampling distribution
if (negative > 0) InitUnigramTable();
start = clock();
for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
fo = fopen(output_file, "wb");
if (classes == 0) {
// Save the word vectors
fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
for (a = 0; a < vocab_size; a++) {
fprintf(fo, "%s ", vocab[a].word);
if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
fprintf(fo, "\n");
}
} else {
// Run K-means on the word vectors
int clcn = classes, iter = 10, closeid;
int *centcn = (int *)malloc(classes * sizeof(int));
int *cl = (int *)calloc(vocab_size, sizeof(int));
real closev, x;
real *cent = (real *)calloc(classes * layer1_size, sizeof(real));
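    // initialize cluster assignments round-robin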
for (a = 0; a < vocab_size; a++) cl[a] = a % clcn;
for (a = 0; a < iter; a++) {
for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0;
for (b = 0; b < clcn; b++) centcn[b] = 1;
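      // accumulate the vectors assigned to each centroid (counts
      // start at 1, avoiding division by zero for empty clusters at
      // the cost of a small bias in the average)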
for (c = 0; c < vocab_size; c++) {
for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
centcn[cl[c]]++;
}
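      // average each centroid and scale it to unit length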
for (b = 0; b < clcn; b++) {
closev = 0;
for (c = 0; c < layer1_size; c++) {
cent[layer1_size * b + c] /= centcn[b];
closev += cent[layer1_size * b + c] * cent[layer1_size * b + c];
}
closev = sqrt(closev);
for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev;
}
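      // reassign each word to the centroid with the largest dot
      // product (cosine similarity, since centroids are unit length)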
for (c = 0; c < vocab_size; c++) {
closev = -10;
closeid = 0;
for (d = 0; d < clcn; d++) {
x = 0;
for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
if (x > closev) {
closev = x;
closeid = d;
}
}
cl[c] = closeid;
}
}
// Save the K-means classes
for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
free(centcn);
free(cent);
free(cl);
}
fclose(fo);
}
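// Return the position in `argv` (of length `argc`) of the argument
// matching string `str`, or -1 if it is not present; exit with an
// error if `str` is the last argument (i.e. its value is missing).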
int ArgPos(char *str, int argc, char **argv) {
int a;
for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) {
if (a == argc - 1) {
printf("Argument missing for %s\n", str);
exit(1);
}
return a;
}
return -1;
}
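// (Typical use of ArgPos, as in this program's main(), which lies
// outside this excerpt:
//
//   int i;
//   if ((i = ArgPos((char *)"-size", argc, argv)) > 0)
//     layer1_size = atoi(argv[i + 1]);
// )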