-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathtokenizer.js
More file actions
82 lines (72 loc) · 2.16 KB
/
tokenizer.js
File metadata and controls
82 lines (72 loc) · 2.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
/**
* PureBee Tokenizer — minimal character-level tokenizer
*
* For Phase 1 proof of concept we use a simple character-level
* tokenizer. Real deployment would use tiktoken (GPT-2 BPE).
* The architecture is identical — this just proves the pipeline.
*/
'use strict';
class CharTokenizer {
  /**
   * Character-level tokenizer with a small greedy-longest-match layer.
   *
   * Vocabulary construction order (ids are sequential from 0):
   *   1. special tokens (<|endoftext|> = 0, <|pad|> = 1, <|unk|> = 2),
   *   2. printable ASCII (codes 32-126), so any ASCII text round-trips,
   *   3. a handful of common words/subwords for slightly better generation.
   */
  constructor() {
    this.vocab = [];
    this.tokenToId = {};
    this.idToToken = {};
    // Longest token currently in the vocabulary. encode() uses this as
    // the upper bound for its longest-match scan instead of a hard-coded
    // window (the old cap of 6 silently made the 13-char special tokens
    // unmatchable).
    this.maxTokenLen = 1;

    // Special tokens
    this._addToken('<|endoftext|>');
    this._addToken('<|pad|>');
    this._addToken('<|unk|>');

    // Printable ASCII
    for (let i = 32; i < 127; i++) {
      this._addToken(String.fromCharCode(i));
    }

    // Common subwords for slightly better generation.
    // NOTE(review): the trailing duplicate ' ' is a no-op under the
    // dedup in _addToken — possibly meant to be '  ' (double space);
    // confirm against the original. Kept byte-identical here.
    const common = [
      'the', 'The', 'and', 'And', 'is', 'was', 'are', 'were',
      'he', 'she', 'it', 'they', 'we', 'I', 'you',
      'in', 'on', 'at', 'to', 'of', 'for', 'with',
      'a', 'an', 'this', 'that', 'his', 'her',
      ' the', ' and', ' of', ' to', ' a', ' in',
      '\n', '\t', ' ', ' '
    ];
    for (const w of common) this._addToken(w);
  }

  /**
   * Registers `token` if unseen, assigning the next sequential id and
   * updating the longest-token bound used by encode().
   * @param {string} token
   */
  _addToken(token) {
    // Own-property check: a plain object inherits Object.prototype, so a
    // bare `=== undefined` test would misfire on tokens like 'toString'.
    if (!Object.prototype.hasOwnProperty.call(this.tokenToId, token)) {
      const id = this.vocab.length;
      this.vocab.push(token);
      this.tokenToId[token] = id;
      this.idToToken[id] = token;
      if (token.length > this.maxTokenLen) this.maxTokenLen = token.length;
    }
  }

  /** @returns {number} total number of tokens in the vocabulary */
  get vocabSize() { return this.vocab.length; }
  /** @returns {number} id of the end-of-text special token */
  get eosId() { return this.tokenToId['<|endoftext|>']; }
  /** @returns {number} id of the unknown-character special token */
  get unkId() { return this.tokenToId['<|unk|>']; }

  /**
   * Encodes `text` into token ids via greedy longest-match against the
   * vocabulary; any character not in the vocabulary maps to <|unk|>.
   * @param {string} text
   * @returns {number[]} token ids
   */
  encode(text) {
    // Own-property lookup guard: with maxTokenLen now covering the
    // 13-char special tokens, chunks such as 'constructor' would
    // otherwise resolve to inherited Object.prototype members.
    const own = (t) => Object.prototype.hasOwnProperty.call(this.tokenToId, t);
    const ids = [];
    let i = 0;
    while (i < text.length) {
      // Try multi-char tokens first (longest match), bounded by the
      // longest vocab token rather than a fixed constant.
      let matched = false;
      const cap = Math.min(this.maxTokenLen, text.length - i);
      for (let len = cap; len > 1; len--) {
        const chunk = text.slice(i, i + len);
        if (own(chunk)) {
          ids.push(this.tokenToId[chunk]);
          i += len;
          matched = true;
          break;
        }
      }
      if (!matched) {
        const ch = text[i];
        ids.push(own(ch) ? this.tokenToId[ch] : this.unkId);
        i++;
      }
    }
    return ids;
  }

  /**
   * Decodes token ids back into text; ids outside the vocabulary render
   * as '?' (all vocab tokens are non-empty strings, hence truthy).
   * @param {number[]} ids
   * @returns {string}
   */
  decode(ids) {
    return ids.map(id => this.idToToken[id] || '?').join('');
  }
}
// CommonJS named export so callers can `const { CharTokenizer } = require('./tokenizer')`.
module.exports = { CharTokenizer };