-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprotsec.py
More file actions
107 lines (85 loc) · 3.89 KB
/
protsec.py
File metadata and controls
107 lines (85 loc) · 3.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
from pathlib import Path
import argparse
import multiprocessing
from Bio import SeqIO
from embedder import ProteinEmbedder
from concurrent.futures import ThreadPoolExecutor, as_completed
import pickle
def parse_args() -> argparse.Namespace:
    """Collect and validate command-line options for the embedding build.

    Returns:
        The parsed ``argparse.Namespace``.

    Raises:
        FileNotFoundError: when ``--fasta_path`` does not point to an
            existing file.
    """
    parser = argparse.ArgumentParser(
        description='Build protein vector database from FASTA file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '--fasta_path', type=str, required=True,
        help='Path to input FASTA file',
    )
    parser.add_argument(
        '--dim', type=int, default=1024,
        help='Dimensionality of the embeddings',
    )
    parser.add_argument(
        '--num_threads', type=int,
        default=max(1, multiprocessing.cpu_count() // 4),
        help='Number of worker threads to use (default: 1/4 of CPU cores)',
    )
    parser.add_argument(
        '--dim_reduct', type=str, default='MDS',
        choices=['UMAP', 't-SNE', 'MDS'],
        help='Algorithm for dimensionality reduction',
    )
    parser.add_argument(
        '--dist_func', type=str, default='ASMP',
        choices=['SMS', 'ASMP', 'SNN'],
        help='Distance function for computing distance',
    )
    parser.add_argument(
        '--out_file', type=str, default='protein_embedding_ProtSEC.pkl',
        help='Output file path for the embeddings',
    )
    parsed = parser.parse_args()

    # Fail fast on a missing input file rather than deep into the run.
    if not Path(parsed.fasta_path).exists():
        raise FileNotFoundError(f"FASTA file not found: {parsed.fasta_path}")
    return parsed
def encode_sequence(seq_record, protein_encoder):
    """Encode a single FASTA record into an embedding entry.

    Args:
        seq_record: object exposing ``.seq`` and ``.description``
            (e.g. a Biopython ``SeqRecord``).
        protein_encoder: object with an ``encode(str)`` method.

    Returns:
        A dict with keys ``'v'`` (embedding vector), ``'info'`` (record
        description) and ``'length'`` (sequence length), or ``None`` when
        the sequence is empty after trimming.
    """
    # Trim leading/trailing '*' (stop symbols) before encoding.
    cleaned = str(seq_record.seq).strip('*')
    if not cleaned:
        return None
    return {
        'v': protein_encoder.encode(cleaned),
        'info': seq_record.description,
        'length': len(cleaned),
    }
def main():
    """Embed every sequence in the input FASTA file and pickle the results."""
    args = parse_args()

    # Echo the effective configuration so runs are reproducible from logs.
    print(f"Using dimensionality reduction method: {args.dim_reduct}")
    print(f"Using distance function: {args.dist_func}")
    print(f"Using {args.num_threads} worker threads")

    encoder = ProteinEmbedder(args.dim_reduct, args.dist_func, args.dim)

    # Materialize all records up front so we know the total for progress.
    records = list(SeqIO.parse(args.fasta_path, "fasta"))
    total = len(records)
    print(f"Found {total} sequences to encode")

    embeddings = []
    done = 0
    # Fan the encoding work out across the thread pool and collect results
    # in completion order (output order is therefore non-deterministic).
    with ThreadPoolExecutor(max_workers=args.num_threads) as pool:
        pending = [
            pool.submit(encode_sequence, rec, encoder) for rec in records
        ]
        for fut in as_completed(pending):
            try:
                entry = fut.result()
            except Exception as exc:
                # A failed record is reported but does not abort the run.
                print(f"Seq generated an exception: {exc}")
                continue
            if entry:
                embeddings.append(entry)
                done += 1
                # Progress heartbeat every 100 encoded sequences.
                if done % 100 == 0:
                    print(f"Processed {done}/{total} sequences")

    print(f"Successfully encoded {len(embeddings)} sequences")

    out_path = Path(args.out_file)
    # Ensure the destination directory exists before writing.
    out_path.parent.mkdir(parents=True, exist_ok=True)
    print(f"Saving embedding to: {out_path}")
    with open(out_path, 'wb') as fh:
        pickle.dump(embeddings, fh)
    print(f"Embedding saved successfully to {out_path}")
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main()