
score.py error #4

@caesarbulu

Description

Hi, I want to know why this error happened. When I used my own data (a FASTA file and an A3M file), I got the following error:

(venus) [b20213010037@admin1 PoET-main]$ cat poet_3381857.err
/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/attention_flash.py:23: UserWarning: flash_attn module not found. Falling back on standard attention. No module named 'flash_attn'
  warnings.warn(
Traceback (most recent call last):
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/scripts/score.py", line 308, in <module>
    main()
  File "/public/home/b20213010037/miniconda3/envs/poet/lib/python3.9/site-packages/torch/utils/contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/scripts/score.py", line 239, in main
    jit_warmup(model, alphabet)
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/scripts/score.py", line 110, in jit_warmup
    _ = embedding_model.embed(x.unsqueeze(0), segment_sizes.unsqueeze(0))
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/poet.py", line 804, in embed
    h, (_, _), (key, value) = layer.forward(
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/transformer.py", line 896, in forward
    return fn(
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/transformer.py", line 700, in forward_packed
    x2, attn_self = self.self_attn(
  File "/public/home/b20213010037/miniconda3/envs/poet/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/attention.py", line 431, in forward
    return fn(
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/attention.py", line 344, in forward_packed
    context_packed, attn_weights = self._inner_attn(
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/attention_flash.py", line 273, in _inner_attn
    context_packed, attn_weights = self.inner_attn(
  File "/public/home/b20213010037/miniconda3/envs/poet/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/attention_flash.py", line 228, in forward
    output, attn_weights = algo(
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/attention_flash.py", line 163, in forward_standard_attn
    q, q_mask, positions = q.to_padded(return_mask=True, return_positions=True)
  File "/public/home/b20213010037/bjj/protein_design/PoET-main/poet/models/modules/packed_sequence.py", line 162, in to_padded
    raise ValueError("Cannot be to_padded")
ValueError: Cannot be to_padded
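As a side note, the flash_attn warning at the top is accurate: I confirmed the module is not importable in my environment with a quick stdlib check (generic Python, nothing PoET-specific):

    import importlib.util

    # True only if the flash_attn package can be imported in this environment.
    print("flash_attn available:", importlib.util.find_spec("flash_attn") is not None)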

Then I thought maybe my data had the wrong format, so I tried your test data, data/BLAT_ECOLX_ColabFold_2202.a3m and data/BLAT_ECOLX_Jacquier_2013_variants.fasta, and hit exactly the same error: the same flash_attn fallback warning followed by an identical traceback through jit_warmup, embed, and to_padded, again ending in:

ValueError: Cannot be to_padded
Please help me! Thanks, and I hope your work goes smoothly!
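P.S. In case it helps with triage: the exception is raised by the to_padded method in poet/models/modules/packed_sequence.py (line 162). For anyone unfamiliar with the terms, here is a generic illustration of packed vs. padded sequence layouts using only plain PyTorch utilities (this is not PoET's own class or API, just the general idea):

    import torch
    from torch.nn.utils.rnn import pad_sequence

    # Two variable-length sequences with feature dimension 8.
    seqs = [torch.randn(5, 8), torch.randn(3, 8)]

    # "Packed": concatenated end to end; the batch structure only survives
    # via the separately stored per-sequence lengths.
    packed = torch.cat(seqs, dim=0)                # shape (8, 8)
    lengths = torch.tensor([5, 3])

    # "Padded": a rectangular (batch, max_len, dim) tensor; converting the
    # packed form back requires the lengths/positions to be available.
    padded = pad_sequence(seqs, batch_first=True)  # shape (2, 5, 8)
    print(packed.shape, padded.shape)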
