Global seed set to 40091080
Translate: 一个精致的中国传统月饼,放在白色盘子里,月饼上面有雕刻的文字和花朵 --> A delicate traditional Chinese mooncake on a white plate with carved words and flowers
Data shape for DDIM sampling is (4, 4, 64, 64), eta 0
Running DDIM Sampling with 20 timesteps
DDIM Sampler: 0%| | 0/20 [00:00<?, ?it/s]
Traceback (most recent call last):
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\queueing.py", line 625, in process_events
response = await route_utils.call_process_api(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\route_utils.py", line 322, in call_process_api
output = await app.get_blocks().process_api(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\blocks.py", line 2103, in process_api
result = await self.call_function(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\blocks.py", line 1650, in call_function
prediction = await anyio.to_thread.run_sync( # type: ignore
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\anyio\_backends\_asyncio.py", line 2470, in run_sync_in_worker_thread
return await future
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\anyio\_backends\_asyncio.py", line 967, in run
result = context.run(func, *args)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\utils.py", line 890, in wrapper
response = f(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\demo.py", line 183, in process
results, rtn_code, rtn_warning, debug_info = inference(input_data, **params)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\modelscope\models\base\base_torch_model.py", line 36, in __call__
return self.postprocess(self.forward(*args, **kwargs))
File "D:\ai\self_use_package\AnyText2\ms_wrapper.py", line 274, in forward
samples, intermediates = self.ddim_sampler.sample(ddim_steps, img_count,
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\ddim_hacked.py", line 103, in sample
samples, intermediates = self.ddim_sampling(conditioning, size,
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\ddim_hacked.py", line 163, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\ddim_hacked.py", line 190, in p_sample_ddim
model_t = self.model.apply_model(x, t, c)
File "D:\ai\self_use_package\AnyText2\cldm\cldm.py", line 525, in apply_model
_control = self.control_model(x=x_noisy, timesteps=t, context=text_cond, hint=_hint, text_info=cond['text_info'])
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\cldm.py", line 381, in forward
h = module(h, emb, context)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward
x = layer(x, context, attnx_scale)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 345, in forward
x = block(x, context=context[i], attnx_scale=attnx_scale)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 276, in forward
return checkpoint(self._forward, (x, context, attnx_scale), self.parameters(), self.checkpoint)
File "D:\ai\self_use_package\AnyText2\ldm\modules\diffusionmodules\util.py", line 116, in checkpoint
return func(*inputs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 279, in _forward
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 192, in forward
out = einsum('b i j, b j d -> b i d', sim, v)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\functional.py", line 377, in einsum
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
RuntimeError: expected scalar type Half but found Float
Global seed set to 40091080
Translate: 一个精致的中国传统月饼,放在白色盘子里,月饼上面有雕刻的文字和花朵 --> A delicate traditional Chinese mooncake on a white plate with carved words and flowers
Data shape for DDIM sampling is (4, 4, 64, 64), eta 0
Running DDIM Sampling with 20 timesteps
DDIM Sampler: 0%| | 0/20 [00:00<?, ?it/s]
Traceback (most recent call last):
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\queueing.py", line 625, in process_events
response = await route_utils.call_process_api(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\route_utils.py", line 322, in call_process_api
output = await app.get_blocks().process_api(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\blocks.py", line 2103, in process_api
result = await self.call_function(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\blocks.py", line 1650, in call_function
prediction = await anyio.to_thread.run_sync( # type: ignore
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\anyio\_backends\_asyncio.py", line 2470, in run_sync_in_worker_thread
return await future
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\anyio\_backends\_asyncio.py", line 967, in run
result = context.run(func, *args)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\gradio\utils.py", line 890, in wrapper
response = f(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\demo.py", line 183, in process
results, rtn_code, rtn_warning, debug_info = inference(input_data, **params)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\modelscope\models\base\base_torch_model.py", line 36, in __call__
return self.postprocess(self.forward(*args, **kwargs))
File "D:\ai\self_use_package\AnyText2\ms_wrapper.py", line 274, in forward
samples, intermediates = self.ddim_sampler.sample(ddim_steps, img_count,
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\ddim_hacked.py", line 103, in sample
samples, intermediates = self.ddim_sampling(conditioning, size,
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\ddim_hacked.py", line 163, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\ddim_hacked.py", line 190, in p_sample_ddim
model_t = self.model.apply_model(x, t, c)
File "D:\ai\self_use_package\AnyText2\cldm\cldm.py", line 525, in apply_model
_control = self.control_model(x=x_noisy, timesteps=t, context=text_cond, hint=_hint, text_info=cond['text_info'])
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\cldm\cldm.py", line 381, in forward
h = module(h, emb, context)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward
x = layer(x, context, attnx_scale)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 345, in forward
x = block(x, context=context[i], attnx_scale=attnx_scale)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 276, in forward
return checkpoint(self._forward, (x, context, attnx_scale), self.parameters(), self.checkpoint)
File "D:\ai\self_use_package\AnyText2\ldm\modules\diffusionmodules\util.py", line 116, in checkpoint
return func(*inputs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 279, in _forward
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\self_use_package\AnyText2\ldm\modules\attention.py", line 192, in forward
out = einsum('b i j, b j d -> b i d', sim, v)
File "D:\ai\self_use_package\AnyText2\anytext2_env\lib\site-packages\torch\functional.py", line 377, in einsum
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
RuntimeError: expected scalar type Half but found Float