patch: adapt attribution get error #274
base: main
```diff
@@ -10,7 +10,8 @@
 from verl.workers.rollout.vllm_rollout.vllm_async_server import AsyncvLLMServer
 from vllm.entrypoints.openai.protocol import ChatCompletionRequest, ErrorResponse

-from agentlightning.instrumentation.vllm import ChatCompletionResponsePatched, instrument_vllm
+from agentlightning.instrumentation.vllm import instrument_vllm
+from agentlightning.logging import configure_logger


 def _unwrap_ray_remote(cls):
@@ -19,9 +20,11 @@ def _unwrap_ray_remote(cls):
     return cls


+logger = configure_logger()
+
 @ray.remote(num_cpus=1)
 class PatchedvLLMServer(_unwrap_ray_remote(AsyncvLLMServer)):

     def __init__(self, *args, **kwargs):
         instrument_vllm()
         super().__init__(*args, **kwargs)
@@ -36,10 +39,14 @@ async def chat_completion(self, raw_request: Request):
         """
         request_json = await raw_request.json()
         request = ChatCompletionRequest(**request_json)
-        generator = await self.openai_serving_chat.create_chat_completion(request, raw_request)
+        generator = await self.openai_serving_chat.create_chat_completion(
+            request, raw_request
+        )

         if isinstance(generator, ErrorResponse):
-            return JSONResponse(content=generator.model_dump(), status_code=generator.code)
+            status_code = getattr(generator, "code", None) or 500
```
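The substantive change is the error path in `chat_completion`: when vLLM's `create_chat_completion` returns an `ErrorResponse`, the patch reads the status code with `getattr` and falls back to 500 when the `code` attribute is missing or empty; reading `generator.code` directly raises `AttributeError` when the attribute is absent, which is presumably the "attribution get error" the PR title refers to. Below is a minimal sketch of that fallback behavior. It uses a hypothetical stand-in object instead of vLLM's real `ErrorResponse`, and imports `JSONResponse` from FastAPI, which may differ from the actual import in the patched file.

```python
from dataclasses import dataclass
from typing import Optional

from fastapi.responses import JSONResponse


@dataclass
class FakeErrorResponse:
    """Stand-in for vLLM's ErrorResponse; not the real class."""
    message: str
    code: Optional[int] = None

    def model_dump(self):
        # Mimics the pydantic v2 method used in the patched code.
        return {"message": self.message, "code": self.code}


def build_error_response(generator):
    # Mirrors the patched logic: use the error's code when present,
    # otherwise fall back to HTTP 500.
    status_code = getattr(generator, "code", None) or 500
    return JSONResponse(content=generator.model_dump(), status_code=status_code)


resp = build_error_response(FakeErrorResponse(message="bad request"))
print(resp.status_code)  # 500 rather than an invalid/None status code
```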
A review comment on the new `status_code` line suggests spelling out the `None` check instead of relying on `or`:

```diff
-status_code = getattr(generator, "code", None) or 500
+status_code = getattr(generator, "code", None)
+status_code = status_code if status_code is not None else 500
```
Do you think this comment makes sense?
Not sure. It may be necessary to check whether the status code is actually used anywhere in AgentLightning, and which codes verl may return.
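For context, the two variants only differ when `code` exists but is falsy without being `None` (for example `0`). A small sketch of that difference; whether verl or vLLM can actually return such values is not confirmed here.

```python
def fallback_with_or(code):
    # `or` replaces every falsy value, including 0, with 500.
    return code or 500


def fallback_with_none_check(code):
    # The explicit check only replaces None and keeps 0.
    return code if code is not None else 500


for code in (404, None, 0):
    print(code, fallback_with_or(code), fallback_with_none_check(code))
# 404  -> 404, 404
# None -> 500, 500
# 0    -> 500, 0
```

If only `None` and valid HTTP codes can ever appear, the two spellings behave identically and the shorter `or` form is fine.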
A separate comment on the logger setup: it should be `logger = logging.getLogger(__name__)`.
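That is, instead of the module-level `logger = configure_logger()` added in this diff, the reviewer points at the standard per-module logger idiom. A sketch follows, assuming `configure_logger()` is agentlightning's helper for setting up handlers and formatting globally; its exact behavior is not shown in this diff.

```python
import logging

# Conventional setup: one logger per module, named after the module,
# so records can be filtered and attributed by module path.
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Handler/level configuration happens once, at application entry,
    # not at import time of every module.
    logging.basicConfig(level=logging.INFO)
    logger.info("logger obtained via logging.getLogger(__name__)")
```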