/* Copyright 2025 The xLLM Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://github.com/jd-opensource/xllm/blob/main/LICENSE

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "llmrec_worker_impl.h"

#include <glog/logging.h>
#include <torch/torch.h>

#include <algorithm>
#include <vector>

#include "common/types.h"
#include "core/layers/word_embedding.h"

namespace xllm {

LlmRecWorkerImpl::LlmRecWorkerImpl(const ParallelArgs& parallel_args,
                                   const torch::Device& device,
                                   const runtime::Options& options)
    : RecWorkerImpl(parallel_args, device, options) {}

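// Builds processed_inputs.input_params.input_embedding from the
// recommendation-specific tensors carried in the request's multimodal data:
// token ids are embedded through the model's word-embedding layer and, when a
// pre-computed embedding is supplied as well, the two are merged row-wise
// according to the host-provided position indices.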
void LlmRecWorkerImpl::prepare_work_before_execute(
    const ForwardInput& inputs,
    ForwardInput& processed_inputs) {
  WorkerImpl::prepare_work_before_execute(inputs, processed_inputs);

  if (!inputs.input_params.mm_data.valid()) {
    return;
  }

  torch::Tensor input_embedding;
  torch::Tensor input_tokens_tensor;
  torch::Tensor input_indices_tensor;

  const auto& mm_data = inputs.input_params.mm_data;
  const auto& processed_mm_data = processed_inputs.input_params.mm_data;

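  // Token ids that still need to be embedded on the device.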
  if (auto res =
          processed_mm_data.get<torch::Tensor>(LLM_REC_INPUT_TOKENS)) {
    input_tokens_tensor = res.value();
  }

  // The input indices are position indices generated on the host side.
  if (auto res = mm_data.get<torch::Tensor>(LLM_REC_INPUT_INDICES)) {
    input_indices_tensor = res.value();
  }

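  // Embedding rows that were already computed upstream and only need to be
  // placed at the right positions.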
  if (auto res =
          processed_mm_data.get<torch::Tensor>(LLM_REC_INPUT_EMBEDDING)) {
    input_embedding = res.value();
  }

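  // Convert the provided embedding to the model's compute dtype.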
  if (input_embedding.defined()) {
    input_embedding = input_embedding.to(dtype());
  }

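  // With host-side indices available, embed the tokens on the device and, if a
  // pre-computed embedding is also present, interleave the two by row index;
  // otherwise fall back to whichever embedding is defined.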
  if (input_indices_tensor.defined()) {
    layer::WordEmbedding word_embedding = get_word_embedding();
    torch::Tensor input_tokens_embedding =
        word_embedding(input_tokens_tensor, 0);

    if (input_embedding.defined()) {
      std::vector<int> input_indices(
          input_indices_tensor.data_ptr<int>(),
          input_indices_tensor.data_ptr<int>() + input_indices_tensor.numel());

      processed_inputs.input_params.input_embedding =
          merge_embeddings_by_indices(
              input_tokens_embedding, input_embedding, input_indices);
    } else {
      processed_inputs.input_params.input_embedding = input_tokens_embedding;
    }
  } else if (input_embedding.defined()) {
    processed_inputs.input_params.input_embedding = input_embedding;
  }
}

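// Merges two 2-D embeddings into a single tensor with
// input_tokens_embedding.size(0) + input_embedding.size(0) rows. Rows listed
// in input_indices are filled from input_tokens_embedding (in order); all
// remaining rows are filled from input_embedding (in order). Both inputs must
// share the same width, dtype and device.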
torch::Tensor LlmRecWorkerImpl::merge_embeddings_by_indices(
    const torch::Tensor& input_tokens_embedding,
    const torch::Tensor& input_embedding,
    const std::vector<int>& input_indices) {
  CHECK_EQ(input_embedding.dim(), 2);
  CHECK_EQ(input_tokens_embedding.dim(), 2);
  CHECK_EQ(input_tokens_embedding.size(1), input_embedding.size(1));
  CHECK_EQ(input_tokens_embedding.dtype(), input_embedding.dtype());
  CHECK_EQ(input_tokens_embedding.device(), input_embedding.device());

  const int64_t total_rows =
      input_tokens_embedding.size(0) + input_embedding.size(0);
  const int64_t cols = input_embedding.size(1);

  torch::Device device = input_embedding.device();
  torch::Tensor merged = torch::empty(
      {total_rows, cols}, torch::dtype(input_embedding.dtype()).device(device));

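  // Every row position not claimed by input_indices receives a row of
  // input_embedding, in its original order.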
  std::vector<int> input_embedding_indices;
  for (int i = 0; i < static_cast<int>(total_rows); ++i) {
    if (std::find(input_indices.begin(), input_indices.end(), i) ==
        input_indices.end()) {
      input_embedding_indices.push_back(i);
    }
  }

  CHECK_EQ(input_embedding_indices.size(), input_embedding.size(0));

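  // Scatter the input_embedding rows into the unclaimed positions.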
  torch::Tensor input_embedding_indices_tensor =
      torch::tensor(input_embedding_indices, torch::kInt64).to(device);
  merged.index_put_({input_embedding_indices_tensor, torch::indexing::Ellipsis},
                    input_embedding);

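  // Scatter the token embeddings into the positions given by input_indices.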
  torch::Tensor input_indices_tensor =
      torch::tensor(input_indices, torch::kInt64).to(device);
  merged.index_put_({input_indices_tensor, torch::indexing::Ellipsis},
                    input_tokens_embedding);

  return merged;
}

}  // namespace xllm