# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers.models.llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaModel,
    LlamaRMSNorm,
)

from optimum.intel.utils.import_utils import is_ipex_version

from .modeling_utils import (
    _IPEXLlamaDecoderLayerRef,
    _llama_attn_forward,
    _llama_layer_norm_forward,
    _llama_model_forward,
)


_IPEX_EXPORTED_ARCH = ("LlamaForCausalLM",)
_IPEX_EXPORTED_TASK = ("text-generation",)


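# Bind `new_function` to the module instance `m` and install it under
# `func_name`, so the patched method receives `m` as `self` when called.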
def convert_func(m, func_name, new_function):
    bound_method = new_function.__get__(m, m.__class__)
    setattr(m, func_name, bound_method)


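# Recursively walk `m` and rebind `new_function` as `new_function_name`
# (typically "forward") on every submodule that is an instance of `target_m`.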
def convert_functions(m, target_m, new_function_name, new_function):
    for _, sub_m in m.named_children():
        if isinstance(sub_m, target_m):
            convert_func(sub_m, new_function_name, new_function)
        convert_functions(sub_m, target_m, new_function_name, new_function)


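# Recursively replace every submodule of type `target_m` with an instance of
# `new_class`, which wraps the original module together with the model config.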
def convert_class(m, target_m, new_class, config, distributed=False):
    for name, sub_m in m.named_children():
        if isinstance(sub_m, target_m):
            new_m = new_class(sub_m, config, distributed)
            setattr(m, name, new_m)
        convert_class(sub_m, target_m, new_class, config, distributed)


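# Attach `new_op` as an attribute named `new_op_name` on every submodule of
# type `target_m`, so the op is reachable from the patched forward functions.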
def patch_op(m, target_m, new_op_name, new_op):
    for name, sub_m in m.named_children():
        if isinstance(sub_m, target_m):
            setattr(sub_m, new_op_name, new_op)
        patch_op(sub_m, target_m, new_op_name, new_op)


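# Patch a LlamaForCausalLM model with IPEX fused kernels (rotary embedding and
# an indirect-access KV cache) and the IPEX-optimized forward implementations.
# Requires intel_extension_for_pytorch >= 2.3.0.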
def _patch_llama_model(model):
    if is_ipex_version("<", "2.3.0"):
        raise ImportError("Only ipex version >= 2.3.0 supports RotaryEmbedding and IndirectAccessKVCache")

    from intel_extension_for_pytorch.llm.modules import IndirectAccessKVCache, RotaryEmbedding

    # Fused rotary embedding kernel, configured from the model config:
    # max sequence length, head dimension, rope base, and architecture name.
    ipex_rope = RotaryEmbedding(
        model.config.max_position_embeddings,
        model.config.hidden_size // model.config.num_attention_heads,
        model.config.rope_theta,
        model.config.architectures[0],
    )
    # Fused scaled-dot-product attention backed by an indirect-access KV cache.
    ipex_scale_dot_product = IndirectAccessKVCache(text_max_length=model.config.max_position_embeddings)
    patch_op(model, LlamaAttention, "ipex_rope", ipex_rope)
    patch_op(model, LlamaAttention, "ipex_scale_dot_product", ipex_scale_dot_product)

    # Swap in the IPEX-optimized forward implementations.
    convert_functions(model, LlamaModel, "forward", _llama_model_forward)
    convert_functions(model, LlamaAttention, "forward", _llama_attn_forward)
    convert_functions(model, LlamaRMSNorm, "forward", _llama_layer_norm_forward)

    convert_class(model, LlamaDecoderLayer, _IPEXLlamaDecoderLayerRef, model.config)
    return model


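# Entry point: dispatch on the model class and apply the matching patcher.
# Unsupported architectures are returned unchanged.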
def _patch_model(model):
    if isinstance(model, LlamaForCausalLM):
        model = _patch_llama_model(model)
    return model
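

# Example usage (an illustrative sketch only, not part of this module's public
# API; the checkpoint name below is an assumption for demonstration, and the
# fused kernels require intel_extension_for_pytorch >= 2.3.0):
#
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
#     model = _patch_model(model)  # replaces forwards with IPEX fused kernels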