From 0e8b47811e711b46fe80e6b4f5304186f83744d6 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 8 Aug 2023 13:43:40 +0200
Subject: [PATCH] Llama change. (#793)

# What does this PR do?

Reflecting https://github.com/huggingface/transformers/pull/24998

Current status: making sure the integration tests *are* broken by this
change.

Fixes # (issue)

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
---
 .../models/custom_modeling/flash_llama_modeling.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index 2c22ea4..d0185ed 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -185,8 +185,11 @@ class FlashLlamaAttention(torch.nn.Module):
         self.hidden_size = config.hidden_size
         self.head_size = self.hidden_size // self.num_heads
 
-        self.rotary_emb = PositionRotaryEmbedding.load(
-            config=config, prefix=f"{prefix}.rotary_emb", weights=weights
+        # self.rotary_emb = PositionRotaryEmbedding.load(
+        #     config=config, prefix=f"{prefix}.rotary_emb", weights=weights
+        # )
+        self.rotary_emb = PositionRotaryEmbedding.static(
+            config=config, dim=self.head_size, base=10000.0, device=weights.device
         )
         self.softmax_scale = self.head_size**-0.5
 
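
For context, the diff swaps `PositionRotaryEmbedding.load(...)`, which read the rotary embedding state from checkpoint weights under `{prefix}.rotary_emb`, for `PositionRotaryEmbedding.static(...)`, which derives it from the head dimension and a fixed base. Below is a minimal sketch of what such a static constructor typically computes; the class name and internals here are illustrative assumptions, not the actual `PositionRotaryEmbedding` implementation in text-generation-inference.

```python
# Hedged sketch of a "static" rotary position embedding: the inverse-frequency
# table is computed from hyperparameters (dim, base) instead of being loaded
# from checkpoint weights. Illustrative only; not the TGI implementation.
import torch


class StaticRotaryEmbedding(torch.nn.Module):
    def __init__(self, dim: int, base: float, device: torch.device):
        super().__init__()
        # inv_freq[i] = 1 / base^(2i / dim) for i = 0 .. dim/2 - 1.
        # Nothing is read from the checkpoint, which is the point of the
        # switch from .load(...) to .static(...) in this PR.
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)
        )
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def cos_sin(self, positions: torch.Tensor):
        # Outer product of positions and inverse frequencies gives the rotation
        # angles; their cos/sin get applied to the query/key vector halves.
        freqs = torch.outer(positions.to(torch.float32), self.inv_freq)
        return torch.cos(freqs), torch.sin(freqs)


# Usage with values mirroring the diff above (dim=head_size, base=10000.0):
emb = StaticRotaryEmbedding(dim=64, base=10000.0, device=torch.device("cpu"))
cos, sin = emb.cos_sin(torch.arange(8))
```

Deriving `inv_freq` from hyperparameters means the model no longer depends on the checkpoint carrying rotary buffers at all, which appears to be the motivation for mirroring the linked transformers change.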