turboderp / exllama

A more memory-efficient rewrite of the HF transformers implementation of Llama for use with quantized weights.
MIT License
2.67k stars · 214 forks

"temp_state buffer is too small" when using LLama 13b at full context length #211

Closed anujnayyar1 closed 11 months ago

anujnayyar1 commented 11 months ago

Hey @turboderp, @aljungberg! Firstly, thank you for the awesome repo!!

I am running into an issue that I think is related to #160 and #128.

When running Llama 2 13B at full context length (4096 tokens) with batch size 2, I consistently hit "temp_state buffer is too small". For reference, this is running on a 4090, which should have enough VRAM to cope with this.
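As a rough sanity check on the VRAM claim, here is back-of-the-envelope arithmetic for the fp16 key/value cache at this configuration (the dimensions are the published Llama 2 13B ones; the numbers are illustrative, not measured):

```python
# Rough fp16 KV-cache estimate for Llama 2 13B at 4096 tokens, batch size 2
# (illustrative arithmetic only, not a measurement).
layers, hidden = 40, 5120          # Llama 2 13B depth and hidden size
seq_len, batch = 4096, 2
bytes_per_elem = 2                 # fp16

kv_cache_bytes = 2 * layers * hidden * seq_len * batch * bytes_per_elem   # K and V
print(kv_cache_bytes / 2**30)      # ~6.25 GiB, on top of roughly 7 GB of 4-bit weights
```

So a 24 GB card has headroom here, which suggests the error is a buffer-sizing problem rather than simple memory exhaustion.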

Here is a script you can use to reproduce what is happening:

from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import os, glob
import logging
from typing import Generator, Union
import runpod
from huggingface_hub import snapshot_download
from copy import copy

import re
import codecs

ESCAPE_SEQUENCE_RE = re.compile(r'''
    ( \\U........      # 8-digit hex escapes
    | \\u....          # 4-digit hex escapes
    | \\x..            # 2-digit hex escapes
    | \\[0-7]{1,3}     # Octal escapes
    | \\N\{[^}]+\}     # Unicode characters by name
    | \\[\\'"abfnrtv]  # Single-character escapes
    )''', re.UNICODE | re.VERBOSE)

def decode_escapes(s):
    def decode_match(match):
        return codecs.decode(match.group(0), 'unicode-escape')

    return ESCAPE_SEQUENCE_RE.sub(decode_match, s)
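# Example usage (illustrative value): decode_escapes(r"### User:\n") returns
# "### User:" followed by a real newline, so the PROMPT_PREFIX / PROMPT_SUFFIX
# environment variables can carry escape sequences.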

logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))

def load_model(num_return):
    global generator, default_settings

    if not generator:
        model_directory = snapshot_download(repo_id="TheBloke/StableBeluga-13B-GPTQ", revision="main")
        tokenizer_path = os.path.join(model_directory, "tokenizer.model")
        model_config_path = os.path.join(model_directory, "config.json")
        st_pattern = os.path.join(model_directory, "*.safetensors")
        st_files = glob.glob(st_pattern)
        if not st_files:
            raise ValueError(f"No safetensors files found in {model_directory}")
        model_path = st_files[0]

        # Create config, model, tokenizer and generator
        config = ExLlamaConfig(model_config_path)               # create config from config.json
        config.model_path = model_path                          # supply path to model weights file

        gpu_split = os.getenv("GPU_SPLIT", "")
        if gpu_split:
            config.set_auto_map(gpu_split)
            config.gpu_peer_fix = True
        alpha_value = int(os.getenv("ALPHA_VALUE", "1"))
        config.max_seq_len = 4096
        if alpha_value != 1:
            config.alpha_value = alpha_value
            config.calculate_rotary_embedding_base()

        model = ExLlama(config)                                 # create ExLlama instance and load the weights
        tokenizer = ExLlamaTokenizer(tokenizer_path)            # create tokenizer from tokenizer model file

        cache = ExLlamaCache(model, batch_size=num_return)      # create cache for inference, one row per returned sequence
        generator = ExLlamaGenerator(model, tokenizer, cache)   # create generator
        default_settings = {
            k: getattr(generator.settings, k) for k in dir(generator.settings) if k[:2] != '__'
        }
    return generator, default_settings

generator = None
default_settings = None
prompt_prefix = decode_escapes(os.getenv("PROMPT_PREFIX", ""))
prompt_suffix = decode_escapes(os.getenv("PROMPT_SUFFIX", ""))
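# Environment variables read by this script (example values are illustrative):
#   GPU_SPLIT="16,24"   -> config.set_auto_map() splits layers across GPUs by VRAM budget in GB
#   ALPHA_VALUE="2"     -> RoPE alpha scaling via config.calculate_rotary_embedding_base()
#   PROMPT_PREFIX / PROMPT_SUFFIX -> intended prompt wrappers (decoded but unused in this repro)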

def generate_with_streaming(prompt, max_new_tokens):
    global generator
    generator.end_beam_search()

    # Tokenizing the input
    ids = generator.tokenizer.encode(prompt)
    ids = ids[:, -generator.model.config.max_seq_len:]

    generator.gen_begin_reuse(ids)
    initial_len = generator.sequence[0].shape[0]
    has_leading_space = False
    for i in range(max_new_tokens):
        token = generator.gen_single_token()
        if i == 0 and generator.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'):
            has_leading_space = True

        decoded_text = generator.tokenizer.decode(generator.sequence[0][initial_len:])
        if has_leading_space:
            decoded_text = ' ' + decoded_text

        yield decoded_text
        if token.item() == generator.tokenizer.eos_token_id:
            break
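
# Example of consuming the streaming generator above (illustrative prompt, assumes
# load_model() has been called):
#   for partial in generate_with_streaming("Hello, my name is", max_new_tokens=20):
#       print(partial)   # each yield is the full decoded text generated so far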

def inference(event) -> Union[str, Generator[str, None, None]]:
    logging.info(event)
    job_input = event["input"]
    if not job_input:
        raise ValueError("No input provided")

    n = job_input.pop("n", 1)
    print(n)
    prompt = [job_input.pop("prompt")] * n
    print(prompt)
    max_new_tokens = job_input.pop("max_new_tokens", 100)
    stream: bool = job_input.pop("stream", False)

    generator, default_settings = load_model(n)

    settings = copy(default_settings)
    settings.update(job_input)
    for key, value in settings.items():
        setattr(generator.settings, key, value)

    output_text = generator.generate_simple(prompt, max_new_tokens = max_new_tokens)
    # Strip the prompt from each returned sequence before returning the batch.
    doneout = [text.replace(prompt[0], "") for text in output_text]
    return doneout

In the above script I have set the cache batch size to n. I am trying to get the model to return two decoded sequences per prompt, and since both prompts are identical there should be no padding issues.
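A minimal sketch of that assumption, using the same tokenizer call that generate_simple makes internally (prompt_text stands in for the long prompt below):

```python
# Minimal sketch (assumes a loaded generator as in the script above; prompt_text is a placeholder).
# Identical prompts tokenize to identical lengths, so neither row of the batch needs padding.
ids, mask = generator.tokenizer.encode([prompt_text, prompt_text], return_mask=True)
print(ids.shape)   # (2, prompt_len): both rows are the same length
```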

Here is the Inference script. Hidden for brevity. ``` input_event = { "input": { "prompt": """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Laoreet suspendisse interdum consectetur libero id. Posuere morbi leo urna molestie at elementum eu facilisis sed. Enim sit amet venenatis urna cursus eget nunc scelerisque. Tellus mauris a diam maecenas. Sed euismod nisi porta lorem mollis aliquam. Augue interdum velit euismod in pellentesque massa. Viverra accumsan in nisl nisi scelerisque. Adipiscing diam donec adipiscing tristique risus. Sed augue lacus viverra vitae congue eu consequat ac. Egestas sed sed risus pretium quam vulputate. Et netus et malesuada fames ac turpis. Velit aliquet sagittis id consectetur purus ut faucibus pulvinar elementum. Volutpat diam ut venenatis tellus in. Purus in mollis nunc sed id. Consequat interdum varius sit amet mattis vulputate enim nulla. Nisi vitae suscipit tellus mauris a diam maecenas sed. Rutrum quisque non tellus orci ac auctor augue mauris augue. Cursus turpis massa tincidunt dui ut ornare. Quis lectus nulla at volutpat diam ut. Ipsum faucibus vitae aliquet nec ullamcorper sit. Erat velit scelerisque in dictum. Sit amet risus nullam eget felis eget nunc lobortis. Quam lacus suspendisse faucibus interdum posuere lorem ipsum dolor sit. Tempus imperdiet nulla malesuada pellentesque elit eget. Quis viverra nibh cras pulvinar mattis nunc sed blandit libero. Eget lorem dolor sed viverra ipsum nunc aliquet. Sapien pellentesque habitant morbi tristique senectus et netus et malesuada. Lobortis elementum nibh tellus molestie nunc. Proin gravida hendrerit lectus a. Orci dapibus ultrices in iaculis nunc. Ut aliquam purus sit amet luctus. Nullam vehicula ipsum a arcu. Mi bibendum neque egestas congue quisque egestas diam. Vel turpis nunc eget lorem dolor sed. Quam lacus suspendisse faucibus interdum. Tortor dignissim convallis aenean et tortor. Nibh praesent tristique magna sit. Viverra nibh cras pulvinar mattis nunc sed blandit libero. Tempus urna et pharetra pharetra massa massa ultricies mi quis. Bibendum neque egestas congue quisque egestas diam in arcu cursus. Id venenatis a condimentum vitae sapien pellentesque habitant morbi tristique. Egestas erat imperdiet sed euismod nisi porta. Lectus vestibulum mattis ullamcorper velit sed ullamcorper morbi tincidunt. Eget nulla facilisi etiam dignissim diam quis enim. Diam vulputate ut pharetra sit amet aliquam. Pellentesque dignissim enim sit amet venenatis urna cursus eget. Commodo viverra maecenas accumsan lacus vel facilisis volutpat est velit. Risus commodo viverra maecenas accumsan lacus vel. Neque convallis a cras semper auctor neque. Sem fringilla ut morbi tincidunt. Nisl tincidunt eget nullam non nisi est. Enim facilisis gravida neque convallis a. In iaculis nunc sed augue lacus viverra vitae. Odio ut sem nulla pharetra diam. At urna condimentum mattis pellentesque id nibh tortor id aliquet. In vitae turpis massa sed elementum tempus egestas. Purus faucibus ornare suspendisse sed nisi lacus sed. Nulla facilisi etiam dignissim diam quis enim lobortis scelerisque fermentum. Adipiscing elit pellentesque habitant morbi tristique senectus et. Ac tortor dignissim convallis aenean et tortor at risus viverra. Condimentum mattis pellentesque id nibh tortor id aliquet. Iaculis nunc sed augue lacus viverra. Turpis egestas integer eget aliquet nibh praesent. Ultricies lacus sed turpis tincidunt id aliquet. 
Magna fermentum iaculis eu non diam phasellus. Id consectetur purus ut faucibus pulvinar elementum. In cursus turpis massa tincidunt dui ut ornare lectus. Tristique senectus et netus et malesuada fames. Adipiscing tristique risus nec feugiat in fermentum posuere urna nec. Tristique risus nec feugiat in fermentum. Leo duis ut diam quam nulla. Tristique sollicitudin nibh sit amet commodo. Felis donec et odio pellentesque. Volutpat commodo sed egestas egestas fringilla phasellus. Eu ultrices vitae auctor eu augue ut lectus. Id nibh tortor id aliquet. Commodo viverra maecenas accumsan lacus vel facilisis volutpat est velit. Vitae et leo duis ut diam. Maecenas sed enim ut sem viverra aliquet eget sit amet. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Nec nam aliquam sem et. Arcu cursus euismod quis viverra nibh cras pulvinar. Lacus vestibulum sed arcu non. Nec feugiat nisl pretium fusce id. Velit scelerisque in dictum non. Nam aliquam sem et tortor consequat id porta nibh. Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Volutpat lacus laoreet non curabitur. Ut ornare lectus sit amet est placerat in egestas. Commodo quis imperdiet massa tincidunt nunc. Enim sed faucibus turpis in eu mi. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar proin. Ligula ullamcorper malesuada proin libero nunc consequat interdum. Nam libero justo laoreet sit amet cursus sit amet dictum. Lacus vel facilisis volutpat est velit. Eget aliquet nibh praesent tristique magna sit. Pellentesque nec nam aliquam sem et. Ultrices in iaculis nunc sed augue lacus. Vel quam elementum pulvinar etiam non quam. Amet porttitor eget dolor morbi non arcu risus quis. Erat velit scelerisque in dictum non consectetur a erat nam. Vulputate sapien nec sagittis aliquam. Odio euismod lacinia at quis. Enim eu turpis egestas pretium aenean pharetra magna ac placerat. Est pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus. Nulla facilisi morbi tempus iaculis urna id volutpat lacus laoreet. Mauris pharetra et ultrices neque ornare aenean euismod elementum nisi. Urna duis convallis convallis tellus id interdum velit laoreet. Turpis egestas sed tempus urna et. Sit amet risus nullam eget felis eget nunc. Laoreet suspendisse interdum consectetur libero. Dolor sit amet consectetur adipiscing elit. Sed velit dignissim sodales ut. Amet consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Morbi tincidunt ornare massa eget egestas purus viverra accumsan. Nunc aliquet bibendum enim facilisis gravida neque convallis. Ac tincidunt vitae semper quis lectus. Quisque egestas diam in arcu cursus. Senectus et netus et malesuada fames ac turpis egestas sed. Enim facilisis gravida neque convallis a. Aliquet nibh praesent tristique magna sit amet purus gravida quis. Vel orci porta non pulvinar. Et netus et malesuada fames ac turpis. Ipsum dolor sit amet consectetur. Ornare massa eget egestas purus. Mi sit amet mauris commodo quis imperdiet massa tincidunt nunc. Rhoncus aenean vel elit scelerisque mauris. Adipiscing elit duis tristique sollicitudin nibh sit amet. Dignissim cras tincidunt lobortis feugiat vivamus at. In hendrerit gravida rutrum quisque non tellus. Faucibus turpis in eu mi. Rhoncus urna neque viverra justo nec ultrices dui sapien. Adipiscing enim eu turpis egestas pretium aenean pharetra. Elit at imperdiet dui accumsan sit. Rhoncus mattis rhoncus urna neque. Viverra vitae congue eu consequat ac felis donec et odio. 
At augue eget arcu dictum varius duis at consectetur lorem. Convallis a cras semper auctor neque vitae tempus quam. Nunc vel risus commodo viverra maecenas accumsan. Odio ut enim blandit volutpat. Pellentesque elit ullamcorper dignissim cras tincidunt. Morbi tincidunt ornare massa eget egestas purus. Sed risus pretium quam vulputate dignissim suspendisse in est. Sed elementum tempus egestas sed sed risus pretium quam. Vitae auctor eu augue ut lectus. Ultrices sagittis orci a scelerisque purus. Interdum velit laoreet id donec ultrices tincidunt arcu. In fermentum et sollicitudin ac orci phasellus egestas tellus. Eu tincidunt tortor aliquam nulla facilisi cras fermentum. Posuere ac ut consequat semper viverra. Varius morbi enim nunc faucibus a pellentesque sit. Eu scelerisque felis imperdiet proin fermentum leo vel. Eget gravida cum sociis natoque penatibus et magnis. Accumsan in nisl nisi scelerisque. Laoreet suspendisse interdum consectetur libero id faucibus nisl tincidunt. Nunc mi ipsum faucibus vitae aliquet nec. Vestibulum rhoncus est pellentesque elit ullamcorper. Varius duis at consectetur lorem. Mollis nunc sed id semper risus. Eget duis at tellus at urna condimentum. Mauris pharetra et ultrices neque ornare aenean. Aliquet lectus proin nibh nisl condimentum id venenatis a condimentum. Aliquam vestibulum morbi blandit cursus risus. Et tortor at risus viverra. Orci sagittis eu volutpat odio facilisis mauris sit amet. Duis ultricies lacus sed turpis tincidunt id aliquet risus feugiat. Pharetra diam sit amet nisl suscipit. Suscipit adipiscing bibendum est ultricies integer quis auctor elit. Commodo nulla facilisi nullam vehicula ipsum a. Nunc vel risus commodo viverra maecenas accumsan lacus. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sit amet purus gravida quis. Habitasse platea dictumst quisque sagittis purus sit amet. Et malesuada fames ac turpis. Fringilla est ullamcorper eget nulla facilisi etiam dignissim. At tempor commodo ullamcorper a lacus vestibulum sed. Laoreet suspendisse interdum consectetur libero id faucibus nisl. Mauris pharetra et ultrices neque ornare aenean euismod. Urna molestie at elementum eu facilisis. Dignissim suspendisse in est ante in nibh. Nisl tincidunt eget nullam non nisi est. Aliquet porttitor lacus luctus accumsan tortor posuere ac. Amet cursus sit amet dictum sit amet justo donec. Turpis tincidunt id aliquet risus feugiat in. Porttitor leo a diam sollicitudin tempor. Malesuada fames ac turpis egestas integer eget aliquet nibh. Ut diam quam nulla porttitor massa id neque. Nec ultrices dui sapien eget mi proin sed. Ullamcorper malesuada proin libero nunc consequat interdum varius. Sit amet venenatis urna cursus. Quam adipiscing vitae proin sagittis. Nullam vehicula ipsum a arcu cursus vitae. Ac odio tempor orci dapibus ultrices in iaculis. Nec ultrices dui sapien eget mi proin sed. Nibh tortor id aliquet lectus. Consequat interdum varius sit amet mattis vulputate enim. Rhoncus est pellentesque elit ullamcorper. Massa vitae tortor condimentum lacinia quis vel. Urna duis convallis convallis tellus. Ac odio tempor orci dapibus ultrices in iaculis. Nibh sit amet commodo nulla facilisi nullam vehicula. Ut faucibus pulvinar elementum integer. Sed blandit libero volutpat sed cras ornare. Nullam ac tortor vitae purus faucibus. Viverra vitae congue eu consequat ac felis donec et. Eget nullam non nisi est sit amet facilisis. Ullamcorper morbi tincidunt ornare massa. Diam donec adipiscing tristique risus nec feugiat in fermentum posuere. 
Enim nulla aliquet porttitor lacus luctus. Accumsan lacus vel facilisis volutpat. Ultricies tristique nulla aliquet enim. Mauris rhoncus aenean vel elit. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Nisi lacus sed viverra tellus in hac habitasse platea dictumst. Elementum facilisis leo vel fringilla est ullamcorper. Velit scelerisque in dictum non consectetur a erat nam at. Nulla facilisi etiam dignissim diam quis enim lobortis scelerisque fermentum.""", "n": 2, "temperature": 0.9, "max_new_tokens":200 } } output_text = inference(input_event) for line in output_text: print("---") print(line) ```
anujnayyar1 commented 11 months ago

Please also see this traceback for further information:

Full traceback:

```
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[8], line 39
      1 input_event = {
      2     "input": {
      3         "prompt": """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. (...)
     35     }
     36 }
     38 # Call the inference function
---> 39 output_text = inference(input_event)
     41 for line in output_text:
     42     print("---")

Cell In[1], line 117, in inference(event)
    114 for key, value in settings.items():
    115     setattr(generator.settings, key, value)
--> 117 output_text = generator.generate_simple(prompt, max_new_tokens = max_new_tokens)
    118 doneout = []
    119 for idx, i in enumerate(output_text):

File ~/exllama/generator.py:316, in ExLlamaGenerator.generate_simple(self, prompt, max_new_tokens)
    313 self.end_beam_search()
    315 ids, mask = self.tokenizer.encode(prompt, return_mask = True)
--> 316 self.gen_begin(ids, mask = mask)
    318 max_new_tokens = min(max_new_tokens, self.model.config.max_seq_len - ids.shape[1])
    320 eos = torch.zeros((ids.shape[0],), dtype = torch.bool)

File ~/exllama/generator.py:186, in ExLlamaGenerator.gen_begin(self, in_tokens, mask)
    183 self.sequence_actual = in_tokens.clone()
    184 self.cache.current_seq_len = 0
--> 186 self.model.forward(self.sequence[:, :-1], self.cache, preprocess_only = True, lora = self.lora, input_mask = mask)

File ~/exllama/model.py:967, in ExLlama.forward(self, input_ids, cache, last_id_only, preprocess_only, lora, output_device, input_mask)
    964 _last_id_only = last_id_only
    965 _preprocess_only = preprocess_only or (chunk_end < q_len and last_id_only)
--> 967 r = self._forward(input_ids[:, chunk_begin : chunk_end],
    968                   cache,
    969                   _last_id_only,
    970                   _preprocess_only,
    971                   lora,
    972                   output_device,
    973                   input_mask)
    975 if not _preprocess_only:
    976     result = r if result is None else torch.cat((result, r), dim = 1)

File ~/exllama/model.py:1053, in ExLlama._forward(self, input_ids, cache, last_id_only, preprocess_only, lora, output_device, input_mask)
   1050 device = self.config.device_map.layers[i]
   1051 hidden_states = _move_tensor(hidden_states, device, "hidden_states", self.config)
-> 1053 hidden_states = decoder_layer.forward(hidden_states, cache, buffers[device], lora)
   1055 cache.current_seq_len += seq_len
   1057 # Early exit when we don't need logits

File ~/exllama/model.py:549, in ExLlamaDecoderLayer.forward(self, hidden_states, cache, buffer, lora)
    547 residual = hidden_states
    548 hidden_states = self.post_attention_layernorm.forward(hidden_states, buffer)
--> 549 hidden_states = self.mlp.forward(hidden_states, buffer, lora)
    550 hidden_states = residual + hidden_states
    552 return hidden_states

File ~/exllama/model.py:271, in ExLlamaMLP.forward(self, x, buffer, lora)
    269 y = self.act_fn(y)
    270 y *= self.up_proj.forward(x, lora)
--> 271 y = self.down_proj.forward(y, lora)
    273 return y

File ~/exllama/model.py:214, in Ex4bitLinear.forward(self, x, lora)
    212     out = cuda_ext.ext_q4_matmul(x, self.q4, self.width, lora_a, lora_b)
    213 else:
--> 214     out = cuda_ext.ext_q4_matmul(x, self.q4, self.width)
    216 # out = cuda_ext.ext_q4_matmul(x, self.q4, self.width)
    217 # if self.lora_applies(lora):
    218 #     out += self.lora_apply(lora, x)
    220 if self.bias is not None: out.add_(self.bias)

File ~/exllama/cuda_ext.py:105, in ext_q4_matmul(x, q4, q4_width, lora_A, lora_B)
    102 output = torch.empty((x.shape[0], q4_width), dtype = torch.float16, device = x.device)
    104 if lora_A is None:
--> 105     q4_matmul(x, q4, output)
    106 else:
    107     lora_temp = torch.empty((x.shape[0], lora_A.shape[1]), dtype = torch.float16, device = x.device)

RuntimeError: temp_state buffer is too small
```
nikshepsvn commented 11 months ago

I've had this issue while batching too and never really found a fix (https://github.com/turboderp/exllama/issues/160). I was able to get around it by just disabling batching, since I didn't NEED it, but would love a fix.
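A minimal sketch of that workaround against the script above (assumes the cache is created with batch_size=1): generate the n samples sequentially instead of as one batch. Slower, but every forward pass stays at batch size 1.

```python
# Workaround sketch: generate n samples one at a time instead of batching them.
# Assumes a generator whose cache was created with batch_size=1.
outputs = []
for _ in range(n):
    out = generator.generate_simple(prompt[0], max_new_tokens=max_new_tokens)
    outputs.append(out.replace(prompt[0], ""))   # strip the prompt, as in inference()
```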

nikshepsvn commented 11 months ago

@turboderp any chance you've been able to look at this?

turboderp commented 11 months ago

Sorry, didn't have time to look at this until now. There was a bug that caused the attention size calculation to override the max input length, which became an issue with batches of long inputs. It should be fixed now with the latest commit.
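Roughly what was going wrong, as an illustration only (the names and numbers below are illustrative, not the actual allocation code): the scratch buffers are sized from the configured maximum input length, so when the attention-size calculation overrode that length for a long batched input, a single forward chunk needed more temp space than had been reserved.

```python
# Illustrative sketch of the failure mode; not exllama's real buffer code.
hidden_size   = 5120    # Llama 2 13B
max_input_len = 2048    # chunk length the scratch buffers are sized for
batch_size    = 2

allocated = max_input_len * batch_size * hidden_size   # elements reserved up front
needed    = 4096 * batch_size * hidden_size            # chunk length after the bad override

print(needed > allocated)   # True -> "temp_state buffer is too small"
```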

anujnayyar1 commented 11 months ago

@turboderp worked like a charm. You are the MVP! Sent some ☕