THUDM / VisualGLM-6B

Chinese and English multimodal conversational language model
Apache License 2.0

My dataset is too large, causing out-of-memory #283

Open AshOneN opened 11 months ago

AshOneN commented 11 months ago

In the fine-tuning code, the class FewShotDataset(Dataset) reads the entire JSON file, but my dataset is huge: the JSON file holds roughly three million image-text pairs, so it immediately runs out of memory. Is there any way to solve this and scale the few-shot fine-tuning setup to a larger dataset?
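One way to avoid json.load materializing the whole three-million-entry array is to stream-parse it. The sketch below uses the ijson library; it is an illustration, not part of the original fine-tuning code, and it assumes the file is a top-level JSON array of {'img', 'prompt', 'label'} objects:

    # Sketch (assumption, not from this thread): stream-parse a huge JSON
    # array with ijson so it is never fully held in memory at once.
    import ijson

    def iter_records(path):
        with open(path, 'rb') as f:
            # 'item' yields each element of the top-level array one at a time
            for record in ijson.items(f, 'item'):
                yield record

    # Usage: count the pairs without loading the 3M-entry file at once
    # n = sum(1 for _ in iter_records('train.json'))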

chenjingcheng commented 10 months ago

I ran into the same problem. You have to change how the data is loaded; here is my modified code:

    import json
    import random

    from PIL import Image
    from torch.utils.data import Dataset


    class FewShotDataset(Dataset):
        def __init__(self, path, processor, tokenizer, args):
            # Only the JSON metadata is loaded up front; images are opened
            # and preprocessed lazily in __getitem__, so memory stays bounded.
            with open(path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            self.items = list(data)
            random.shuffle(self.items)

            self.processor = processor
            self.tokenizer = tokenizer
            self.args = args

        def __len__(self):
            return len(self.items)

        def __getitem__(self, idx):
            item = self.items[idx]
            processor = self.processor
            tokenizer = self.tokenizer
            args = self.args

            max_seq_length = args.max_source_length + args.max_target_length
            image = processor(Image.open(item['img']).resize((128, 128)).convert('RGB'))

            # Build the source sequence: <img> placeholder tokens + prompt.
            input0 = tokenizer.encode("<img>", add_special_tokens=False)
            input1 = [tokenizer.pad_token_id] * args.image_length
            input2 = tokenizer.encode("</img>问:" + item['prompt'] + "\n答:", add_special_tokens=False)
            a_ids = sum([input0, input1, input2], [])
            b_ids = tokenizer.encode(text=item['label'], add_special_tokens=False)
            if len(a_ids) > args.max_source_length - 1:
                a_ids = a_ids[: args.max_source_length - 1]
            if len(b_ids) > args.max_target_length - 2:
                b_ids = b_ids[: args.max_target_length - 2]
            pre_image = len(input0)
            input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)

            # Mask out the context so the loss is computed only on the answer.
            context_length = input_ids.index(tokenizer.bos_token_id)
            mask_position = context_length - 1
            labels = [-100] * context_length + input_ids[mask_position + 1:]

            # Pad both sequences to the fixed maximum length.
            pad_len = max_seq_length - len(input_ids)
            input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
            labels = labels + [tokenizer.pad_token_id] * pad_len
            if args.ignore_pad_token_for_loss:
                labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]

            return {
                "image": image,
                "input_ids": input_ids,
                "labels": labels,
                "pre_image": pre_image
            }
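
Note that this version still json.loads the entire metadata file in __init__; it only defers image loading and tokenization to __getitem__. If the metadata itself no longer fits in memory, a further step (a sketch, not from this thread) is to convert the JSON array to JSON Lines and index byte offsets, so each record is read from disk on demand:

    import json
    from torch.utils.data import Dataset

    class OffsetIndexedJsonl(Dataset):
        """Sketch: lazily read records from a .jsonl file via byte offsets."""
        def __init__(self, path):
            self.path = path
            self.offsets = []
            with open(path, 'rb') as f:
                offset = 0
                for line in f:          # one JSON object per line
                    self.offsets.append(offset)
                    offset += len(line)

        def __len__(self):
            return len(self.offsets)

        def __getitem__(self, idx):
            with open(self.path, 'rb') as f:
                f.seek(self.offsets[idx])
                return json.loads(f.readline())

Memory then scales with the number of records (one integer offset each) rather than with their content, and reopening the file on each access keeps the dataset safe to use with multiple DataLoader workers.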
1049451037 commented 10 months ago

You can refer to how the dataset is written in CogVLM: https://github.com/THUDM/CogVLM/blob/main/utils/dataset.py
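Whatever that file does exactly, a common pattern for image-text data at this scale is to pack samples into sharded tar archives and stream them with the webdataset library. A minimal sketch; the shard names and the .jpg/.json field extensions are illustrative assumptions, not the actual CogVLM layout:

    import webdataset as wds

    # Sketch: stream (image, metadata) pairs from tar shards instead of
    # loading one giant JSON file. Shard naming here is hypothetical.
    dataset = (
        wds.WebDataset("shards-{000000..000255}.tar")
        .decode("pil")              # decode images to PIL.Image
        .to_tuple("jpg", "json")    # yield (image, metadata) pairs
    )

    for image, meta in dataset:
        pass  # feed into the same preprocessing as __getitem__ above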

AshOneN commented 10 months ago

> I ran into the same problem. You have to change how the data is loaded; here is my modified code: […]

Thank you very much!

AshOneN commented 10 months ago

> You can refer to how the dataset is written in CogVLM: https://github.com/THUDM/CogVLM/blob/main/utils/dataset.py

Thanks for the pointer. I'll take a look and learn from it.