Fine-tuning Qwen2.5-VL

Following the approach described on GitHub, fine-tuning on a T4 makes flash-attn throw errors, so I instead referred to the CSDN post "Qwen2-VL多模态大模型微调实战(完整代码)".

1. Set up the environment

conda create -n qwen2-5-vl-0429 python==3.10 -y

pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121

pip install transformers==4.50.0

pip install accelerate==1.4.0

pip install modelscope

pip install qwen-vl-utils[decord]

pip install swanlab

pip install peft
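
Before training, a quick sanity check can confirm the environment is usable. This is a minimal sketch; it only verifies that PyTorch sees the GPUs and that this transformers build ships the Qwen2.5-VL classes:

import torch
# Importing the Qwen2.5-VL classes confirms the installed transformers version supports them
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("visible GPUs:", torch.cuda.device_count())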

2. Prepare your data according to your actual needs (the expected annotation format is sketched below).
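
The training code below reads each sample's image path from an "image" field and the prompt/answer pair from a "conversations" list: the first entry holds the user prompt (only the text before the <image> placeholder is used), and the second entry holds the expected answer. Here is a minimal sketch of producing such an annotations.json; the image path and texts are placeholders, not my actual data:

import json

# One sample in the format expected by process_func below
samples = [
    {
        "image": "images/0001.jpg",  # placeholder path to a training image
        "conversations": [
            {"from": "user", "value": "Describe this picture.<image>"},  # user prompt with the <image> tag
            {"from": "assistant", "value": "The expected answer for this image."},  # target output
        ],
    },
]

with open("./qwenvl/data/my_dataset/annotations.json", "w", encoding="utf-8") as f:
    json.dump(samples, f, ensure_ascii=False, indent=2)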

3. Training code. I first went through the official GitHub tutorial and organized my data following its examples, so this part differs from the CSDN post; in short, you just need to extract the input text, output text, and image path from your own data. The model weights were downloaded in advance following the instructions on the 通义千问2.5-VL-7B-Instruct page in the ModelScope model library (a download sketch is shown below), so the local model path also differs from the post, and I removed the test-set part.
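
For reference, the weights can be fetched from ModelScope with snapshot_download. This is a sketch: local_dir is assumed to be supported by your modelscope version; otherwise use cache_dir and point the paths below at the resulting directory.

from modelscope import snapshot_download

# Download Qwen2.5-VL-7B-Instruct into the local directory used by the scripts below
model_dir = snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct", local_dir="./train_model")
print("model downloaded to:", model_dir)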

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3,4,5,6,7"

import torch
from datasets import Dataset
from modelscope import snapshot_download, AutoTokenizer
from swanlab.integration.transformers import SwanLabCallback
from qwen_vl_utils import process_vision_info
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
from transformers import (
    TrainingArguments,
    Trainer,
    DataCollatorForSeq2Seq,
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
)
import swanlab
# import json


def process_func(example):
    """
    将数据集进行预处理
    """
    MAX_LENGTH = 8192
    input_ids, attention_mask, labels = [], [], []
    file_path = example["image"]
    conversation = example["conversations"]
    input_content = conversation[0]["value"].split('<image>')[0]
    output_content = conversation[1]["value"]
    # prompt = input_content.split("<|vision_start|>")[1].split("<|vision_end|>")[0]  # extract the image path
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": f"{file_path}",
                    "resized_height": 280,
                    "resized_width": 280,
                },
                {"type": "text", "text": input_content},
            ],
        }
    ]
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )  # build the chat-formatted prompt text
    image_inputs, video_inputs = process_vision_info(messages)  # preprocess the image/video inputs
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = {key: value.tolist() for key, value in inputs.items()}  # tensor -> list, to make concatenation easier
    instruction = inputs

    response = tokenizer(f"{output_content}", add_special_tokens=False)

    input_ids = (
            instruction["input_ids"][0] + response["input_ids"] + [tokenizer.pad_token_id]
    )

    attention_mask = instruction["attention_mask"][0] + response["attention_mask"] + [1]
    labels = (
            [-100] * len(instruction["input_ids"][0])
            + response["input_ids"]
            + [tokenizer.pad_token_id]
    )
    if len(input_ids) > MAX_LENGTH:  # truncate overly long sequences
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]

    input_ids = torch.tensor(input_ids)
    attention_mask = torch.tensor(attention_mask)
    labels = torch.tensor(labels)
    inputs['pixel_values'] = torch.tensor(inputs['pixel_values'])
    inputs['image_grid_thw'] = torch.tensor(inputs['image_grid_thw']).squeeze(0)  # reshape from (1, h, w) to (h, w)
    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels,
            "pixel_values": inputs['pixel_values'], "image_grid_thw": inputs['image_grid_thw']}


def predict(messages, model):
    # Prepare inputs for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Generate the output
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


# Load the model weights with Transformers
tokenizer = AutoTokenizer.from_pretrained("./train_model/", use_fast=False, trust_remote_code=True)
processor = AutoProcessor.from_pretrained("./train_model")

model = Qwen2_5_VLForConditionalGeneration.from_pretrained("./train_model/", device_map="auto",
                                                        torch_dtype=torch.bfloat16, trust_remote_code=True, )
model.enable_input_require_grads()  # required when gradient checkpointing is enabled


train_ds = Dataset.from_json("./qwenvl/data/my_dataset/annotations.json")
train_dataset = train_ds.map(process_func)

# Configure LoRA
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=False,  # training mode
    r=64,  # LoRA rank
    lora_alpha=16,  # LoRA alpha; see the LoRA paper for its effect
    lora_dropout=0.05,  # dropout ratio
    bias="none",
)

# Wrap the base model with the LoRA adapter
peft_model = get_peft_model(model, config)

# Configure training arguments
args = TrainingArguments(
    output_dir="./output/Qwen2-5-VL-7B",
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    logging_steps=10,
    logging_first_step=True,
    num_train_epochs=2,
    save_steps=100,
    learning_rate=1e-4,
    save_on_each_node=True,
    gradient_checkpointing=True,
    report_to="none",
)

# Set up the SwanLab callback
swanlab_callback = SwanLabCallback(
    experiment_name="qwen2-5-vl-finetune"
)


# Configure the Trainer
trainer = Trainer(
    model=peft_model,
    args=args,
    train_dataset=train_dataset,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
    callbacks=[swanlab_callback],
)

# Start training
trainer.train()
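
Trainer already writes a checkpoint every save_steps steps (the test code further below loads checkpoint-100), but the final LoRA adapter can also be saved explicitly if you prefer a fixed path. A sketch, appended at the end of the script above; the output directory name is arbitrary:

# Optionally save the final LoRA adapter to a fixed location
peft_model.save_pretrained("./output/Qwen2-5-VL-7B/final_adapter")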

4. The first time you run the script in the foreground, there are two interactive prompts (screenshots cannot be uploaded here, so a brief description follows).

Just follow the prompts to choose and type. I had already registered a SwanLab account, so when it asked whether I had an account, I typed the number next to the "already have an account" option and pressed Enter, then pasted my API key and pressed Enter again; after that, the training run can be monitored in SwanLab.

The API key can be found by clicking your avatar at the bottom-left of the workspace and opening the settings page.

Alternatively, you can add the following to your code:

import swanlab
swanlab.login(api_key="your-api-key", save=True)

-------------------------------------------------------------------------------------------------------------------------------

The steps above were just to get the pipeline running end to end; afterwards I trained for more epochs. Below is the test code, which produces output in the format I want:

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3,4,5,6,7"

from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
from peft import PeftModel, LoraConfig, TaskType

config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=True,
    r=64,  # LoRA rank
    lora_alpha=16,  # LoRA alpha; see the LoRA paper for its effect
    lora_dropout=0.05,  # dropout ratio
    bias="none",
)

# default: Load the model on the available device(s)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "./train_model", torch_dtype="auto", device_map="auto"
)
model = PeftModel.from_pretrained(model, model_id="./output/Qwen2-5-VL-7B/checkpoint-100", config=config)
processor = AutoProcessor.from_pretrained("./train_model/")

def reference(image_url, prompt):
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_url,
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # Preparation for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    return output_text


if __name__ == '__main__':
    prompt = "你的提示词(我是针对自己的业务训练,每次的提示词格式一样,就是内容有一些改变,这里和训练时候的格式保持一致)"
    reference('2.jpg', prompt)
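
Continuing from the script above, the LoRA weights can also be merged back into the base model for standalone deployment. A sketch using PEFT's merge_and_unload; the output path is a placeholder:

# Optionally merge the adapter into the base weights and save a standalone model
merged_model = model.merge_and_unload()
merged_model.save_pretrained("./output/Qwen2-5-VL-7B-merged")
processor.save_pretrained("./output/Qwen2-5-VL-7B-merged")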
