From af1d4f2f111c47ceb497d2bb073c0833d67fd9bb Mon Sep 17 00:00:00 2001
From: zhipuch
Date: Mon, 4 Nov 2024 21:33:07 +0800
Subject: [PATCH] adapt transformers==4.46

---
 finetune_demo/configs/lora.yaml  | 1 +
 finetune_demo/finetune_vision.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/finetune_demo/configs/lora.yaml b/finetune_demo/configs/lora.yaml
index 1875424..95cf26a 100644
--- a/finetune_demo/configs/lora.yaml
+++ b/finetune_demo/configs/lora.yaml
@@ -47,3 +47,4 @@ peft_config:
   lora_alpha: 32
   lora_dropout: 0.1
   target_modules: ["query_key_value"]
+  # target_modules: ["q_proj", "k_proj", "v_proj"] if the model is glm-4-9b-chat-hf
diff --git a/finetune_demo/finetune_vision.py b/finetune_demo/finetune_vision.py
index b86c91c..cd859ff 100644
--- a/finetune_demo/finetune_vision.py
+++ b/finetune_demo/finetune_vision.py
@@ -399,7 +399,7 @@ def load_tokenizer_and_model(
         model = AutoModelForCausalLM.from_pretrained(
             model_dir,
             trust_remote_code=True,
-            empty_init=False,
+            empty_init=False,  # if transformers>=4.46 and loading glm-4-9b-chat-hf, delete this argument
             use_cache=False,
             torch_dtype=torch.bfloat16  # Must use BFloat 16
         )
@@ -409,7 +409,7 @@
         model = AutoModelForCausalLM.from_pretrained(
             model_dir,
             trust_remote_code=True,
-            empty_init=False,
+            empty_init=False,  # if transformers>=4.46 and loading glm-4-9b-chat-hf, delete this argument
            use_cache=False,
             torch_dtype=torch.bfloat16
         )
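
Note: for reference, a minimal sketch of what the two changes above amount to when loading the HF-format checkpoint under transformers>=4.46: the empty_init kwarg is dropped, and LoRA targets the HF-style projection names. The model id and LoRA rank below are illustrative assumptions; the demo's actual hyperparameters live in lora.yaml.

    # Minimal sketch, not part of the patch.
    import torch
    from transformers import AutoModelForCausalLM
    from peft import LoraConfig, get_peft_model

    model = AutoModelForCausalLM.from_pretrained(
        "THUDM/glm-4-9b-chat-hf",    # assumed HF-format checkpoint id
        trust_remote_code=True,
        use_cache=False,
        torch_dtype=torch.bfloat16,  # no empty_init kwarg for the hf checkpoint
    )

    peft_config = LoraConfig(
        r=8,                         # assumed rank
        lora_alpha=32,
        lora_dropout=0.1,
        target_modules=["q_proj", "k_proj", "v_proj"],  # hf naming, per the lora.yaml note
    )
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()

With the original (non-hf) checkpoint, target_modules stays ["query_key_value"] and empty_init=False is kept, as in the unmodified files.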