{"id":593,"date":"2024-02-19T14:52:40","date_gmt":"2024-02-19T06:52:40","guid":{"rendered":"http:\/\/lingbo.online\/?p=593"},"modified":"2024-03-06T10:24:29","modified_gmt":"2024-03-06T02:24:29","slug":"llama-factory","status":"publish","type":"post","link":"https:\/\/lingbo.online\/index.php\/algorithm_learning\/llms\/llama-factory\/","title":{"rendered":"LLaMA-Factory \u4f7f\u7528\u653b\u7565"},"content":{"rendered":"<h1>\u5355GPU\u8bad\u7ec3<\/h1>\n<h2>\u9884\u8bad\u7ec3<\/h2>\n<pre><code class=\"language-bash\">CUDA_VISIBLE_DEVICES=0 python src\/train_bash.py \\\n    --stage pt \\                               # Pre\u2014\u2014Training\u9884\u8bad\u7ec3\u6a21\u5f0f\n    --model_name_or_path path_to_llama_model \\ # \u6a21\u578b\u5730\u5740\n    --do_train \\                               # \u8868\u793a\u8fdb\u884c\u8bad\u7ec3\n    --dataset wiki_demo \\                      # \u4f7f\u7528\u7684\u6570\u636e\u96c6\n    --finetuning_type lora \\                   # \u5fae\u8c03\u7684\u65b9\u6cd5\n    --lora_target W_pack \\                     # LoRA\u4f5c\u7528\u6a21\u5757\uff1aBaichuan\u4e3aW_pack\n    --output_dir path_to_pt_checkpoint \\       # \u65ad\u70b9\u4fdd\u5b58\uff1a\u4fdd\u5b58\u6a21\u578b\u65ad\u70b9\u7684\u4f4d\u7f6e\n    --overwrite_cache \\                        # \u8868\u793a\u662f\u5426\u8986\u76d6\u7f13\u5b58\u6587\u4ef6\n    --per_device_train_batch_size 4 \\          # \u6279\u5904\u7406\u5927\u5c0f\uff1a\u6bcf\u5757 GPU \u4e0a\u5904\u7406\u7684\u6837\u672c\u6570\u91cf\n    --gradient_accumulation_steps 4 \\          # \u68af\u5ea6\u7d2f\u79ef\uff1a\u68af\u5ea6\u7d2f\u79ef\u7684\u6b65\u6570\uff08\u8282\u7701\u663e\u5b58\u7684\u65b9\u6cd5\uff09\n    --lr_scheduler_type cosine \\               # \u5b66\u4e60\u7387\u8c03\u8282\u5668\uff1a\u91c7\u7528\u7684\u5b66\u4e60\u7387\u8c03\u8282\u5668\u540d\u79f0\n    --logging_steps 10 \\                       # \u65e5\u5fd7\u95f4\u9694\uff1a\u6bcf\u4e24\u6b21\u65e5\u5fd7\u8f93\u51fa\u95f4\u7684\u66f4\u65b0\u6b65\u6570\n    --save_steps 1000 \\                        # \u4fdd\u5b58\u95f4\u9694\uff1a\u6bcf\u4e24\u6b21\u65ad\u70b9\u4fdd\u5b58\u95f4\u7684\u66f4\u65b0\u6b65\u6570\n    --learning_rate 5e-5 \\                     # \u5b66\u4e60\u7387\uff1aAdamW\u4f18\u5316\u5668\u7684\u521d\u59cb\u5b66\u4e60\u7387\n    --num_train_epochs 3.0 \\                   # \u8bad\u7ec3\u8f6e\u6570\uff1a\u9700\u8981\u6267\u884c\u7684\u8bad\u7ec3\u603b\u8f6e\u6570\n    --plot_loss \\                              # \u7ed8\u5236\u635f\u5931\u51fd\u6570\u56fe\n    --fp16                                     # \u8ba1\u7b97\u7c7b\u578b\uff1a\u662f\u5426\u542f\u7528fp16\u6216bf16\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u3002<\/code><\/pre>\n<h2>\u6307\u4ee4\u76d1\u7763\u5fae\u8c03<\/h2>\n<pre><code class=\"language-bash\">CUDA_VISIBLE_DEVICES=0 python src\/train_bash.py \\\n    --stage sft \\\n    --model_name_or_path path_to_llama_model \\\n    --do_train \\\n    --dataset alpaca_gpt4_zh \\        # \u63d0\u793a\u6a21\u677f\uff1a\u6784\u5efa\u63d0\u793a\u8bcd\u65f6\u4f7f\u7528\u7684\u6a21\u677f\n    --template default \\              \n    --finetuning_type lora \\\n    --lora_target W_pack \\\n    --output_dir path_to_sft_checkpoint \\\n    --overwrite_cache \\\n    --per_device_train_batch_size 4 \\\n    --gradient_accumulation_steps 4 \\\n    --lr_scheduler_type cosine \\\n    --logging_steps 10 \\\n    --save_steps 1000 \\\n    --learning_rate 5e-5 \\\n    --num_train_epochs 3.0 \\\n    --plot_loss \\\n    
<h2>Reward Model Training</h2>
<pre><code class="language-bash">CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --stage rm \
    --model_name_or_path path_to_llama_model \
    --do_train \
    --dataset comparison_gpt4_zh \              # pairwise comparison dataset for reward modeling
    --template default \
    --finetuning_type lora \
    --lora_target W_pack \
    --resume_lora_training False \              # continue training the last LoRA weights, or create new ones
    --checkpoint_dir path_to_sft_checkpoint \   # checkpoint of the instruction-tuned (SFT) model
    --output_dir path_to_rm_checkpoint \        # where to save the reward model
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 4 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate 1e-6 \
    --num_train_epochs 1.0 \
    --plot_loss \
    --fp16</code></pre>
<h2>PPO Training</h2>
<p>PPO training requires the reward model trained in the previous step: load the fine-tuned model together with the RM and train.</p>
<pre><code class="language-bash">CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --stage ppo \
    --model_name_or_path path_to_llama_model \
    --do_train \
    --dataset alpaca_gpt4_zh \
    --template default \
    --finetuning_type lora \
    --lora_target W_pack \
    --resume_lora_training False \
    --checkpoint_dir path_to_sft_checkpoint \   # load the SFT checkpoint model
    --reward_model path_to_rm_checkpoint \      # path to the reward model checkpoint
    --output_dir path_to_ppo_checkpoint \       # where to save PPO checkpoints
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 4 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate 1e-5 \
    --num_train_epochs 1.0 \
    --plot_loss</code></pre>
<h2>DPO Training</h2>
<p>DPO does not need a separately trained RM: load the fine-tuned model and train directly on comparison data.</p>
<pre><code class="language-bash">CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --stage dpo \
    --model_name_or_path path_to_llama_model \
    --do_train \
    --dataset comparison_gpt4_zh \
    --template default \
    --finetuning_type lora \
    --lora_target W_pack \
    --resume_lora_training False \
    --checkpoint_dir path_to_sft_checkpoint \
    --output_dir path_to_dpo_checkpoint \
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 4 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate 1e-5 \
    --num_train_epochs 1.0 \
    --plot_loss \
    --fp16</code></pre>
<h1>Multi-GPU Training</h1>
<h2>Using DeepSpeed</h2>
<p>Multi-GPU training requires a DeepSpeed configuration, saved as ds_config.json. Example:</p>
<pre><code class="language-json">{
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "gradient_clipping": "auto",
  "zero_allow_untested_optimizer": true,
  "fp16": {
    "enabled": "auto",
    "loss_scale": 0,
    "initial_scale_power": 16,
    "loss_scale_window": 1000,
    "hysteresis": 2,
    "min_loss_scale": 1
  },
  "zero_optimization": {
    "stage": 2,
    "allgather_partitions": true,
    "allgather_bucket_size": 5e8,
    "reduce_scatter": true,
    "reduce_bucket_size": 5e8,
    "overlap_comm": false,
    "contiguous_gradients": true
  }
}</code></pre>
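<p>With the "auto" values above, DeepSpeed derives the batch settings from the training arguments under the constraint train_batch_size = train_micro_batch_size_per_gpu × gradient_accumulation_steps × number of GPUs. For the SFT command below (per-device batch size 4, accumulation steps 4, 2 GPUs), the effective global batch size is therefore 4 × 4 × 2 = 32.</p>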
<h2>Training</h2>
<p>Multi-GPU training takes essentially the same arguments as single-GPU training, plus the number of GPUs and the master port. For the SFT stage, for example:</p>
<pre><code class="language-bash">USE_MODELSCOPE_HUB=1 HF_ENDPOINT=https://hf-mirror.com deepspeed --num_gpus 2 --master_port=30422 src/train_bash.py \
    --deepspeed ./ds_config.json \
    --stage sft \
    --model_name_or_path ./Qwen1.5-1.8B \
    --do_train \
    --dataset self_cognition \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir output/test1/ \
    --overwrite_cache \
    --per_device_train_batch_size 4 \
    --gradient_accumulation_steps 4 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 50 \
    --learning_rate 5e-5 \
    --num_train_epochs 100 \
    --plot_loss \
    --overwrite_output_dir True \
    --fp16</code></pre>
<h1>Model Evaluation</h1>
<pre><code class="language-bash">CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
    --model_name_or_path path_to_llama_model \
    --finetuning_type lora \
    --checkpoint_dir path_to_checkpoint \
    --template vanilla \
    --task ceval \
    --split validation \
    --lang zh \
    --n_shot 5 \
    --batch_size 4</code></pre>
<h1>Model Prediction</h1>
<pre><code class="language-bash">CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --stage sft \
    --model_name_or_path path_to_llama_model \
    --do_predict \
    --dataset alpaca_gpt4_zh \
    --template default \
    --finetuning_type lora \
    --checkpoint_dir path_to_checkpoint \
    --output_dir path_to_predict_result \
    --per_device_eval_batch_size 8 \
    --max_samples 100 \                     # maximum number of samples to use per dataset
    --predict_with_generate</code></pre>
<p>For prediction with quantized models, the following settings are recommended:</p>
<pre><code class="language-bash">--per_device_eval_batch_size=1 \
--max_target_length 128</code></pre>
<h1>Model Testing</h1>
<h2>Browser Test</h2>
<pre><code class="language-bash">python src/web_demo.py \
    --model_name_or_path ./Qwen1.5-1.8B \
    --template default \
    --finetuning_type lora \
    --checkpoint_dir ./output/test1/checkpoint-200</code></pre>
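<p>After testing, the LoRA weights can be merged into the base model for standalone deployment. LLaMA-Factory of this vintage ships src/export_model.py for this; the sketch below assumes its flags mirror the training scripts above, so verify them against your checkout (python src/export_model.py --help) before use.</p>
<pre><code class="language-bash"># Merge the LoRA checkpoint into the base model and write a full model
# directory (flags are assumptions; check --help for your version).
python src/export_model.py \
    --model_name_or_path ./Qwen1.5-1.8B \
    --template default \
    --finetuning_type lora \
    --checkpoint_dir ./output/test1/checkpoint-200 \
    --export_dir ./output/merged</code></pre>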
<h1>Troubleshooting</h1>
<ol>
<li>TimeoutConnect error: because Hugging Face is blocked in mainland China, model resources may temporarily fail to download from the Hub; download the files to local storage instead. <strong>When configuring the training arguments, use an absolute path or a ./ prefix to guarantee that resources are loaded from the local files.</strong></li>
</ol>
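<p>For the download itself, a mirror endpoint can stand in for huggingface.co. A minimal sketch using the huggingface_hub CLI (the mirror URL matches the one used in the multi-GPU command above; the repo id Qwen/Qwen1.5-1.8B is just an example):</p>
<pre><code class="language-bash"># Install the CLI if needed: pip install -U huggingface_hub
export HF_ENDPOINT=https://hf-mirror.com   # route Hub traffic through the mirror
huggingface-cli download Qwen/Qwen1.5-1.8B --local-dir ./Qwen1.5-1.8B
# Then pass --model_name_or_path ./Qwen1.5-1.8B so everything loads locally.</code></pre>
<p>Alternatively, set USE_MODELSCOPE_HUB=1 (as in the DeepSpeed example above) to pull models from ModelScope instead of Hugging Face.</p>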