Merge pull request #97 from Noir97/master

Update demo.ipynb
This commit is contained in:
Noir97
2023-10-16 11:54:39 +08:00
committed by GitHub

View File

@@ -228,7 +228,7 @@
],
"source": [
"base_model = 'llama2'\n",
-"peft_model = 'fingpt-mt_llama2-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-llama2-linear_202309241345'\n",
+"peft_model = 'FinGPT/fingpt-mt_llama2-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-llama2-linear_202309241345'\n",
"\n",
"model, tokenizer = load_model(base_model, peft_model, FROM_REMOTE)\n",
"test_demo(model, tokenizer)"
@@ -331,7 +331,7 @@
],
"source": [
"base_model = 'qwen'\n",
-"peft_model = 'fingpt-mt_qwen-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-qwen-linear_202309221011'\n",
+"peft_model = 'FinGPT/fingpt-mt_qwen-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-qwen-linear_202309221011'\n",
"\n",
"model, tokenizer = load_model(base_model, peft_model, FROM_REMOTE)\n",
"test_demo(model, tokenizer)"
@@ -422,7 +422,7 @@
],
"source": [
"base_model = 'falcon'\n",
-"peft_model = 'fingpt-mt_falcon-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-falcon-linear_202309210126'\n",
+"peft_model = 'FinGPT/fingpt-mt_falcon-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-falcon-linear_202309210126'\n",
"\n",
"model, tokenizer = load_model(base_model, peft_model, FROM_REMOTE)\n",
"test_demo(model, tokenizer)"
@@ -504,7 +504,7 @@
],
"source": [
"base_model = 'chatglm2'\n",
-"peft_model = 'fingpt-mt_chatglm2-6b_lora' if FROM_REMOTE else 'finetuned_models/MT-chatglm2-linear_202309201120'\n",
+"peft_model = 'FinGPT/fingpt-mt_chatglm2-6b_lora' if FROM_REMOTE else 'finetuned_models/MT-chatglm2-linear_202309201120'\n",
"\n",
"model, tokenizer = load_model(base_model, peft_model, FROM_REMOTE)\n",
"test_demo(model, tokenizer)"
@@ -586,7 +586,7 @@
],
"source": [
"base_model = 'bloom'\n",
-"peft_model = 'fingpt-mt_bloom-7b1_lora' if FROM_REMOTE else 'finetuned_models/MT-bloom-linear_202309211510'\n",
+"peft_model = 'FinGPT/fingpt-mt_bloom-7b1_lora' if FROM_REMOTE else 'finetuned_models/MT-bloom-linear_202309211510'\n",
"\n",
"model, tokenizer = load_model(base_model, peft_model, FROM_REMOTE)\n",
"test_demo(model, tokenizer)"
@@ -684,7 +684,7 @@
],
"source": [
"base_model = 'mpt'\n",
-"peft_model = 'fingpt-mt_mpt-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-mpt-linear_202309230221'\n",
+"peft_model = 'FinGPT/fingpt-mt_mpt-7b_lora' if FROM_REMOTE else 'finetuned_models/MT-mpt-linear_202309230221'\n",
"\n",
"model, tokenizer = load_model(base_model, peft_model, FROM_REMOTE)\n",
"test_demo(model, tokenizer)"