From 0ef093c1a2a6306003549093f40483671d8bdafe Mon Sep 17 00:00:00 2001 From: zhipuch Date: Tue, 24 Sep 2024 11:15:20 +0000 Subject: [PATCH] update readme --- basic_demo/README.md | 10 ++++++++++ basic_demo/README_en.md | 12 ++++++++++++ 2 files changed, 22 insertions(+) diff --git a/basic_demo/README.md b/basic_demo/README.md index 8f76576..cd747ea 100644 --- a/basic_demo/README.md +++ b/basic_demo/README.md @@ -142,5 +142,15 @@ python openai_api_request.py python trans_stress_test.py ``` +## 使用昇腾卡运行代码 +用户可以在昇腾硬件环境下运行以上代码,只需将transformers修改为openmind,将device中的cuda设备修改为npu: + +```python +#from transformers import AutoModelForCausalLM, AutoTokenizer from openmind import AutoModelForCausalLM, AutoTokenizer + +#device = 'cuda' +device = 'npu' +``` diff --git a/basic_demo/README_en.md b/basic_demo/README_en.md index 9fc53ab..434c08c 100644 --- a/basic_demo/README_en.md +++ b/basic_demo/README_en.md @@ -147,3 +147,15 @@ Users can use this code to test the generation speed of the model on the transfo ```shell python trans_stress_test.py ``` + +## Use an Ascend card to run the code + +Users can run the above code in an Ascend hardware environment. They only need to replace `transformers` with `openmind` and change the `cuda` device in `device` to `npu`: + +```python +#from transformers import AutoModelForCausalLM, AutoTokenizer from openmind import AutoModelForCausalLM, AutoTokenizer + +#device = 'cuda' +device = 'npu' +```