From c0a6d1e0fa744ebc47d5245aa479cb9e455f2976 Mon Sep 17 00:00:00 2001
From: duzx16 <904663169@qq.com>
Date: Wed, 5 Jun 2024 10:22:16 +0800
Subject: [PATCH] init commit
---
.github/ISSUE_TEMPLATE/bug_report.yaml | 72 +
.github/ISSUE_TEMPLATE/feature-request.yaml | 34 +
.github/PULL_REQUEST_TEMPLATE/pr_template.md | 34 +
.gitignore | 6 +
LICENSE | 201 +
README.md | 272 ++
README_en.md | 280 ++
basic_demo/README.md | 114 +
basic_demo/README_en.md | 114 +
basic_demo/openai_api_request.py | 88 +
basic_demo/openai_api_server.py | 543 +++
basic_demo/requirements.txt | 23 +
basic_demo/trans_batch_demo.py | 90 +
basic_demo/trans_cli_demo.py | 120 +
basic_demo/trans_stress_test.py | 128 +
basic_demo/trans_web_demo.py | 165 +
basic_demo/vllm_cli_demo.py | 108 +
composite_demo/.gitignore | 181 +
composite_demo/README.md | 167 +
composite_demo/README_en.md | 155 +
composite_demo/assets/cogview.png | Bin 0 -> 1957719 bytes
composite_demo/assets/demo.png | Bin 0 -> 629325 bytes
composite_demo/assets/doc_reader.png | Bin 0 -> 1148207 bytes
composite_demo/assets/tool.png | Bin 0 -> 617637 bytes
composite_demo/assets/vlm.png | Bin 0 -> 700380 bytes
composite_demo/assets/weather.png | Bin 0 -> 1036977 bytes
composite_demo/assets/web_plot_1.png | Bin 0 -> 958309 bytes
composite_demo/assets/web_plot_2.png | Bin 0 -> 871637 bytes
composite_demo/browser/.gitignore | 144 +
composite_demo/browser/package-lock.json | 3575 ++++++++++++++++++
composite_demo/browser/package.json | 26 +
composite_demo/browser/pnpm-lock.yaml | 1580 ++++++++
composite_demo/browser/src/browser.ts | 745 ++++
composite_demo/browser/src/config.ts | 10 +
composite_demo/browser/src/server.ts | 55 +
composite_demo/browser/src/types.ts | 25 +
composite_demo/browser/src/utils.ts | 56 +
composite_demo/browser/tsconfig.json | 15 +
composite_demo/requirements.txt | 22 +
composite_demo/src/client.py | 98 +
composite_demo/src/clients/hf.py | 59 +
composite_demo/src/clients/vllm.py | 64 +
composite_demo/src/conversation.py | 165 +
composite_demo/src/main.py | 356 ++
composite_demo/src/tools/browser.py | 61 +
composite_demo/src/tools/cogview.py | 23 +
composite_demo/src/tools/config.py | 6 +
composite_demo/src/tools/interface.py | 10 +
composite_demo/src/tools/python.py | 200 +
composite_demo/src/tools/tool_registry.py | 188 +
composite_demo/src/utils.py | 29 +
finetune_demo/README.md | 249 ++
finetune_demo/README_en.md | 245 ++
finetune_demo/configs/ds_zero_2.json | 29 +
finetune_demo/configs/ds_zero_3.json | 31 +
finetune_demo/configs/lora.yaml | 44 +
finetune_demo/configs/ptuning_v2.yaml | 44 +
finetune_demo/configs/sft.yaml | 37 +
finetune_demo/finetune.py | 447 +++
finetune_demo/inference.py | 109 +
finetune_demo/requirements.txt | 5 +
resources/WECHAT.md | 7 +
resources/eval_needle.jpeg | Bin 0 -> 462372 bytes
resources/longbench.png | Bin 0 -> 167920 bytes
resources/wechat.jpg | Bin 0 -> 154573 bytes
65 files changed, 11654 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml
create mode 100644 .github/ISSUE_TEMPLATE/feature-request.yaml
create mode 100644 .github/PULL_REQUEST_TEMPLATE/pr_template.md
create mode 100644 .gitignore
create mode 100644 LICENSE
create mode 100644 README.md
create mode 100644 README_en.md
create mode 100644 basic_demo/README.md
create mode 100644 basic_demo/README_en.md
create mode 100644 basic_demo/openai_api_request.py
create mode 100644 basic_demo/openai_api_server.py
create mode 100644 basic_demo/requirements.txt
create mode 100644 basic_demo/trans_batch_demo.py
create mode 100644 basic_demo/trans_cli_demo.py
create mode 100644 basic_demo/trans_stress_test.py
create mode 100644 basic_demo/trans_web_demo.py
create mode 100644 basic_demo/vllm_cli_demo.py
create mode 100644 composite_demo/.gitignore
create mode 100644 composite_demo/README.md
create mode 100644 composite_demo/README_en.md
create mode 100644 composite_demo/assets/cogview.png
create mode 100644 composite_demo/assets/demo.png
create mode 100644 composite_demo/assets/doc_reader.png
create mode 100644 composite_demo/assets/tool.png
create mode 100644 composite_demo/assets/vlm.png
create mode 100644 composite_demo/assets/weather.png
create mode 100644 composite_demo/assets/web_plot_1.png
create mode 100644 composite_demo/assets/web_plot_2.png
create mode 100644 composite_demo/browser/.gitignore
create mode 100644 composite_demo/browser/package-lock.json
create mode 100644 composite_demo/browser/package.json
create mode 100644 composite_demo/browser/pnpm-lock.yaml
create mode 100644 composite_demo/browser/src/browser.ts
create mode 100644 composite_demo/browser/src/config.ts
create mode 100644 composite_demo/browser/src/server.ts
create mode 100644 composite_demo/browser/src/types.ts
create mode 100644 composite_demo/browser/src/utils.ts
create mode 100644 composite_demo/browser/tsconfig.json
create mode 100644 composite_demo/requirements.txt
create mode 100644 composite_demo/src/client.py
create mode 100644 composite_demo/src/clients/hf.py
create mode 100644 composite_demo/src/clients/vllm.py
create mode 100644 composite_demo/src/conversation.py
create mode 100644 composite_demo/src/main.py
create mode 100644 composite_demo/src/tools/browser.py
create mode 100644 composite_demo/src/tools/cogview.py
create mode 100644 composite_demo/src/tools/config.py
create mode 100644 composite_demo/src/tools/interface.py
create mode 100644 composite_demo/src/tools/python.py
create mode 100644 composite_demo/src/tools/tool_registry.py
create mode 100644 composite_demo/src/utils.py
create mode 100644 finetune_demo/README.md
create mode 100644 finetune_demo/README_en.md
create mode 100644 finetune_demo/configs/ds_zero_2.json
create mode 100644 finetune_demo/configs/ds_zero_3.json
create mode 100644 finetune_demo/configs/lora.yaml
create mode 100644 finetune_demo/configs/ptuning_v2.yaml
create mode 100644 finetune_demo/configs/sft.yaml
create mode 100644 finetune_demo/finetune.py
create mode 100644 finetune_demo/inference.py
create mode 100644 finetune_demo/requirements.txt
create mode 100644 resources/WECHAT.md
create mode 100644 resources/eval_needle.jpeg
create mode 100644 resources/longbench.png
create mode 100644 resources/wechat.jpg
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
new file mode 100644
index 0000000..2797f4b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -0,0 +1,72 @@
+name: "\U0001F41B Bug Report"
+description: Submit a bug report to help us improve GLM-4-9B / 提交一个 Bug 问题报告来帮助我们改进 GLM-4-9B
+body:
+ - type: textarea
+ id: system-info
+ attributes:
+ label: System Info / 系統信息
+ description: Your operating environment / 您的运行环境信息
+ placeholder: Includes Cuda version, Transformers version, Python version, operating system, hardware information (if you suspect a hardware problem)... / 包括Cuda版本,Transformers版本,Python版本,操作系统,硬件信息(如果您怀疑是硬件方面的问题)...
+ validations:
+ required: true
+
+ - type: textarea
+ id: who-can-help
+ attributes:
+ label: Who can help? / 谁可以帮助到您?
+ description: |
+ Your issue will be replied to more quickly if you can figure out the right person to tag with @
+ All issues are read by one of the maintainers, so if you don't know who to tag, just leave this blank and our maintainer will ping the right person.
+
+ Please tag fewer than 3 people.
+
+ 如果您能找到合适的标签 @,您的问题会更快得到回复。
+ 所有问题都会由我们的维护者阅读,如果您不知道该标记谁,只需留空,我们的维护人员会找到合适的开发组成员来解决问题。
+
+ 标记的人数应该不超过 3 个人。
+
+ If it's not a bug in these three subsections, you do not need to specify a helper. Our maintainers will find the right person in the development group to solve the problem.
+
+ 如果不是这三个子版块的bug,您可以不指明帮助者,我们的维护人员会找到合适的开发组成员来解决问题。
+
+ placeholder: "@Username ..."
+
+ - type: checkboxes
+ id: information-scripts-examples
+ attributes:
+ label: Information / 问题信息
+ description: 'The problem arises when using: / 问题出现在'
+ options:
+ - label: "The official example scripts / 官方的示例脚本"
+ - label: "My own modified scripts / 我自己修改的脚本和任务"
+
+ - type: textarea
+ id: reproduction
+ validations:
+ required: true
+ attributes:
+ label: Reproduction / 复现过程
+ description: |
+ Please provide a code example that reproduces the problem you encountered, preferably with a minimal reproduction unit.
+ If you have code snippets, error messages, stack traces, please provide them here as well.
+ Please format your code correctly using code tags. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
+ Do not use screenshots, as they are difficult to read and (more importantly) do not allow others to copy and paste your code.
+
+ 请提供能重现您遇到的问题的代码示例,最好是最小复现单元。
+ 如果您有代码片段、错误信息、堆栈跟踪,也请在此提供。
+ 请使用代码标签正确格式化您的代码。请参见 https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
+ 请勿使用截图,因为截图难以阅读,而且(更重要的是)不允许他人复制粘贴您的代码。
+ placeholder: |
+ Steps to reproduce the behavior/复现Bug的步骤:
+
+ 1.
+ 2.
+ 3.
+
+ - type: textarea
+ id: expected-behavior
+ validations:
+ required: true
+ attributes:
+ label: Expected behavior / 期待表现
+ description: "A clear and concise description of what you would expect to happen. /简单描述您期望发生的事情。"
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature-request.yaml b/.github/ISSUE_TEMPLATE/feature-request.yaml
new file mode 100644
index 0000000..003045d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.yaml
@@ -0,0 +1,34 @@
+name: "\U0001F680 Feature request"
+description: Submit a request for a new GLM-4-9B feature / 提交一个新的 GLM-4-9B 的功能建议
+labels: [ "feature" ]
+body:
+ - type: textarea
+ id: feature-request
+ validations:
+ required: true
+ attributes:
+ label: Feature request / 功能建议
+ description: |
+ A brief description of the proposed feature. Links to relevant papers and code are welcome.
+ 对功能建议的简述。最好提供对应的论文和代码链接
+
+ - type: textarea
+ id: motivation
+ validations:
+ required: true
+ attributes:
+ label: Motivation / 动机
+ description: |
+ Your motivation for making the suggestion. If that motivation is related to another GitHub issue, link to it here.
+ 您提出建议的动机。如果该动机与另一个 GitHub 问题有关,请在此处提供对应的链接。
+
+ - type: textarea
+ id: contribution
+ validations:
+ required: true
+ attributes:
+ label: Your contribution / 您的贡献
+ description: |
+
+ A link to your PR, or any other link that shows how you can contribute.
+ 您的PR链接或者其他您能提供帮助的链接。
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE/pr_template.md b/.github/PULL_REQUEST_TEMPLATE/pr_template.md
new file mode 100644
index 0000000..1830e40
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/pr_template.md
@@ -0,0 +1,34 @@
+# Raise a valuable PR / 提出有价值的PR
+
+## Caution / 注意事项:
+Users should keep the following points in mind when submitting PRs:
+
+1. The proposed PR should be about this project.
+2. The proposed PR should be focused; if there are multiple ideas or optimizations, they should be split into separate PRs.
+
+用户在提交PR时候应该注意以下几点:
+
+1. 提出的PR应该是关于本项目的。
+2. 提出的PR应该具有针对性,如果具有多个不同的想法和优化方案,应该分配到不同的PR中。
+
+## 不应该提出的PR / PRs that should not be proposed
+
+If a developer proposes a PR that falls into any of the following categories, it may be closed or rejected directly.
+
+1. PRs that do not describe an improvement plan.
+2. PRs that combine multiple issues of different types.
+3. PRs that largely duplicate already existing PRs.
+
+如果开发者提出关于以下方面的PR,则可能会被直接关闭或拒绝通过。
+
+1. 没有说明改进方案的。
+2. 多个不同类型的问题合并在一个PR中的。
+3. 提出的PR与已经存在的PR高度重复的。
+
+
+# Check your PR / 检查您的PR
+- [ ] Have you read the Contributor Guidelines, Pull Request section? / 您是否阅读了贡献者指南、Pull Request 部分?
+- [ ] Has this been discussed/approved via a Github issue or forum? If so, add a link. / 是否通过 Github 问题或论坛讨论/批准过?如果是,请添加链接。
+- [ ] Did you make sure you updated the documentation with your changes? Here are the Documentation Guidelines, and here are the Documentation Formatting Tips. /您是否确保根据您的更改更新了文档?这里是文档指南,这里是文档格式化技巧。
+- [ ] Did you write new required tests? / 您是否编写了新的必要测试?
+- [ ] Is your PR for only one issue? / 您的PR是否仅针对一个问题?
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f91b46d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+*venv
+*.DS_Store
+*base_model
+*multimodal
+chat_model
+*.idea/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..4191e11
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2024 GLM-4-9B Model Team @ Zhipu AI
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e6fe0b4
--- /dev/null
+++ b/README.md
@@ -0,0 +1,272 @@
+# GLM-4
+
+
+🤗 HF Repo • 🤖 ModelScope • 🐦 Twitter • 👋 加入我们的 Slack 和 微信
+
+
+📍在 智谱AI开放平台 体验和使用更大规模的 GLM 商业模型。
+
+
+Read this in [English](README_en.md)
+
+## 模型介绍
+
+GLM-4-9B 是智谱 AI 推出的最新一代预训练模型 GLM-4 系列中的开源版本。 在语义、数学、推理、代码和知识等多方面的数据集测评中,**GLM-4-9B**
+及其人类偏好对齐的版本 **GLM-4-9B-Chat** 均表现出超越 Llama-3-8B 的卓越性能。除了能进行多轮对话,GLM-4-9B-Chat
+还具备网页浏览、代码执行、自定义工具调用(Function Call)和长文本推理(支持最大 128K 上下文)等高级功能。本代模型增加了多语言支持,支持包括日语,韩语,德语在内的
+26 种语言。我们还推出了支持 1M 上下文长度(约 200 万中文字符)的 **GLM-4-9B-Chat-1M** 模型和基于 GLM-4-9B 的多模态模型
+GLM-4V-9B。**GLM-4V-9B** 具备 1120 * 1120 高分辨率下的中英双语多轮对话能力,在中英文综合能力、感知推理、文字识别、图表理解等多方面多模态评测中,GLM-4V-9B 表现出超越 GPT-4-turbo-2024-04-09、Gemini
+1.0 Pro、Qwen-VL-Max 和 Claude 3 Opus 的卓越性能。
+
+## 模型列表
+
+| Model | Seq Length | Download |
+|------------------|------------|-----------------------------------------------------------------------------------------------------------------------------------------|
+| GLM-4-9B | 8K | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4-9b) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4-9b) |
+| GLM-4-9B-Chat | 128K | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4-9b-chat) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4-9b-chat) |
+| GLM-4-9B-Chat-1M | 1M | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4-9b-chat-1m) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4-9b-chat-1m) |
+| GLM-4V-9B | 8K | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4v-9b) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4v-9b) |
+
+## 评测结果
+
+### 对话模型典型任务
+
+| Model | AlignBench | MT-Bench | IFEval | MMLU | C-Eval | GSM8K | MATH | HumanEval | NaturalCodeBench |
+|:--------------------|:----------:|:--------:|:------:|:----:|:------:|:-----:|:----:|:---------:|:----------------:|
+| Llama-3-8B-Instruct | 6.40 | 8.00 | 68.58 | 68.4 | 51.3 | 79.6 | 30.0 | 62.2 | 24.7 |
+| ChatGLM3-6B | 5.18 | 5.50 | 28.1 | 66.4 | 69.0 | 72.3 | 25.7 | 58.5 | 11.3 |
+| GLM-4-9B-Chat | 7.01 | 8.35 | 69.0 | 72.4 | 75.6 | 79.6 | 50.6 | 71.8 | 32.2 |
+
+### 基座模型典型任务
+
+| Model | MMLU | C-Eval | GPQA | GSM8K | MATH | HumanEval |
+|:--------------------|:----:|:------:|:----:|:-----:|:----:|:---------:|
+| Llama-3-8B | 66.6 | 51.2 | - | 45.8 | - | 33.5 |
+| Llama-3-8B-Instruct | 68.4 | 51.3 | 34.2 | 79.6 | 30.0 | 62.2 |
+| ChatGLM3-6B-Base | 61.4 | 69.0 | 26.8 | 72.3 | 25.7 | 58.5 |
+| GLM-4-9B | 74.7 | 77.1 | 34.3 | 84.0 | 30.4 | 70.1 |
+
+> 由于 `GLM-4-9B` 在预训练过程中加入了部分数学、推理、代码相关的 instruction 数据,所以将 Llama-3-8B-Instruct 也列入比较范围。
+
+### 长文本
+
+在 1M 的上下文长度下进行[大海捞针实验](https://github.com/LargeWorldModel/LWM/blob/main/scripts/eval_needle.py),结果如下:
+
+
+
+在 LongBench-Chat 上对长文本能力进行了进一步评测,结果如下:
+
+
+
+
+
+### 多语言能力
+
+在六个多语言数据集上对 GLM-4-9B-Chat 和 Llama-3-8B-Instruct 进行了测试,测试结果及数据集对应选取语言如下表
+
+| Dataset | Llama-3-8B-Instruct | GLM-4-9B-Chat | Languages
+|:------------|:-------------------:|:-------------:|:----------------------------------------------------------------------------------------------:|
+| M-MMLU | 49.6 | 56.6 | all
+| FLORES | 25.0 | 28.8 | ru, es, de, fr, it, pt, pl, ja, nl, ar, tr, cs, vi, fa, hu, el, ro, sv, uk, fi, ko, da, bg, no
+| MGSM | 54.0 | 65.3 | zh, en, bn, de, es, fr, ja, ru, sw, te, th
+| XWinograd | 61.7 | 73.1 | zh, en, fr, jp, ru, pt
+| XStoryCloze | 84.7 | 90.7 | zh, en, ar, es, eu, hi, id, my, ru, sw, te
+| XCOPA | 73.3 | 80.1 | zh, et, ht, id, it, qu, sw, ta, th, tr, vi
+
+### 工具调用能力
+
+我们在 [Berkeley Function Calling Leaderboard](https://github.com/ShishirPatil/gorilla/tree/main/berkeley-function-call-leaderboard)
+上进行了测试并得到了以下结果:
+
+| Model | Overall Acc. | AST Summary | Exec Summary | Relevance |
+|:-----------------------|:------------:|:-----------:|:------------:|:---------:|
+| Llama-3-8B-Instruct | 58.88 | 59.25 | 70.01 | 45.83 |
+| gpt-4-turbo-2024-04-09 | 81.24 | 82.14 | 78.61 | 88.75 |
+| ChatGLM3-6B | 57.88 | 62.18 | 69.78 | 5.42 |
+| GLM-4-9B-Chat | 81.00 | 80.26 | 84.40 | 87.92 |
+
+### 多模态能力
+
+GLM-4V-9B 是一个多模态语言模型,具备视觉理解能力,其相关经典任务的评测结果如下:
+
+| | **MMBench-EN-Test** | **MMBench-CN-Test** | **SEEDBench_IMG** | **MMStar** | **MMMU** | **MME** | **HallusionBench** | **AI2D** | **OCRBench** |
+|----------------------------|---------------------|---------------------|-------------------|------------|----------|---------|--------------------|----------|--------------|
+| **gpt-4o-2024-05-13** | 83.4 | 82.1 | 77.1 | 63.9 | 69.2 | 2310.3 | 55 | 84.6 | 736 |
+| **gpt-4-turbo-2024-04-09** | 81.0 | 80.2 | 73.0 | 56.0 | 61.7 | 2070.2 | 43.9 | 78.6 | 656 |
+| **gpt-4-1106-preview** | 77.0 | 74.4 | 72.3 | 49.7 | 53.8 | 1771.5 | 46.5 | 75.9 | 516 |
+| **InternVL-Chat-V1.5** | 82.3 | 80.7 | 75.2 | 57.1 | 46.8 | 2189.6 | 47.4 | 80.6 | 720 |
+| **LLaVA-Next-Yi-34B** | 81.1 | 79 | 75.7 | 51.6 | 48.8 | 2050.2 | 34.8 | 78.9 | 574 |
+| **Step-1V** | 80.7 | 79.9 | 70.3 | 50.0 | 49.9 | 2206.4 | 48.4 | 79.2 | 625 |
+| **MiniCPM-Llama3-V2.5** | 77.6 | 73.8 | 72.3 | 51.8 | 45.8 | 2024.6 | 42.4 | 78.4 | 725 |
+| **Qwen-VL-Max** | 77.6 | 75.7 | 72.7 | 49.5 | 52 | 2281.7 | 41.2 | 75.7 | 684 |
+| **Gemini 1.0 Pro** | 73.6 | 74.3 | 70.7 | 38.6 | 49 | 2148.9 | 45.7 | 72.9 | 680 |
+| **Claude 3 Opus** | 63.3 | 59.2 | 64 | 45.7 | 54.9 | 1586.8 | 37.8 | 70.6 | 694 |
+| **GLM-4V-9B** | 81.1 | 79.4 | 76.8 | 58.7 | 47.2 | 2163.8 | 46.6 | 81.1 | 786 |
+
+## 快速调用
+
+### 使用以下方法快速调用 GLM-4-9B-Chat 语言模型
+
+使用 transformers 后端进行推理:
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+device = "cuda"
+
+tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)
+
+query = "你好"
+
+inputs = tokenizer.apply_chat_template([{"role": "user", "content": query}],
+ add_generation_prompt=True,
+ tokenize=True,
+ return_tensors="pt",
+ return_dict=True
+ )
+
+inputs = inputs.to(device)
+model = AutoModelForCausalLM.from_pretrained(
+ "THUDM/glm-4-9b-chat",
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ trust_remote_code=True
+).to(device).eval()
+
+gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
+with torch.no_grad():
+ outputs = model.generate(**inputs, **gen_kwargs)
+ outputs = outputs[:, inputs['input_ids'].shape[1]:]
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```
+
+使用 vLLM 后端进行推理:
+
+```python
+from transformers import AutoTokenizer
+from vllm import LLM, SamplingParams
+
+# GLM-4-9B-Chat-1M
+# max_model_len, tp_size = 1048576, 4
+
+# GLM-4-9B-Chat
+max_model_len, tp_size = 131072, 1
+model_name = "THUDM/glm-4-9b-chat"
+prompt = '你好'
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+llm = LLM(
+ model=model_name,
+ tensor_parallel_size=tp_size,
+ max_model_len=max_model_len,
+ trust_remote_code=True,
+ enforce_eager=True,
+ # GLM-4-9B-Chat-1M 如果遇见 OOM 现象,建议开启下述参数
+ # enable_chunked_prefill=True,
+ # max_num_batched_tokens=8192
+)
+stop_token_ids = [151329, 151336, 151338]
+sampling_params = SamplingParams(temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids)
+
+inputs = tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=True, add_generation_prompt=True)
+outputs = llm.generate(prompt_token_ids=inputs, sampling_params=sampling_params)
+
+generated_text = [output.outputs[0].text for output in outputs]
+print(generated_text)
+```
+
+### 使用以下方法快速调用 GLM-4V-9B 多模态模型
+
+使用 transformers 后端进行推理:
+
+```python
+import torch
+from PIL import Image
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+device = "cuda"
+
+tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4v-9b", trust_remote_code=True)
+
+query = '描述这张图片'
+image = Image.open("your image").convert('RGB')
+inputs = tokenizer.apply_chat_template([{"role": "user", "image": image, "content": query}],
+ add_generation_prompt=True, tokenize=True, return_tensors="pt",
+ return_dict=True) # chat mode
+
+inputs = inputs.to(device)
+model = AutoModelForCausalLM.from_pretrained(
+ "THUDM/glm-4v-9b",
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ trust_remote_code=True
+).to(device).eval()
+
+gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
+with torch.no_grad():
+ outputs = model.generate(**inputs, **gen_kwargs)
+ outputs = outputs[:, inputs['input_ids'].shape[1]:]
+ print(tokenizer.decode(outputs[0]))
+```
+
+注意: GLM-4V-9B 暂不支持使用 vLLM 方式调用。
+
+## 完整项目列表
+
+如果你想更进一步了解 GLM-4-9B 系列开源模型,本开源仓库通过以下内容为开发者提供基础的 GLM-4-9B的使用和开发代码
+
++ [basic_demo](basic_demo/README.md): 在这里包含了
+ + 使用 transformers 和 VLLM 后端的交互代码
+ + OpenAI API 后端交互代码
+ + Batch 推理代码
+
++ [composite_demo](composite_demo/README.md): 在这里包含了
+ + GLM-4-9B 以及 GLM-4V-9B 开源模型的完整功能演示代码,包含了 All Tools 能力、长文档解读和多模态能力的展示。
+
++ [finetune_demo](finetune_demo/README.md): 在这里包含了
+ + PEFT (LORA, P-Tuning) 微调代码
+ + SFT 微调代码
+
+## 协议
+
++ GLM-4 模型的权重的使用则需要遵循 [模型协议](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE)。
+
++ 本开源仓库的代码则遵循 [Apache 2.0](LICENSE) 协议。
+
+请您严格遵循开源协议。
+
+## 引用
+
+如果你觉得我们的工作有帮助的话,请考虑引用下列论文。
+
+```
+@inproceedings{zeng2022glm,
+ title={{GLM-130B:} An Open Bilingual Pre-trained Model},
+ author={Zeng, Aohan and Liu, Xiao and Du, Zhengxiao and Wang, Zihan and Lai, Hanyu and Ding, Ming and Yang, Zhuoyi and Xu, Yifan and Zheng, Wendi and Xia, Xiao and others},
+ booktitle={The Eleventh International Conference on Learning Representations,
+ {ICLR} 2023, Kigali, Rwanda, May 1-5, 2023},
+ year= {2023},
+}
+```
+
+```
+@inproceedings{du2022glm,
+ title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling},
+ author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie},
+ booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+ pages={320--335},
+ year={2022}
+}
+```
+
+```
+@misc{wang2023cogvlm,
+ title={CogVLM: Visual Expert for Pretrained Language Models},
+ author={Weihan Wang and Qingsong Lv and Wenmeng Yu and Wenyi Hong and Ji Qi and Yan Wang and Junhui Ji and Zhuoyi Yang and Lei Zhao and Xixuan Song and Jiazheng Xu and Bin Xu and Juanzi Li and Yuxiao Dong and Ming Ding and Jie Tang},
+ year={2023},
+ eprint={2311.03079},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/README_en.md b/README_en.md
new file mode 100644
index 0000000..df2a70b
--- /dev/null
+++ b/README_en.md
@@ -0,0 +1,280 @@
+# GLM-4
+
+
+🤗 HF Repo • 🤖 ModelScope • 🐦 Twitter • 👋 Join Slack and WeChat
+
+
+📍Experience and use a larger-scale GLM business model on the Zhipu AI Open Platform
+
+
+## Model Introduction
+
+GLM-4-9B is the open-source version of the latest generation of pre-trained models in the GLM-4 series launched by Zhipu
+AI. In evaluations on datasets covering semantics, mathematics, reasoning, code, and knowledge, **GLM-4-9B**
+and its human-preference-aligned version **GLM-4-9B-Chat** both outperform Llama-3-8B. In addition to
+multi-round conversations, GLM-4-9B-Chat
+also has advanced features such as web browsing, code execution, custom tool calls (Function Call), and long text
+reasoning (supporting up to 128K context). This generation of models has added multi-language support, supporting 26
+languages including Japanese, Korean, and German. We have also launched the **GLM-4-9B-Chat-1M** model that supports 1M
+context length (about 2 million Chinese characters) and the multimodal model GLM-4V-9B based on GLM-4-9B.
+**GLM-4V-9B** possesses dialogue capabilities in both Chinese and English at a high resolution of 1120*1120.
+In various multimodal evaluations, including comprehensive abilities in Chinese and English, perception & reasoning, text recognition, and chart understanding, GLM-4V-9B demonstrates superior performance compared to GPT-4-turbo-2024-04-09, Gemini 1.0 Pro, Qwen-VL-Max, and Claude 3 Opus.
+
+## Model List
+
+| Model | Seq Length | Download |
+|------------------|------------|-----------------------------------------------------------------------------------------------------------------------------------------|
+| GLM-4-9B | 8K | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4-9b) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4-9b) |
+| GLM-4-9B-Chat | 128K | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4-9b-chat) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4-9b-chat) |
+| GLM-4-9B-Chat-1M | 1M | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4-9b-chat-1m) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4-9b-chat-1m) |
+| GLM-4V-9B | 8K | [🤗 Huggingface](https://huggingface.co/THUDM/glm-4v-9b) [🤖 ModelScope](https://modelscope.cn/models/ZhipuAI/glm-4v-9b) |
+
+## Benchmark
+
+### Typical Tasks
+
+| Model | AlignBench | MT-Bench | IFEval | MMLU | C-Eval | GSM8K | MATH | HumanEval | NaturalCodeBench |
+|:--------------------|:----------:|:--------:|:------:|:----:|:------:|:-----:|:----:|:---------:|:----------------:|
+| Llama-3-8B-Instruct | 6.40 | 8.00 | 68.58 | 68.4 | 51.3 | 79.6 | 30.0 | 62.2 | 24.7 |
+| ChatGLM3-6B | 5.18 | 5.50 | 28.1 | 66.4 | 69.0 | 72.3 | 25.7 | 58.5 | 11.3 |
+| GLM-4-9B-Chat | 7.01 | 8.35 | 69.0 | 72.4 | 75.6 | 79.6 | 50.6 | 71.8 | 32.2 |
+
+### Base Model
+
+| Model | MMLU | C-Eval | GPQA | GSM8K | MATH | HumanEval |
+|:--------------------|:----:|:------:|:----:|:-----:|:----:|:---------:|
+| Llama-3-8B | 66.6 | 51.2 | - | 45.8 | - | 33.5 |
+| Llama-3-8B-Instruct | 68.4 | 51.3 | 34.2 | 79.6 | 30.0 | 62.2 |
+| ChatGLM3-6B-Base | 61.4 | 69.0 | 26.8 | 72.3 | 25.7 | 58.5 |
+| GLM-4-9B | 74.7 | 77.1 | 34.3 | 84.0 | 30.4 | 70.1 |
+
+> Since `GLM-4-9B` adds some math, reasoning, and code-related instruction data during pre-training, Llama-3-8B-Instruct
+> is also included in the comparison range.
+
+### Long Context
+
+The [needle-in-the-haystack experiment](https://github.com/LargeWorldModel/LWM/blob/main/scripts/eval_needle.py) was
+conducted with a context length of 1M, and the results are as follows:
+
+
+
+The long text capability was further evaluated on LongBench-Chat, and the results are as follows:
+
+
+
+
+
+### Multilingual Capabilities
+
+The tests for GLM-4-9B-Chat and Llama-3-8B-Instruct are conducted on six multilingual datasets. The test results and the corresponding languages selected for each dataset are shown in the table below:
+
+| Dataset | Llama-3-8B-Instruct | GLM-4-9B-Chat | Languages
+|:------------|:-------------------:|:-------------:|:----------------------------------------------------------------------------------------------:|
+| M-MMLU | 49.6 | 56.6 | all
+| FLORES | 25.0 | 28.8 | ru, es, de, fr, it, pt, pl, ja, nl, ar, tr, cs, vi, fa, hu, el, ro, sv, uk, fi, ko, da, bg, no
+| MGSM | 54.0 | 65.3 | zh, en, bn, de, es, fr, ja, ru, sw, te, th
+| XWinograd | 61.7 | 73.1 | zh, en, fr, jp, ru, pt
+| XStoryCloze | 84.7 | 90.7 | zh, en, ar, es, eu, hi, id, my, ru, sw, te
+| XCOPA | 73.3 | 80.1 | zh, et, ht, id, it, qu, sw, ta, th, tr, vi
+
+### Function Call
+
+Tested
+on [Berkeley Function Calling Leaderboard](https://github.com/ShishirPatil/gorilla/tree/main/berkeley-function-call-leaderboard).
+
+| Model | Overall Acc. | AST Summary | Exec Summary | Relevance |
+|:-----------------------|:------------:|:-----------:|:------------:|:---------:|
+| Llama-3-8B-Instruct | 58.88 | 59.25 | 70.01 | 45.83 |
+| gpt-4-turbo-2024-04-09 | 81.24 | 82.14 | 78.61 | 88.75 |
+| ChatGLM3-6B | 57.88 | 62.18 | 69.78 | 5.42 |
+| GLM-4-9B-Chat | 81.00 | 80.26 | 84.40 | 87.92 |
+
+### Multi-Modal
+
+GLM-4V-9B is a multimodal language model with visual understanding capabilities. The evaluation results of its related
+classic tasks are as follows:
+
+| | **MMBench-EN-Test** | **MMBench-CN-Test** | **SEEDBench_IMG** | **MMStar** | **MMMU** | **MME** | **HallusionBench** | **AI2D** | **OCRBench** |
+|----------------------------|---------------------|---------------------|-------------------|------------|----------|---------|--------------------|----------|--------------|
+| **gpt-4o-2024-05-13** | 83.4 | 82.1 | 77.1 | 63.9 | 69.2 | 2310.3 | 55 | 84.6 | 736 |
+| **gpt-4-turbo-2024-04-09** | 81.0 | 80.2 | 73.0 | 56.0 | 61.7 | 2070.2 | 43.9 | 78.6 | 656 |
+| **gpt-4-1106-preview** | 77.0 | 74.4 | 72.3 | 49.7 | 53.8 | 1771.5 | 46.5 | 75.9 | 516 |
+| **InternVL-Chat-V1.5** | 82.3 | 80.7 | 75.2 | 57.1 | 46.8 | 2189.6 | 47.4 | 80.6 | 720 |
+| **LLaVA-Next-Yi-34B** | 81.1 | 79 | 75.7 | 51.6 | 48.8 | 2050.2 | 34.8 | 78.9 | 574 |
+| **Step-1V** | 80.7 | 79.9 | 70.3 | 50.0 | 49.9 | 2206.4 | 48.4 | 79.2 | 625 |
+| **MiniCPM-Llama3-V2.5** | 77.6 | 73.8 | 72.3 | 51.8 | 45.8 | 2024.6 | 42.4 | 78.4 | 725 |
+| **Qwen-VL-Max** | 77.6 | 75.7 | 72.7 | 49.5 | 52 | 2281.7 | 41.2 | 75.7 | 684 |
+| **Gemini 1.0 Pro** | 73.6 | 74.3 | 70.7 | 38.6 | 49 | 2148.9 | 45.7 | 72.9 | 680 |
+| **Claude 3 Opus** | 63.3 | 59.2 | 64 | 45.7 | 54.9 | 1586.8 | 37.8 | 70.6 | 694 |
+| **GLM-4V-9B** | 81.1 | 79.4 | 76.8 | 58.7 | 47.2 | 2163.8 | 46.6 | 81.1 | 786 |
+
+## Quick call
+
+### Use the following method to quickly call the GLM-4-9B-Chat language model
+
+Use the transformers backend for inference:
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+device = "cuda"
+
+tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)
+
+query = "你好"
+
+inputs = tokenizer.apply_chat_template([{"role": "user", "content": query}],
+ add_generation_prompt=True,
+ tokenize=True,
+ return_tensors="pt",
+ return_dict=True
+ )
+
+inputs = inputs.to(device)
+model = AutoModelForCausalLM.from_pretrained(
+ "THUDM/glm-4-9b-chat",
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ trust_remote_code=True
+).to(device).eval()
+
+gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
+with torch.no_grad():
+ outputs = model.generate(**inputs, **gen_kwargs)
+ outputs = outputs[:, inputs['input_ids'].shape[1]:]
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```
+
+Use the vLLM backend for inference:
+
+```python
+from transformers import AutoTokenizer
+from vllm import LLM, SamplingParams
+
+# GLM-4-9B-Chat-1M
+# max_model_len, tp_size = 1048576, 4
+
+# GLM-4-9B-Chat
+max_model_len, tp_size = 131072, 1
+model_name = "THUDM/glm-4-9b-chat"
+prompt = '你好'
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+llm = LLM(
+ model=model_name,
+ tensor_parallel_size=tp_size,
+ max_model_len=max_model_len,
+ trust_remote_code=True,
+ enforce_eager=True,
+ # GLM-4-9B-Chat-1M If you encounter OOM phenomenon, it is recommended to turn on the following parameters
+ # enable_chunked_prefill=True,
+ # max_num_batched_tokens=8192
+)
+stop_token_ids = [151329, 151336, 151338]
+sampling_params = SamplingParams(temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids)
+
+inputs = tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=True, add_generation_prompt=True)
+outputs = llm.generate(prompt_token_ids=inputs, sampling_params=sampling_params)
+
+generated_text = [output.outputs[0].text for output in outputs]
+print(generated_text)
+```
+
+### Use the following method to quickly call the GLM-4V-9B multimodal model
+
+Use the transformers backend for inference:
+
+```python
+import torch
+from PIL import Image
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+device = "cuda"
+
+tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4v-9b", trust_remote_code=True)
+
+query = 'describe this image'
+image = Image.open("your image").convert('RGB')
+inputs = tokenizer.apply_chat_template([{"role": "user", "image": image, "content": query}],
+ add_generation_prompt=True, tokenize=True, return_tensors="pt",
+ return_dict=True) # chat mode
+
+inputs = inputs.to(device)
+model = AutoModelForCausalLM.from_pretrained(
+ "THUDM/glm-4v-9b",
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ trust_remote_code=True
+).to(device).eval()
+
+gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
+with torch.no_grad():
+ outputs = model.generate(**inputs, **gen_kwargs)
+ outputs = outputs[:, inputs['input_ids'].shape[1]:]
+ print(tokenizer.decode(outputs[0]))
+```
+
+Note: GLM-4V-9B does not yet support inference with vLLM.
+
+## Complete project list
+
+If you want to learn more about the GLM-4-9B series of open source models, this repository provides developers with
+basic usage and development code for GLM-4-9B through the following content:
+
++ [basic_demo](basic_demo/README.md): Contains
+ + Interaction code using the transformers and vLLM backends
+ + OpenAI API backend interaction code
+ + Batch inference code
+
++ [composite_demo](composite_demo/README.md): Contains
+ + Fully functional demonstration code for the GLM-4-9B and GLM-4V-9B open source models, including All Tools
+   capabilities, long document interpretation, and multimodal capabilities.
+
++ [finetune_demo](finetune_demo/README.md): Contains
+ + PEFT (LoRA, P-Tuning) fine-tuning code
+ + SFT fine-tuning code
+
+## License
+
++ The use of GLM-4 model weights must follow
+ the [Model License](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE).
+
++ The code in this open source repository follows the [Apache 2.0](LICENSE) license.
+
+Please strictly follow the open source license.
+
+## Reference
+
+If you find our work helpful, please consider citing the following papers.
+
+```
+@inproceedings{zeng2022glm,
+ title={{GLM-130B:} An Open Bilingual Pre-trained Model},
+ author={Zeng, Aohan and Liu, Xiao and Du, Zhengxiao and Wang, Zihan and Lai, Hanyu and Ding, Ming and Yang, Zhuoyi and Xu, Yifan and Zheng, Wendi and Xia, Xiao and others},
+ booktitle={The Eleventh International Conference on Learning Representations,
+ {ICLR} 2023, Kigali, Rwanda, May 1-5, 2023},
+ year= {2023},
+}
+```
+
+```
+@inproceedings{du2022glm,
+ title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling},
+ author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie},
+ booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+ pages={320--335},
+ year={2022}
+}
+```
+
+```
+@misc{wang2023cogvlm,
+ title={CogVLM: Visual Expert for Pretrained Language Models},
+ author={Weihan Wang and Qingsong Lv and Wenmeng Yu and Wenyi Hong and Ji Qi and Yan Wang and Junhui Ji and Zhuoyi Yang and Lei Zhao and Xixuan Song and Jiazheng Xu and Bin Xu and Juanzi Li and Yuxiao Dong and Ming Ding and Jie Tang},
+ year={2023},
+ eprint={2311.03079},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/basic_demo/README.md b/basic_demo/README.md
new file mode 100644
index 0000000..1b58271
--- /dev/null
+++ b/basic_demo/README.md
@@ -0,0 +1,114 @@
+# Basic Demo
+
+Read this in [English](README_en.md)
+
+本 demo 中,你将体验到如何使用 glm-4-9b 开源模型进行基本的任务。
+
+请严格按照文档的步骤进行操作,以避免不必要的错误。
+
+## 设备和依赖检查
+
+### 相关推理测试数据
+
+**本文档的数据均在以下硬件环境测试,实际运行环境需求和运行占用的显存略有不同,请以实际运行环境为准。**
+测试硬件信息:
+
++ OS: Ubuntu 22.04
++ Memory: 512GB
++ Python: 3.12.3
++ CUDA Version: 12.3
++ GPU Driver: 535.104.05
++ GPU: NVIDIA A100-SXM4-80GB * 8
+
+相关推理的压力测试数据如下:
+
+**所有测试均在单张GPU上进行测试,所有显存消耗都按照峰值左右进行测算**
+
+| 精度 | 显存占用 | Prefilling / 首响 | Decode Speed | Remarks |
+|------|----------|-----------------|------------------|--------------|
+| BF16 | 19047MiB | 0.1554s | 27.8193 tokens/s | 输入长度为 1000 |
+| BF16 | 20629MiB | 0.8199s | 31.8613 tokens/s | 输入长度为 8000 |
+| BF16 | 27779MiB | 4.3554s | 14.4108 tokens/s | 输入长度为 32000 |
+| BF16 | 57379MiB | 38.1467s | 3.4205 tokens/s | 输入长度为 128000 |
+| BF16 | 74497MiB | 98.4930s | 2.3653 tokens/s | 输入长度为 200000 |
+
+| 精度 | 显存占用 | Prefilling / 首响 | Decode Speed | Remarks |
+|------|----------|-----------------|------------------|-------------|
+| Int4 | 8251MiB | 0.1667s | 23.3903 tokens/s | 输入长度为 1000 |
+| Int4 | 9613MiB | 0.8629s | 23.4248 tokens/s | 输入长度为 8000 |
+| Int4 | 16065MiB | 4.3906s | 14.6553 tokens/s | 输入长度为 32000 |
+
+### 最低硬件要求
+
+如果您希望运行官方提供的最基础代码 (transformers 后端) 您需要:
+
++ Python >= 3.10
++ 内存不少于 32 GB
+
+如果您希望运行官方提供的本文件夹的所有代码,您还需要:
+
++ Linux 操作系统 (Debian 系列最佳)
++ 大于 8GB 显存的,支持 CUDA 或者 ROCM 并且支持 `BF16` 推理的 GPU 设备 (A100以上GPU,V100,20以及更老的GPU架构不受支持)
+
+安装依赖
+
+```shell
+pip install -r requirements.txt
+```
+
+## 基础功能调用
+
+**除非特殊说明,本文件夹所有 demo 并不支持 Function Call 和 All Tools 等进阶用法**
+
+### 使用 transformers 后端代码
+
++ 使用 命令行 与 glm-4-9b 模型进行对话。
+
+```shell
+python trans_cli_demo.py
+```
+
++ 使用 Gradio 网页端与 glm-4-9b 模型进行对话。
+
+```shell
+python trans_web_demo.py
+```
+
++ 使用 Batch 推理。
+
+```shell
+python trans_batch_demo.py
+```
+
+### 使用 VLLM 后端代码
+
++ 使用命令行与 glm-4-9b 模型进行对话。
+
+```shell
+python vllm_cli_demo.py
+```
+
++ 自行构建服务端,并使用 `OpenAI API` 的请求格式与 glm-4-9b 模型进行对话。本 demo 支持 Function Call 和 All Tools功能。
+
+启动服务端:
+
+```shell
+python openai_api_server.py
+```
+
+客户端请求:
+
+```shell
+python openai_api_request.py
+```
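+
+If you prefer to call the server from your own Python code rather than the bundled script, a minimal sketch looks like the following; it assumes `openai_api_server.py` is already running at its default address (`http://127.0.0.1:8000`, the same one used by `openai_api_request.py`):
+
+```python
+from openai import OpenAI
+
+# Assumes openai_api_server.py is running locally on its default port.
+client = OpenAI(api_key="EMPTY", base_url="http://127.0.0.1:8000/v1/")
+
+response = client.chat.completions.create(
+    model="glm-4",
+    messages=[{"role": "user", "content": "你好"}],
+    max_tokens=256,
+    temperature=0.8,
+)
+print(response.choices[0].message.content)
+```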
+
+## 压力测试
+
+用户可以在自己的设备上使用本代码测试模型在 transformers后端的生成速度:
+
+```shell
+python trans_stress_test.py
+```
+
+
+
diff --git a/basic_demo/README_en.md b/basic_demo/README_en.md
new file mode 100644
index 0000000..72cb32b
--- /dev/null
+++ b/basic_demo/README_en.md
@@ -0,0 +1,114 @@
+# Basic Demo
+
+In this demo, you will experience how to use the glm-4-9b open source model to perform basic tasks.
+
+Please follow the steps in the document strictly to avoid unnecessary errors.
+
+## Device and dependency check
+
+### Related inference test data
+
+**The data in this document were measured in the following hardware environment. Actual requirements and GPU memory
+usage may differ slightly depending on your environment; please refer to your actual setup.**
+Test hardware information:
+
++ OS: Ubuntu 22.04
++ Memory: 512GB
++ Python: 3.12.3
++ CUDA Version: 12.3
++ GPU Driver: 535.104.05
++ GPU: NVIDIA A100-SXM4-80GB * 8
+
+The stress test data of relevant inference are as follows:
+
+**All tests are performed on a single GPU, and all GPU memory consumption is measured at roughly its peak value.**
+
+| Precision | GPU Memory Usage | Prefilling / First Response | Decode Speed | Remarks |
+|----------|--------------------|-------------------------|------------------|------------------------|
+| BF16 | 19047MiB | 0.1554s | 27.8193 tokens/s | Input length is 1000 |
+| BF16 | 20629MiB | 0.8199s | 31.8613 tokens/s | Input length is 8000 |
+| BF16 | 27779MiB | 4.3554s | 14.4108 tokens/s | Input length is 32000 |
+| BF16 | 57379MiB | 38.1467s | 3.4205 tokens/s | Input length is 128000 |
+| BF16 | 74497MiB | 98.4930s | 2.3653 tokens/s | Input length is 200000 |
+
+| Precision | GPU Memory Usage | Prefilling / First Response | Decode Speed | Remarks |
+|-----------|--------------|--------------------------|------------------|-----------------------|
+| Int4 | 8251MiB | 0.1667s | 23.3903 tokens/s | Input length is 1000 |
+| Int4 | 9613MiB | 0.8629s | 23.4248 tokens/s | Input length is 8000 |
+| Int4 | 16065MiB | 4.3906s | 14.6553 tokens/s | Input length is 32000 |
+
+### Minimum hardware requirements
+
+If you want to run the most basic demo code provided here (transformers backend), you need:
+
++ Python >= 3.10
++ Memory of at least 32 GB
+
+If you want to run all of the code in this folder, you also need:
+
++ Linux operating system (Debian series is best)
++ A GPU with more than 8GB of memory that supports CUDA or ROCm and `BF16` inference (A100-class or newer GPUs;
+ V100, 20-series, and older GPU architectures are not supported)
+
+Install dependencies
+
+```shell
+pip install -r requirements.txt
+```
+
+## Basic function calls
+
+**Unless otherwise specified, all demos in this folder do not support advanced usage such as Function Call and All Tools.**
+
+### Use transformers backend code
+
++ Use the command line to communicate with the glm-4-9b model.
+
+```shell
+python trans_cli_demo.py
+```
+
++ Use the Gradio web client to communicate with the glm-4-9b model.
+
+```shell
+python trans_web_demo.py
+```
+
++ Use Batch inference.
+
+```shell
+python trans_batch_demo.py
+```
+
+### Use VLLM backend code
+
++ Use the command line to communicate with the glm-4-9b model.
+
+```shell
+python vllm_cli_demo.py
+```
+
++ Build your own server and use the `OpenAI API` request format to communicate with the glm-4-9b model. This
+ demo supports the Function Call and All Tools capabilities.
+
+Start the server:
+
+```shell
+python openai_api_server.py
+```
+
+Client request:
+
+```shell
+python openai_api_request.py
+```
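+
+You can also talk to the server directly from Python. Below is a minimal streaming sketch, assuming `openai_api_server.py` is already running at its default address (`http://127.0.0.1:8000`, as used by `openai_api_request.py`):
+
+```python
+from openai import OpenAI
+
+# Assumes openai_api_server.py is running locally on its default port.
+client = OpenAI(api_key="EMPTY", base_url="http://127.0.0.1:8000/v1/")
+
+# Stream the reply chunk by chunk instead of waiting for the full completion.
+stream = client.chat.completions.create(
+    model="glm-4",
+    messages=[{"role": "user", "content": "Tell me a short story."}],
+    max_tokens=512,
+    stream=True,
+)
+for chunk in stream:
+    print(chunk.choices[0].delta.content or "", end="", flush=True)
+print()
+```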
+
+## Stress test
+
+Users can use this code to test the generation speed of the model on the transformers backend on their own devices:
+
+```shell
+python trans_stress_test.py
+```
\ No newline at end of file
diff --git a/basic_demo/openai_api_request.py b/basic_demo/openai_api_request.py
new file mode 100644
index 0000000..cf412fa
--- /dev/null
+++ b/basic_demo/openai_api_request.py
@@ -0,0 +1,88 @@
+"""
+This script is an OpenAI API request demo for the glm-4-9b model; it simply uses the OpenAI Python client to interact with the locally served model.
+"""
+
+from openai import OpenAI
+
+base_url = "http://127.0.0.1:8000/v1/"
+client = OpenAI(api_key="EMPTY", base_url=base_url)
+
+
+def function_chat():
+ messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+ },
+ "required": ["location"],
+ },
+ },
+ }
+ ]
+
+ # All Tools capability: image generation (cogview)
+ # messages = [{"role": "user", "content": "帮我画一张天空的画画吧"}]
+ # tools = [{"type": "cogview"}]
+ #
+ # All Tools capability: web search (simple_browser)
+ # messages = [{"role": "user", "content": "今天黄金的价格"}]
+ # tools = [{"type": "simple_browser"}]
+
+ response = client.chat.completions.create(
+ model="glm-4",
+ messages=messages,
+ tools=tools,
+ tool_choice="auto", # use "auto" to let the model choose the tool automatically
+ # tool_choice={"type": "function", "function": {"name": "my_function"}},
+ )
+ if response:
+ content = response.choices[0].message.content
+ print(content)
+ else:
+ print("Error:", response.status_code)
+
+
+def simple_chat(use_stream=False):
+ messages = [
+ {
+ "role": "system",
+ "content": "你是 GLM-4,请你热情回答用户的问题。",
+ },
+ {
+ "role": "user",
+ "content": "你好,请你用生动的话语给我讲一个小故事吧"
+ }
+ ]
+ response = client.chat.completions.create(
+ model="glm-4",
+ messages=messages,
+ stream=use_stream,
+ max_tokens=1024,
+ temperature=0.8,
+ presence_penalty=1.1,
+ top_p=0.8)
+ if response:
+ if use_stream:
+ for chunk in response:
+ print(chunk.choices[0].delta.content)
+ else:
+ content = response.choices[0].message.content
+ print(content)
+ else:
+ print("Error:", response.status_code)
+
+
+if __name__ == "__main__":
+ simple_chat()
+ function_chat()
diff --git a/basic_demo/openai_api_server.py b/basic_demo/openai_api_server.py
new file mode 100644
index 0000000..8f64053
--- /dev/null
+++ b/basic_demo/openai_api_server.py
@@ -0,0 +1,543 @@
+import os
+import time
+from asyncio.log import logger
+
+import uvicorn
+import gc
+import json
+import torch
+
+from vllm import SamplingParams, AsyncEngineArgs, AsyncLLMEngine
+from fastapi import FastAPI, HTTPException, Response
+from fastapi.middleware.cors import CORSMiddleware
+from contextlib import asynccontextmanager
+from typing import List, Literal, Optional, Union
+from pydantic import BaseModel, Field
+from transformers import AutoTokenizer, LogitsProcessor
+from sse_starlette.sse import EventSourceResponse
+
+EventSourceResponse.DEFAULT_PING_INTERVAL = 1000
+MODEL_PATH = 'THUDM/glm-4-9b'
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ yield
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.ipc_collect()
+
+
+app = FastAPI(lifespan=lifespan)
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+
+class ModelCard(BaseModel):
+ id: str
+ object: str = "model"
+ created: int = Field(default_factory=lambda: int(time.time()))
+ owned_by: str = "owner"
+ root: Optional[str] = None
+ parent: Optional[str] = None
+ permission: Optional[list] = None
+
+
+class ModelList(BaseModel):
+ object: str = "list"
+ data: List[ModelCard] = []
+
+
+class FunctionCallResponse(BaseModel):
+ name: Optional[str] = None
+ arguments: Optional[str] = None
+
+
+class ChatMessage(BaseModel):
+ role: Literal["user", "assistant", "system", "tool"]
+ content: str = None
+ name: Optional[str] = None
+ function_call: Optional[FunctionCallResponse] = None
+
+
+class DeltaMessage(BaseModel):
+ role: Optional[Literal["user", "assistant", "system"]] = None
+ content: Optional[str] = None
+ function_call: Optional[FunctionCallResponse] = None
+
+
+class EmbeddingRequest(BaseModel):
+ input: Union[List[str], str]
+ model: str
+
+
+class CompletionUsage(BaseModel):
+ prompt_tokens: int
+ completion_tokens: int
+ total_tokens: int
+
+
+class EmbeddingResponse(BaseModel):
+ data: list
+ model: str
+ object: str
+ usage: CompletionUsage
+
+
+class UsageInfo(BaseModel):
+ prompt_tokens: int = 0
+ total_tokens: int = 0
+ completion_tokens: Optional[int] = 0
+
+
+class ChatCompletionRequest(BaseModel):
+ model: str
+ messages: List[ChatMessage]
+ temperature: Optional[float] = 0.8
+ top_p: Optional[float] = 0.8
+ max_tokens: Optional[int] = None
+ stream: Optional[bool] = False
+ tools: Optional[Union[dict, List[dict]]] = None
+ tool_choice: Optional[Union[str, dict]] = "None"
+ repetition_penalty: Optional[float] = 1.1
+
+
+class ChatCompletionResponseChoice(BaseModel):
+ index: int
+ message: ChatMessage
+ finish_reason: Literal["stop", "length", "function_call"]
+
+
+class ChatCompletionResponseStreamChoice(BaseModel):
+ delta: DeltaMessage
+ finish_reason: Optional[Literal["stop", "length", "function_call"]]
+ index: int
+
+
+class ChatCompletionResponse(BaseModel):
+ model: str
+ id: str
+ object: Literal["chat.completion", "chat.completion.chunk"]
+ choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]]
+ created: Optional[int] = Field(default_factory=lambda: int(time.time()))
+ usage: Optional[UsageInfo] = None
+
+
+class InvalidScoreLogitsProcessor(LogitsProcessor):
+ def __call__(
+ self, input_ids: torch.LongTensor, scores: torch.FloatTensor
+ ) -> torch.FloatTensor:
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
+ scores.zero_()
+ scores[..., 5] = 5e4
+ return scores
+
+
+def process_response(output: str, use_tool: bool = False) -> Union[str, dict]:
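+    """Split raw model output into metadata (tool name) and content, returning plain text or a tool-call dict when use_tool is set."""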
+ content = ""
+    for response in output.split("<|assistant|>"):
+ metadata, content = response.split("\n", maxsplit=1)
+ if not metadata.strip():
+ content = content.strip()
+ else:
+ if use_tool:
+ content = "\n".join(content.split("\n")[1:-1])
+ parameters = eval(content)
+ content = {
+ "name": metadata.strip(),
+ "arguments": json.dumps(parameters, ensure_ascii=False)
+ }
+ else:
+ content = {
+ "name": metadata.strip(),
+ "content": content
+ }
+ return content
+
+
+@torch.inference_mode()
+async def generate_stream_glm4(params):
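+    """Apply the chat template to the request messages and stream vLLM outputs together with token usage."""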
+ messages = params["messages"]
+ tools = params["tools"]
+ tool_choice = params["tool_choice"]
+ temperature = float(params.get("temperature", 1.0))
+ repetition_penalty = float(params.get("repetition_penalty", 1.0))
+ top_p = float(params.get("top_p", 1.0))
+ max_new_tokens = int(params.get("max_tokens", 8192))
+ messages = process_messages(messages, tools=tools, tool_choice=tool_choice)
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+ params_dict = {
+ "n": 1,
+ "best_of": 1,
+ "presence_penalty": 1.0,
+ "frequency_penalty": 0.0,
+ "temperature": temperature,
+ "top_p": top_p,
+ "top_k": -1,
+ "repetition_penalty": repetition_penalty,
+ "use_beam_search": False,
+ "length_penalty": 1,
+ "early_stopping": False,
+ "stop_token_ids": [151329, 151336, 151338],
+ "ignore_eos": False,
+ "max_tokens": max_new_tokens,
+ "logprobs": None,
+ "prompt_logprobs": None,
+ "skip_special_tokens": True,
+ }
+ sampling_params = SamplingParams(**params_dict)
+ async for output in engine.generate(inputs=inputs, sampling_params=sampling_params, request_id="glm-4-9b"):
+ output_len = len(output.outputs[0].token_ids)
+ input_len = len(output.prompt_token_ids)
+ ret = {
+ "text": output.outputs[0].text,
+ "usage": {
+ "prompt_tokens": input_len,
+ "completion_tokens": output_len,
+ "total_tokens": output_len + input_len
+ },
+ "finish_reason": output.outputs[0].finish_reason,
+ }
+ yield ret
+ gc.collect()
+ torch.cuda.empty_cache()
+
+
+def process_messages(messages, tools=None, tool_choice="none"):
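+    """Convert OpenAI-style messages into GLM-4's native format (system tool list, observation role for tool results, assistant metadata for tool calls)."""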
+ _messages = messages
+ messages = []
+ msg_has_sys = False
+
+ def filter_tools(tool_choice, tools):
+ function_name = tool_choice.get('function', {}).get('name', None)
+ if not function_name:
+ return []
+ filtered_tools = [
+ tool for tool in tools
+ if tool.get('function', {}).get('name') == function_name
+ ]
+ return filtered_tools
+
+ if tool_choice != "none":
+ if isinstance(tool_choice, dict):
+ tools = filter_tools(tool_choice, tools)
+ if tools:
+ messages.append(
+ {
+ "role": "system",
+ "content": None,
+ "tools": tools
+ }
+ )
+ msg_has_sys = True
+
+ # add to metadata
+ if isinstance(tool_choice, dict) and tools:
+ messages.append(
+ {
+ "role": "assistant",
+ "metadata": tool_choice["function"]["name"],
+ "content": ""
+ }
+ )
+
+ for m in _messages:
+ role, content, func_call = m.role, m.content, m.function_call
+ if role == "function":
+ messages.append(
+ {
+ "role": "observation",
+ "content": content
+ }
+ )
+ elif role == "assistant" and func_call is not None:
+            for response in content.split("<|assistant|>"):
+ metadata, sub_content = response.split("\n", maxsplit=1)
+ messages.append(
+ {
+ "role": role,
+ "metadata": metadata,
+ "content": sub_content.strip()
+ }
+ )
+ else:
+ if role == "system" and msg_has_sys:
+ msg_has_sys = False
+ continue
+ messages.append({"role": role, "content": content})
+
+ return messages
+
+
+@app.get("/health")
+async def health() -> Response:
+ """Health check."""
+ return Response(status_code=200)
+
+
+@app.get("/v1/models", response_model=ModelList)
+async def list_models():
+ model_card = ModelCard(id="glm-4")
+ return ModelList(data=[model_card])
+
+
+@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
+async def create_chat_completion(request: ChatCompletionRequest):
+ if len(request.messages) < 1 or request.messages[-1].role == "assistant":
+ raise HTTPException(status_code=400, detail="Invalid request")
+
+ gen_params = dict(
+ messages=request.messages,
+ temperature=request.temperature,
+ top_p=request.top_p,
+ max_tokens=request.max_tokens or 1024,
+ echo=False,
+ stream=request.stream,
+ repetition_penalty=request.repetition_penalty,
+ tools=request.tools,
+ tool_choice=request.tool_choice,
+ )
+ logger.debug(f"==== request ====\n{gen_params}")
+
+ if request.stream:
+ predict_stream_generator = predict_stream(request.model, gen_params)
+ output = await anext(predict_stream_generator)
+        # Stream back directly when the first chunk does not look like a tool call.
+        if not (output and 'get_' in output):
+ return EventSourceResponse(predict_stream_generator, media_type="text/event-stream")
+ logger.debug(f"First result output:\n{output}")
+
+ function_call = None
+ if output and request.tools:
+ try:
+ function_call = process_response(output, use_tool=True)
+ except:
+ logger.warning("Failed to parse tool call")
+
+ # CallFunction
+ if isinstance(function_call, dict):
+ function_call = FunctionCallResponse(**function_call)
+ tool_response = ""
+ if not gen_params.get("messages"):
+ gen_params["messages"] = []
+ gen_params["messages"].append(ChatMessage(role="assistant", content=output))
+ gen_params["messages"].append(ChatMessage(role="tool", name=function_call.name, content=tool_response))
+ generate = predict(request.model, gen_params)
+ return EventSourceResponse(generate, media_type="text/event-stream")
+ else:
+ generate = parse_output_text(request.model, output)
+ return EventSourceResponse(generate, media_type="text/event-stream")
+
+ response = ""
+ async for response in generate_stream_glm4(gen_params):
+ pass
+
+ if response["text"].startswith("\n"):
+ response["text"] = response["text"][1:]
+ response["text"] = response["text"].strip()
+
+ usage = UsageInfo()
+ function_call, finish_reason = None, "stop"
+ if request.tools:
+ try:
+ function_call = process_response(response["text"], use_tool=True)
+ except:
+ logger.warning(
+ "Failed to parse tool call, maybe the response is not a function call(such as cogview drawing) or have been answered.")
+
+ if isinstance(function_call, dict):
+ finish_reason = "function_call"
+ function_call = FunctionCallResponse(**function_call)
+
+ message = ChatMessage(
+ role="assistant",
+ content=response["text"],
+ function_call=function_call if isinstance(function_call, FunctionCallResponse) else None,
+ )
+
+ logger.debug(f"==== message ====\n{message}")
+
+ choice_data = ChatCompletionResponseChoice(
+ index=0,
+ message=message,
+ finish_reason=finish_reason,
+ )
+ task_usage = UsageInfo.model_validate(response["usage"])
+ for usage_key, usage_value in task_usage.model_dump().items():
+ setattr(usage, usage_key, getattr(usage, usage_key) + usage_value)
+
+ return ChatCompletionResponse(
+ model=request.model,
+ id="", # for open_source model, id is empty
+ choices=[choice_data],
+ object="chat.completion",
+ usage=usage
+ )
+
+
+async def predict(model_id: str, params: dict):
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=DeltaMessage(role="assistant"),
+ finish_reason=None
+ )
+ chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk")
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+
+ previous_text = ""
+ async for new_response in generate_stream_glm4(params):
+ decoded_unicode = new_response["text"]
+ delta_text = decoded_unicode[len(previous_text):]
+ previous_text = decoded_unicode
+
+ finish_reason = new_response["finish_reason"]
+ if len(delta_text) == 0 and finish_reason != "function_call":
+ continue
+
+ function_call = None
+ if finish_reason == "function_call":
+ try:
+ function_call = process_response(decoded_unicode, use_tool=True)
+ except:
+ logger.warning(
+ "Failed to parse tool call, maybe the response is not a tool call or have been answered.")
+
+ if isinstance(function_call, dict):
+ function_call = FunctionCallResponse(**function_call)
+
+ delta = DeltaMessage(
+ content=delta_text,
+ role="assistant",
+ function_call=function_call if isinstance(function_call, FunctionCallResponse) else None,
+ )
+
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=delta,
+ finish_reason=finish_reason
+ )
+ chunk = ChatCompletionResponse(
+ model=model_id,
+ id="",
+ choices=[choice_data],
+ object="chat.completion.chunk"
+ )
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=DeltaMessage(),
+ finish_reason="stop"
+ )
+ chunk = ChatCompletionResponse(
+ model=model_id,
+ id="",
+ choices=[choice_data],
+ object="chat.completion.chunk"
+ )
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+ yield '[DONE]'
+
+
+async def predict_stream(model_id, gen_params):
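+    """
+    Stream chat completion chunks; once the accumulated output looks like a built-in tool
+    call (it contains "get_"), stop emitting chunks and yield the buffered text at the end.
+    """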
+ output = ""
+ is_function_call = False
+ has_send_first_chunk = False
+ async for new_response in generate_stream_glm4(gen_params):
+ decoded_unicode = new_response["text"]
+ delta_text = decoded_unicode[len(output):]
+ output = decoded_unicode
+
+ if not is_function_call and len(output) > 7:
+ is_function_call = output and 'get_' in output
+ if is_function_call:
+ continue
+
+ finish_reason = new_response["finish_reason"]
+ if not has_send_first_chunk:
+ message = DeltaMessage(
+ content="",
+ role="assistant",
+ function_call=None,
+ )
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=message,
+ finish_reason=finish_reason
+ )
+ chunk = ChatCompletionResponse(
+ model=model_id,
+ id="",
+ choices=[choice_data],
+ created=int(time.time()),
+ object="chat.completion.chunk"
+ )
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+
+ send_msg = delta_text if has_send_first_chunk else output
+ has_send_first_chunk = True
+ message = DeltaMessage(
+ content=send_msg,
+ role="assistant",
+ function_call=None,
+ )
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=message,
+ finish_reason=finish_reason
+ )
+ chunk = ChatCompletionResponse(
+ model=model_id,
+ id="",
+ choices=[choice_data],
+ created=int(time.time()),
+ object="chat.completion.chunk"
+ )
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+
+ if is_function_call:
+ yield output
+ else:
+ yield '[DONE]'
+
+
+async def parse_output_text(model_id: str, value: str):
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=DeltaMessage(role="assistant", content=value),
+ finish_reason=None
+ )
+ chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk")
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+ choice_data = ChatCompletionResponseStreamChoice(
+ index=0,
+ delta=DeltaMessage(),
+ finish_reason="stop"
+ )
+ chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk")
+ yield "{}".format(chunk.model_dump_json(exclude_unset=True))
+ yield '[DONE]'
+
+
+if __name__ == "__main__":
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
+ engine_args = AsyncEngineArgs(
+ model=MODEL_PATH,
+ tokenizer=MODEL_PATH,
+ tokenizer_mode="slow",
+ tensor_parallel_size=1,
+ dtype="bfloat16",
+ trust_remote_code=True,
+ gpu_memory_utilization=0.3,
+ enforce_eager=True,
+ worker_use_ray=True,
+ engine_use_ray=False,
+ disable_log_requests=True
+ )
+ engine = AsyncLLMEngine.from_engine_args(engine_args)
+ uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)
diff --git a/basic_demo/requirements.txt b/basic_demo/requirements.txt
new file mode 100644
index 0000000..95dccd6
--- /dev/null
+++ b/basic_demo/requirements.txt
@@ -0,0 +1,23 @@
+torch>=2.3.0
+torchvision>=0.18.0
+transformers==4.40.0
+huggingface-hub>=0.23.1
+sentencepiece>=0.2.0
+pydantic>=2.7.1
+timm>=0.9.16
+tiktoken>=0.7.0
+accelerate>=0.30.1
+sentence_transformers>=2.7.0
+vllm>=0.4.3
+
+# web demo
+gradio>=4.31.5
+
+# openai demo
+openai>=1.30.3
+einops>=0.7.0
+sse-starlette>=2.1.0
+
+# Int4
+
+bitsandbytes>=0.43.1
\ No newline at end of file
diff --git a/basic_demo/trans_batch_demo.py b/basic_demo/trans_batch_demo.py
new file mode 100644
index 0000000..71a6ebf
--- /dev/null
+++ b/basic_demo/trans_batch_demo.py
@@ -0,0 +1,90 @@
+"""
+
+Here is an example of making batch requests to glm-4-9b:
+you need to build the conversation format yourself and then call the batch function to run several prompts at once.
+Please note that memory consumption in this demo is significantly higher.
+
+"""
+
+from typing import Optional, Union
+from transformers import AutoModel, AutoTokenizer, LogitsProcessorList
+
+MODEL_PATH = 'THUDM/glm-4-9b-chat'
+
+tokenizer = AutoTokenizer.from_pretrained(
+ MODEL_PATH,
+ trust_remote_code=True,
+ encode_special_tokens=True)
+model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval()
+
+
+def process_model_outputs(inputs, outputs, tokenizer):
+ responses = []
+ for input_ids, output_ids in zip(inputs.input_ids, outputs):
+ response = tokenizer.decode(output_ids[len(input_ids):], skip_special_tokens=True).strip()
+ responses.append(response)
+ return responses
+
+
+def batch(
+ model,
+ tokenizer,
+ messages: Union[str, list[str]],
+ max_input_tokens: int = 8192,
+ max_new_tokens: int = 8192,
+ num_beams: int = 1,
+ do_sample: bool = True,
+ top_p: float = 0.8,
+ temperature: float = 0.8,
+ logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(),
+):
+ messages = [messages] if isinstance(messages, str) else messages
+ batched_inputs = tokenizer(messages, return_tensors="pt", padding="max_length", truncation=True,
+ max_length=max_input_tokens).to(model.device)
+
+ gen_kwargs = {
+ "max_new_tokens": max_new_tokens,
+ "num_beams": num_beams,
+ "do_sample": do_sample,
+ "top_p": top_p,
+ "temperature": temperature,
+ "logits_processor": logits_processor,
+ "eos_token_id": model.config.eos_token_id
+ }
+ batched_outputs = model.generate(**batched_inputs, **gen_kwargs)
+ batched_response = process_model_outputs(batched_inputs, batched_outputs, tokenizer)
+ return batched_response
+
+
+if __name__ == "__main__":
+
+ batch_message = [
+ [
+ {"role": "user", "content": "我的爸爸和妈妈结婚为什么不能带我去"},
+ {"role": "assistant", "content": "因为他们结婚时你还没有出生"},
+ {"role": "user", "content": "我刚才的提问是"}
+ ],
+ [
+ {"role": "user", "content": "你好,你是谁"}
+ ]
+ ]
+
+ batch_inputs = []
+ max_input_tokens = 1024
+ for i, messages in enumerate(batch_message):
+ new_batch_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+ max_input_tokens = max(max_input_tokens, len(new_batch_input))
+ batch_inputs.append(new_batch_input)
+ gen_kwargs = {
+ "max_input_tokens": max_input_tokens,
+ "max_new_tokens": 8192,
+ "do_sample": True,
+ "top_p": 0.8,
+ "temperature": 0.8,
+ "num_beams": 1,
+ }
+
+ batch_responses = batch(model, tokenizer, batch_inputs, **gen_kwargs)
+ for response in batch_responses:
+ print("=" * 10)
+ print(response)
diff --git a/basic_demo/trans_cli_demo.py b/basic_demo/trans_cli_demo.py
new file mode 100644
index 0000000..fc3c33c
--- /dev/null
+++ b/basic_demo/trans_cli_demo.py
@@ -0,0 +1,120 @@
+"""
+This script creates a CLI demo with a transformers backend for the glm-4-9b model,
+allowing users to interact with the model through a command-line interface.
+
+Usage:
+- Run the script to start the CLI demo.
+- Interact with the model by typing questions and receiving responses.
+
+Note: The script includes a modification to handle markdown to plain text conversion,
+ensuring that the CLI interface displays formatted text correctly.
+"""
+
+import os
+import torch
+from threading import Thread
+from typing import Union
+from pathlib import Path
+from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM
+from transformers import (
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ PreTrainedModel,
+ PreTrainedTokenizer,
+ PreTrainedTokenizerFast,
+ StoppingCriteria,
+ StoppingCriteriaList,
+ TextIteratorStreamer
+)
+
+ModelType = Union[PreTrainedModel, PeftModelForCausalLM]
+TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
+
+MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/glm-4-9b')
+
+
+def load_model_and_tokenizer(
+ model_dir: Union[str, Path], trust_remote_code: bool = True
+) -> tuple[ModelType, TokenizerType]:
+ model_dir = Path(model_dir).expanduser().resolve()
+ if (model_dir / 'adapter_config.json').exists():
+ model = AutoPeftModelForCausalLM.from_pretrained(
+ model_dir, trust_remote_code=trust_remote_code, device_map='auto')
+ tokenizer_dir = model.peft_config['default'].base_model_name_or_path
+ else:
+ model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=trust_remote_code, device_map='auto')
+ tokenizer_dir = model_dir
+
+ tokenizer = AutoTokenizer.from_pretrained(
+ tokenizer_dir, trust_remote_code=trust_remote_code, encode_special_tokens=True, use_fast=False
+ )
+ return model, tokenizer
+
+
+model, tokenizer = load_model_and_tokenizer(MODEL_PATH, trust_remote_code=True)
+
+
+class StopOnTokens(StoppingCriteria):
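+    # Stop generation as soon as the latest token matches one of the model's EOS token ids.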
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+ stop_ids = model.config.eos_token_id
+ for stop_id in stop_ids:
+ if input_ids[0][-1] == stop_id:
+ return True
+ return False
+
+
+if __name__ == "__main__":
+ history = []
+ max_length = 8192
+ top_p = 0.8
+ temperature = 0.6
+ stop = StopOnTokens()
+
+ print("Welcome to the GLM-4-9B CLI chat. Type your messages below.")
+ while True:
+ user_input = input("\nYou: ")
+ if user_input.lower() in ["exit", "quit"]:
+ break
+ history.append([user_input, ""])
+
+ messages = []
+ for idx, (user_msg, model_msg) in enumerate(history):
+ if idx == len(history) - 1 and not model_msg:
+ messages.append({"role": "user", "content": user_msg})
+ break
+ if user_msg:
+ messages.append({"role": "user", "content": user_msg})
+ if model_msg:
+ messages.append({"role": "assistant", "content": model_msg})
+ model_inputs = tokenizer.apply_chat_template(
+ messages,
+ add_generation_prompt=True,
+ tokenize=True,
+ return_tensors="pt"
+ ).to(model.device)
+ streamer = TextIteratorStreamer(
+ tokenizer=tokenizer,
+ timeout=60,
+ skip_prompt=True,
+ skip_special_tokens=True
+ )
+ generate_kwargs = {
+ "input_ids": model_inputs,
+ "streamer": streamer,
+ "max_new_tokens": max_length,
+ "do_sample": True,
+ "top_p": top_p,
+ "temperature": temperature,
+ "stopping_criteria": StoppingCriteriaList([stop]),
+ "repetition_penalty": 1.2,
+ "eos_token_id": model.config.eos_token_id,
+ }
+ t = Thread(target=model.generate, kwargs=generate_kwargs)
+ t.start()
+ print("GLM-4:", end="", flush=True)
+ for new_token in streamer:
+ if new_token:
+ print(new_token, end="", flush=True)
+ history[-1][1] += new_token
+
+ history[-1][1] = history[-1][1].strip()
diff --git a/basic_demo/trans_stress_test.py b/basic_demo/trans_stress_test.py
new file mode 100644
index 0000000..a801118
--- /dev/null
+++ b/basic_demo/trans_stress_test.py
@@ -0,0 +1,128 @@
+import argparse
+import time
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
+import torch
+from threading import Thread
+
+MODEL_PATH = 'THUDM/glm-4-9b-chat'
+
+
+def stress_test(token_len, n, num_gpu):
+ device = torch.device(f"cuda:{num_gpu - 1}" if torch.cuda.is_available() and num_gpu > 0 else "cpu")
+ tokenizer = AutoTokenizer.from_pretrained(
+ MODEL_PATH,
+ trust_remote_code=True,
+ padding_side="left"
+ )
+ model = AutoModelForCausalLM.from_pretrained(
+ MODEL_PATH,
+ trust_remote_code=True,
+ # quantization_config=BitsAndBytesConfig(load_in_4bit=True),
+ # low_cpu_mem_usage=True,
+ torch_dtype=torch.bfloat16
+ ).to(device).eval()
+ times = []
+ decode_times = []
+
+ print("Warming up...")
+ vocab_size = tokenizer.vocab_size
+ warmup_token_len = 20
+ random_token_ids = torch.randint(3, vocab_size - 200, (warmup_token_len - 5,), dtype=torch.long)
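+    # Wrap the random ids in GLM-4 chat special tokens so the warm-up input resembles a real chat prompt.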
+ start_tokens = [151331, 151333, 151336, 198]
+ end_tokens = [151337]
+ input_ids = torch.tensor(start_tokens + random_token_ids.tolist() + end_tokens, dtype=torch.long).unsqueeze(0).to(
+ device)
+ attention_mask = torch.ones_like(input_ids, dtype=torch.bfloat16).to(device)
+ position_ids = torch.arange(len(input_ids[0]), dtype=torch.bfloat16).unsqueeze(0).to(device)
+ warmup_inputs = {
+ 'input_ids': input_ids,
+ 'attention_mask': attention_mask,
+ 'position_ids': position_ids
+ }
+ with torch.no_grad():
+ _ = model.generate(
+ input_ids=warmup_inputs['input_ids'],
+ attention_mask=warmup_inputs['attention_mask'],
+ max_new_tokens=2048,
+ do_sample=False,
+ repetition_penalty=1.0,
+ eos_token_id=[151329, 151336, 151338]
+ )
+ print("Warming up complete. Starting stress test...")
+
+ for i in range(n):
+ random_token_ids = torch.randint(3, vocab_size - 200, (token_len - 5,), dtype=torch.long)
+ input_ids = torch.tensor(start_tokens + random_token_ids.tolist() + end_tokens, dtype=torch.long).unsqueeze(
+ 0).to(device)
+ attention_mask = torch.ones_like(input_ids, dtype=torch.bfloat16).to(device)
+ position_ids = torch.arange(len(input_ids[0]), dtype=torch.bfloat16).unsqueeze(0).to(device)
+ test_inputs = {
+ 'input_ids': input_ids,
+ 'attention_mask': attention_mask,
+ 'position_ids': position_ids
+ }
+
+ streamer = TextIteratorStreamer(
+ tokenizer=tokenizer,
+ timeout=36000,
+ skip_prompt=True,
+ skip_special_tokens=True
+ )
+
+ generate_kwargs = {
+ "input_ids": test_inputs['input_ids'],
+ "attention_mask": test_inputs['attention_mask'],
+ "max_new_tokens": 512,
+ "do_sample": False,
+ "repetition_penalty": 1.0,
+ "eos_token_id": [151329, 151336, 151338],
+ "streamer": streamer
+ }
+
+ start_time = time.time()
+ t = Thread(target=model.generate, kwargs=generate_kwargs)
+ t.start()
+
+ first_token_time = None
+ all_token_times = []
+
+ for token in streamer:
+ current_time = time.time()
+ if first_token_time is None:
+ first_token_time = current_time
+ times.append(first_token_time - start_time)
+ all_token_times.append(current_time)
+
+ t.join()
+ end_time = time.time()
+
+ avg_decode_time_per_token = len(all_token_times) / (end_time - first_token_time) if all_token_times else 0
+ decode_times.append(avg_decode_time_per_token)
+ print(
+ f"Iteration {i + 1}/{n} - Prefilling Time: {times[-1]:.4f} seconds - Average Decode Time: {avg_decode_time_per_token:.4f} tokens/second")
+
+ torch.cuda.empty_cache()
+
+ avg_first_token_time = sum(times) / n
+ avg_decode_time = sum(decode_times) / n
+ print(f"\nAverage First Token Time over {n} iterations: {avg_first_token_time:.4f} seconds")
+ print(f"Average Decode Time per Token over {n} iterations: {avg_decode_time:.4f} tokens/second")
+ return times, avg_first_token_time, decode_times, avg_decode_time
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Stress test for model inference")
+ parser.add_argument('--token_len', type=int, default=1000, help='Number of tokens for each test')
+ parser.add_argument('--n', type=int, default=3, help='Number of iterations for the stress test')
+ parser.add_argument('--num_gpu', type=int, default=1, help='Number of GPUs to use for inference')
+ args = parser.parse_args()
+
+ token_len = args.token_len
+ n = args.n
+ num_gpu = args.num_gpu
+
+ stress_test(token_len, n, num_gpu)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/basic_demo/trans_web_demo.py b/basic_demo/trans_web_demo.py
new file mode 100644
index 0000000..a98af97
--- /dev/null
+++ b/basic_demo/trans_web_demo.py
@@ -0,0 +1,165 @@
+"""
+This script creates an interactive web demo for the GLM-4-9B model using Gradio,
+a Python library for building quick and easy UI components for machine learning models.
+It's designed to showcase the capabilities of the GLM-4-9B model in a user-friendly interface,
+allowing users to interact with the model through a chat-like interface.
+"""
+
+import os
+import gradio as gr
+import torch
+from threading import Thread
+
+from typing import Union
+from pathlib import Path
+from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM
+from transformers import (
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ PreTrainedModel,
+ PreTrainedTokenizer,
+ PreTrainedTokenizerFast,
+ StoppingCriteria,
+ StoppingCriteriaList,
+ TextIteratorStreamer
+)
+
+ModelType = Union[PreTrainedModel, PeftModelForCausalLM]
+TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
+
+MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/glm-4-9b-chat')
+TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH)
+
+
+def _resolve_path(path: Union[str, Path]) -> Path:
+ return Path(path).expanduser().resolve()
+
+
+def load_model_and_tokenizer(
+ model_dir: Union[str, Path], trust_remote_code: bool = True
+) -> tuple[ModelType, TokenizerType]:
+ model_dir = _resolve_path(model_dir)
+ if (model_dir / 'adapter_config.json').exists():
+ model = AutoPeftModelForCausalLM.from_pretrained(
+ model_dir, trust_remote_code=trust_remote_code, device_map='auto'
+ )
+ tokenizer_dir = model.peft_config['default'].base_model_name_or_path
+ else:
+ model = AutoModelForCausalLM.from_pretrained(
+ model_dir, trust_remote_code=trust_remote_code, device_map='auto'
+ )
+ tokenizer_dir = model_dir
+ tokenizer = AutoTokenizer.from_pretrained(
+ tokenizer_dir, trust_remote_code=trust_remote_code, use_fast=False
+ )
+ return model, tokenizer
+
+
+model, tokenizer = load_model_and_tokenizer(MODEL_PATH, trust_remote_code=True)
+
+
+class StopOnTokens(StoppingCriteria):
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+ stop_ids = model.config.eos_token_id
+ for stop_id in stop_ids:
+ if input_ids[0][-1] == stop_id:
+ return True
+ return False
+
+
+def parse_text(text):
+ lines = text.split("\n")
+ lines = [line for line in lines if line != ""]
+ count = 0
+ for i, line in enumerate(lines):
+ if "```" in line:
+ count += 1
+ items = line.split('`')
+ if count % 2 == 1:
+ lines[i] = f''
+ else:
+ lines[i] = f'
'
+ else:
+ if i > 0:
+ if count % 2 == 1:
+ line = line.replace("`", "\`")
+ line = line.replace("<", "<")
+ line = line.replace(">", ">")
+ line = line.replace(" ", " ")
+ line = line.replace("*", "*")
+ line = line.replace("_", "_")
+ line = line.replace("-", "-")
+ line = line.replace(".", ".")
+ line = line.replace("!", "!")
+ line = line.replace("(", "(")
+ line = line.replace(")", ")")
+ line = line.replace("$", "$")
+ lines[i] = "
" + line
+ text = "".join(lines)
+ return text
+
+
+def predict(history, max_length, top_p, temperature):
+ stop = StopOnTokens()
+ messages = []
+ for idx, (user_msg, model_msg) in enumerate(history):
+ if idx == len(history) - 1 and not model_msg:
+ messages.append({"role": "user", "content": user_msg})
+ break
+ if user_msg:
+ messages.append({"role": "user", "content": user_msg})
+ if model_msg:
+ messages.append({"role": "assistant", "content": model_msg})
+
+ model_inputs = tokenizer.apply_chat_template(messages,
+ add_generation_prompt=True,
+ tokenize=True,
+ return_tensors="pt").to(next(model.parameters()).device)
+ streamer = TextIteratorStreamer(tokenizer, timeout=60, skip_prompt=True, skip_special_tokens=True)
+ generate_kwargs = {
+ "input_ids": model_inputs,
+ "streamer": streamer,
+ "max_new_tokens": max_length,
+ "do_sample": True,
+ "top_p": top_p,
+ "temperature": temperature,
+ "stopping_criteria": StoppingCriteriaList([stop]),
+ "repetition_penalty": 1.2,
+ "eos_token_id": model.config.eos_token_id,
+ }
+ t = Thread(target=model.generate, kwargs=generate_kwargs)
+ t.start()
+ for new_token in streamer:
+ if new_token:
+ history[-1][1] += new_token
+ yield history
+
+
+with gr.Blocks() as demo:
+ gr.HTML("""GLM-4-9B Gradio Simple Chat Demo
""")
+ chatbot = gr.Chatbot()
+
+ with gr.Row():
+ with gr.Column(scale=4):
+ with gr.Column(scale=12):
+ user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10, container=False)
+ with gr.Column(min_width=32, scale=1):
+ submitBtn = gr.Button("Submit")
+ with gr.Column(scale=1):
+ emptyBtn = gr.Button("Clear History")
+ max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="Maximum length", interactive=True)
+ top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
+ temperature = gr.Slider(0.01, 1, value=0.6, step=0.01, label="Temperature", interactive=True)
+
+
+ def user(query, history):
+ return "", history + [[parse_text(query), ""]]
+
+
+ submitBtn.click(user, [user_input, chatbot], [user_input, chatbot], queue=False).then(
+ predict, [chatbot, max_length, top_p, temperature], chatbot
+ )
+ emptyBtn.click(lambda: None, None, chatbot, queue=False)
+
+demo.queue()
+demo.launch(server_name="127.0.0.1", server_port=8000, inbrowser=True, share=True)
diff --git a/basic_demo/vllm_cli_demo.py b/basic_demo/vllm_cli_demo.py
new file mode 100644
index 0000000..8f7e11f
--- /dev/null
+++ b/basic_demo/vllm_cli_demo.py
@@ -0,0 +1,108 @@
+"""
+This script creates a CLI demo with a vLLM backend for the glm-4-9b model,
+allowing users to interact with the model through a command-line interface.
+
+Usage:
+- Run the script to start the CLI demo.
+- Interact with the model by typing questions and receiving responses.
+
+Note: The script includes a modification to handle markdown to plain text conversion,
+ensuring that the CLI interface displays formatted text correctly.
+"""
+import time
+import asyncio
+from transformers import AutoTokenizer
+from vllm import SamplingParams, AsyncEngineArgs, AsyncLLMEngine
+from typing import List, Dict
+
+MODEL_PATH = 'THUDM/glm-4-9b'
+
+
+def load_model_and_tokenizer(model_dir: str):
+ engine_args = AsyncEngineArgs(
+ model=model_dir,
+ tokenizer=model_dir,
+ tensor_parallel_size=1,
+ dtype="bfloat16",
+ trust_remote_code=True,
+ gpu_memory_utilization=0.3,
+ enforce_eager=True,
+ worker_use_ray=True,
+ engine_use_ray=False,
+ disable_log_requests=True
+ )
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_dir,
+ trust_remote_code=True,
+ encode_special_tokens=True
+ )
+ engine = AsyncLLMEngine.from_engine_args(engine_args)
+ return engine, tokenizer
+
+
+engine, tokenizer = load_model_and_tokenizer(MODEL_PATH)
+
+
+async def vllm_gen(messages: List[Dict[str, str]], top_p: float, temperature: float, max_dec_len: int):
+ inputs = tokenizer.apply_chat_template(
+ messages,
+ add_generation_prompt=True,
+ tokenize=False
+ )
+ params_dict = {
+ "n": 1,
+ "best_of": 1,
+ "presence_penalty": 1.0,
+ "frequency_penalty": 0.0,
+ "temperature": temperature,
+ "top_p": top_p,
+ "top_k": -1,
+ "use_beam_search": False,
+ "length_penalty": 1,
+ "early_stopping": False,
+ "stop_token_ids": [151329, 151336, 151338],
+ "ignore_eos": False,
+ "max_tokens": max_dec_len,
+ "logprobs": None,
+ "prompt_logprobs": None,
+ "skip_special_tokens": True,
+ }
+ sampling_params = SamplingParams(**params_dict)
+ async for output in engine.generate(inputs=inputs, sampling_params=sampling_params, request_id=f"{time.time()}"):
+ yield output.outputs[0].text
+
+
+async def chat():
+ history = []
+ max_length = 8192
+ top_p = 0.8
+ temperature = 0.6
+
+ print("Welcome to the GLM-4-9B CLI chat. Type your messages below.")
+ while True:
+ user_input = input("\nYou: ")
+ if user_input.lower() in ["exit", "quit"]:
+ break
+ history.append([user_input, ""])
+
+ messages = []
+ for idx, (user_msg, model_msg) in enumerate(history):
+ if idx == len(history) - 1 and not model_msg:
+ messages.append({"role": "user", "content": user_msg})
+ break
+ if user_msg:
+ messages.append({"role": "user", "content": user_msg})
+ if model_msg:
+ messages.append({"role": "assistant", "content": model_msg})
+
+ print("\nGLM-4: ", end="")
+ current_length = 0
+ output = ""
+ async for output in vllm_gen(messages, top_p, temperature, max_length):
+ print(output[current_length:], end="", flush=True)
+ current_length = len(output)
+ history[-1][1] = output
+
+
+if __name__ == "__main__":
+ asyncio.run(chat())
diff --git a/composite_demo/.gitignore b/composite_demo/.gitignore
new file mode 100644
index 0000000..7fb683f
--- /dev/null
+++ b/composite_demo/.gitignore
@@ -0,0 +1,181 @@
+*venv
+*.DS_Store
+*model
+*.idea/
+
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
+# End of https://www.toptal.com/developers/gitignore/api/python
diff --git a/composite_demo/README.md b/composite_demo/README.md
new file mode 100644
index 0000000..d431899
--- /dev/null
+++ b/composite_demo/README.md
@@ -0,0 +1,167 @@
+# GLM-4-9B Web Demo
+
+Read this in [English](README_en.md)
+
+
+
+## 安装
+
+我们建议通过 [Conda](https://docs.conda.io/en/latest/) 进行环境管理。
+执行以下命令新建一个 conda 环境并安装所需依赖:
+
+```bash
+conda create -n glm-4-demo python=3.12
+conda activate glm-4-demo
+pip install -r requirements.txt
+```
+
+请注意,本项目需要 Python 3.10 或更高版本。
+此外,使用 Code Interpreter 还需要安装 Jupyter 内核:
+
+```bash
+ipython kernel install --name glm-4-demo --user
+```
+
+您可以修改 `~/.local/share/jupyter/kernels/glm-4-demo/kernel.json` 来改变 Jupyter 内核的配置,包括内核的启动参数等。例如,若您希望在使用 All Tools 的 Python 代码执行能力时使用 Matplotlib 画图,可以在 `argv` 数组中添加 `"--matplotlib=inline"`。
+
+若要使用浏览器和搜索功能,还需要启动浏览器后端。首先,根据 [Node.js](https://nodejs.org/en/download/package-manager)
+官网的指示安装 Node.js,然后安装包管理器 [PNPM](https://pnpm.io) 之后安装浏览器服务的依赖:
+
+```bash
+cd browser
+npm install -g pnpm
+pnpm install
+```
+
+## 运行
+
+1. 修改 `browser/src/config.ts` 中的 `BING_SEARCH_API_KEY` 配置浏览器服务需要使用的 Bing 搜索 API Key:
+
+ ```diff
+ --- a/browser/src/config.ts
+ +++ b/browser/src/config.ts
+ @@ -3,7 +3,7 @@ export default {
+
+ BROWSER_TIMEOUT: 10000,
+ BING_SEARCH_API_URL: 'https://api.bing.microsoft.com/v7.0',
+ - BING_SEARCH_API_KEY: '',
+ + BING_SEARCH_API_KEY: '',
+
+ HOST: 'localhost',
+ PORT: 3000,
+ ```
+
+2. 文生图功能需要调用 CogView API。修改 `src/tools/config.py`
+ ,提供文生图功能需要使用的 [智谱 AI 开放平台](https://open.bigmodel.cn) API Key:
+
+ ```diff
+ --- a/src/tools/config.py
+ +++ b/src/tools/config.py
+ @@ -2,5 +2,5 @@ BROWSER_SERVER_URL = 'http://localhost:3000'
+
+ IPYKERNEL = 'glm-4-demo'
+
+ -ZHIPU_AI_KEY = ''
+ +ZHIPU_AI_KEY = ''
+ COGVIEW_MODEL = 'cogview-3'
+ ```
+
+3. 启动浏览器后端,在单独的 shell 中:
+
+ ```bash
+ cd browser
+ pnpm start
+ ```
+
+4. 运行以下命令在本地加载模型并启动 demo:
+
+ ```bash
+ streamlit run main.py
+ ```
+
+之后即可从命令行中看到 demo 的地址,点击即可访问。初次访问需要下载并加载模型,可能需要花费一定时间。
+
+如果已经在本地下载了模型,可以通过 `export *_MODEL_PATH=/path/to/model` 来指定从本地加载模型。可以指定的模型包括:
+- `CHAT_MODEL_PATH`: 用于 All Tools 模式与文档解读模式,默认为 `THUDM/glm-4-9b-chat`。
+- `VLM_MODEL_PATH`: 用于 VLM 模式,默认为 `THUDM/glm-4v-9b`。
+
+Chat 模型支持使用 [vLLM](https://github.com/vllm-project/vllm) 推理。若要使用,请安装 vLLM 并设置环境变量 `USE_VLLM=1`。
+
+如果需要自定义 Jupyter 内核,可以通过 `export IPYKERNEL=` 来指定。
+
+## 使用
+
+GLM-4 Demo 拥有三种模式:
+
+- All Tools: 具有完整工具调用能力的对话模式,原生支持网页浏览、代码执行、图片生成,并支持自定义工具。
+- 文档解读: 支持上传文档进行文档解读与对话。
+- 多模态: 支持上传图像进行图像理解与对话。
+
+### All Tools
+
+本模式兼容 ChatGLM3-6B 的工具注册流程。
++ 代码能力,绘图能力,联网能力已经自动集成,用户只需按照要求配置对应的Key。
++ 本模式下不支持系统提示词,模型会自动构建提示词。
+
+对话模式下,用户可以直接在侧边栏修改 top_p, temperature 等参数来调整模型的行为。
+
+与模型对话时,模型将会自主决定进行工具调用。
+
+
+
+由于原始结果可能较长,默认情况下工具调用结果被隐藏,可以通过展开折叠框查看原始的工具调用结果。
+
+模型拥有进行网页搜索和 Python 代码执行的能力。同时,模型也可以连续调用多个工具。例如:
+
+
+
+此时模型通过调用浏览器工具进行搜索获取到了需要的数据,之后将会调用 Python 工具执行代码,利用 Matplotlib 绘图:
+
+
+
+如果提供了智谱开放平台 API Key,模型也可以调用 CogView 进行图像生成:
+
+
+
+#### 自定义工具
+
+可以通过在 `tool_registry.py` 中注册新的工具来增强模型的能力。只需要使用 `@register_tool`
+装饰函数即可完成注册。对于工具声明,函数名称即为工具的名称,函数 docstring
+即为工具的说明;对于工具的参数,使用 `Annotated[typ: type, description: str, required: bool]` 标注参数的类型、描述和是否必须。
+
+例如,`get_weather` 工具的注册如下:
+
+```python
+@register_tool
+def get_weather(
+ city_name: Annotated[str, 'The name of the city to be queried', True],
+) -> str:
+ """
+ Get the weather for `city_name` in the following week
+ """
+ ...
+```
+
+
+
+### 文档解读
+
+用户可以上传文档,使用 GLM-4-9B的长文本能力,对文本进行理解。可以解析 pptx,docx,pdf等文件。
+
++ 本模式下不支持工具调用和系统提示词。
++ 如果文本很长,可能导致模型需要的显存较高,请确认你的硬件配置。
+
+
+
+### 多模态
+
+多模态模式下,用户可以利用 GLM-4V 的多模态理解能力,上传图像并与 GLM-4V 进行多轮对话:
+
+用户可以上传图片,使用 GLM-4-9B的图像理解能力,对图片进行理解。
+
++ 本模式必须使用 glm-4v-9b 模型。
++ 本模式下不支持工具调用和系统提示词。
++ 模型仅能对一张图片进行理解和联系对话,如需更换图片,需要开启一个新的对话。
++ 图像支持的分辨率为 1120 x 1120
+
+
diff --git a/composite_demo/README_en.md b/composite_demo/README_en.md
new file mode 100644
index 0000000..57b462f
--- /dev/null
+++ b/composite_demo/README_en.md
@@ -0,0 +1,155 @@
+# GLM-4-9B Web Demo
+
+
+
+## Installation
+
+We recommend using [Conda](https://docs.conda.io/en/latest/) for environment management.
+
+Execute the following commands to create a conda environment and install the required dependencies:
+
+```bash
+conda create -n glm-4-demo python=3.12
+conda activate glm-4-demo
+pip install -r requirements.txt
+```
+
+Please note that this project requires Python 3.10 or higher.
+In addition, you need to install the Jupyter kernel to use the Code Interpreter:
+
+```bash
+ipython kernel install --name glm-4-demo --user
+```
+
+You can modify `~/.local/share/jupyter/kernels/glm-4-demo/kernel.json` to change the configuration of the Jupyter
+kernel, including the kernel startup parameters. For example, if you want to use Matplotlib to draw when using the
+Python code execution capability of All Tools, you can add `"--matplotlib=inline"` to the `argv` array.
+
+To use the browser and search functions, you also need to start the browser backend. First, install Node.js according to
+the instructions on the [Node.js](https://nodejs.org/en/download/package-manager)
+official website, then install the package manager [PNPM](https://pnpm.io) and then install the browser service
+dependencies:
+
+```bash
+cd browser
+npm install -g pnpm
+pnpm install
+```
+
+## Run
+
+1. Modify `BING_SEARCH_API_KEY` in `browser/src/config.ts` to configure the Bing Search API Key that the browser service
+ needs to use:
+
+```diff
+--- a/browser/src/config.ts
++++ b/browser/src/config.ts
+@@ -3,7 +3,7 @@ export default {
+
+BROWSER_TIMEOUT: 10000,
+BING_SEARCH_API_URL: 'https://api.bing.microsoft.com/v7.0',
+- BING_SEARCH_API_KEY: '',
++ BING_SEARCH_API_KEY: '',
+
+HOST: 'localhost',
+PORT: 3000,
+```
+
+2. The text-to-image function needs to call the CogView API. Modify `src/tools/config.py`
+   and provide the [Zhipu AI Open Platform](https://open.bigmodel.cn) API Key required for text-to-image generation:
+
+```diff
+--- a/src/tools/config.py
++++ b/src/tools/config.py
+@@ -2,5 +2,5 @@ BROWSER_SERVER_URL = 'http://localhost:3000'
+
+IPYKERNEL = 'glm-4-demo'
+
+-ZHIPU_AI_KEY = ''
++ZHIPU_AI_KEY = ''
+COGVIEW_MODEL = 'cogview-3'
+```
+
+3. Start the browser backend in a separate shell:
+
+```bash
+cd browser
+pnpm start
+```
+
+4. Run the following commands to load the model locally and start the demo:
+
+```bash
+streamlit run main.py
+```
+
+Then you can see the demo address from the command line and click it to access it. The first access requires downloading
+and loading the model, which may take some time.
+
+If you have already downloaded a model locally, you can load it from the local path
+by setting `export *_MODEL_PATH=/path/to/model`. The models that can be specified include:
+
+- `CHAT_MODEL_PATH`: used for All Tools mode and document interpretation mode, the default is `THUDM/glm-4-9b-chat`.
+
+- `VLM_MODEL_PATH`: used for VLM mode, the default is `THUDM/glm-4v-9b`.
+
+The Chat model supports inference with [vLLM](https://github.com/vllm-project/vllm). To use it, please install vLLM and
+set the environment variable `USE_VLLM=1`.
+
+If you need to customize the Jupyter kernel, you can specify it by `export IPYKERNEL=`.
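+
+For example, assuming the checkpoints were downloaded under `/data/models` (these paths are only illustrative), the whole demo can be configured through environment variables before launch:
+
+```bash
+# Illustrative local paths -- replace with your actual checkpoint directories.
+export CHAT_MODEL_PATH=/data/models/glm-4-9b-chat
+export VLM_MODEL_PATH=/data/models/glm-4v-9b
+# Optional: serve the chat model with vLLM and use the Jupyter kernel installed above.
+export USE_VLLM=1
+export IPYKERNEL=glm-4-demo
+streamlit run main.py
+```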
+
+## Usage
+
+The GLM-4 Demo has three modes:
+
+- All Tools mode
+- Text interpretation mode
+- Image understanding (VLM) mode
+
+### All Tools mode
+
+You can enhance the model's capabilities by registering new tools in `tool_registry.py`. Just decorate a function with
+`@register_tool` to complete the registration. For tool declarations, the function name is the name of the tool and the
+function docstring is the description of the tool; for tool parameters, use
+`Annotated[typ: type, description: str, required: bool]` to annotate each parameter's type, description, and whether it
+is required.
+
+For example, the registration of the `get_weather` tool is as follows:
+
+```python
+@register_tool
+def get_weather(
+ city_name: Annotated[str, 'The name of the city to be queried', True],
+) -> str:
+    """
+    Get the weather for `city_name` in the following week
+    """
+    ...
+```
+
+This mode is compatible with the tool registration process of ChatGLM3-6B.
+
++ Code execution, drawing, and web-browsing capabilities are integrated automatically. Users only need to
+  configure the corresponding API keys as required.
++ System prompts are not supported in this mode; the model builds the prompt automatically.
+
+### Text interpretation mode
+
+Users can upload documents and use the long text capability of GLM-4-9B to understand the text. It can parse pptx, docx,
+pdf and other files.
+
++ Tool calls and system prompts are not supported in this mode.
++ If the text is very long, the model may require a large amount of GPU memory. Please check your hardware
+  configuration.
+
+### Image understanding mode
+
+Users can upload images and use the image understanding capabilities of GLM-4V-9B to chat about them.
+
++ This mode must use the glm-4v-9b model.
++ Tool calls and system prompts are not supported in this mode.
++ The model can only understand and discuss a single image per conversation. If you need to change the image, you need
+  to open a new conversation.
++ The supported image resolution is 1120 x 1120.
\ No newline at end of file
diff --git a/composite_demo/assets/cogview.png b/composite_demo/assets/cogview.png
new file mode 100644
index 0000000000000000000000000000000000000000..fabe410bf2d21e5f252f2b3b11572fc73d54cb23
GIT binary patch
literal 1957719
[... base85-encoded PNG data for composite_demo/assets/cogview.png omitted ...]
zE{1hwo%LtA(_({l;g6NC|7q>LwKocD27G;81o{^A>
z*t9AX=Zm!kulEwV-i}H?c!khsHLZ6&IEwg48!Q){W-H&r!4b
zeYxHqkboWcuIK_$nK5TSHUxhi7FhHUUTpkzR{%xpW^^)Z<8j2Aj2;W#29IwW@F
z>JZZY2+5i2jG6S{U1H53v|S&N_?M+)U&gcRY0D>1!^Bad)ME37Sz5K
zW8f73pI7^==+Kkzyb%n)V}r}qZVRJtn64oH+p9fHR`5GOpBCibU#K_JXzF*8e`vN~
z40`~R-!4aca2`!^+X*hhtw42tc)h|b^22oOgDIt4YPH_XokCG66{*3;T80h6NjWwx
zUxK*%FZyai>&?#4QD+b2omqe=_S-B(7FBhNIkii!hy)7_$!;5m(eW~$kQA?%
zp_R}cz2Kvg4#^9*f?RJ9P)B86od|AjPI>|Y$u}A_M?JyC7Wk+InJvN`aK2>YHddpK
zjelf-LpxUl$@SV~b+a
zTZwKyw%=-?W>k3MubX+yxd|rjd@*y~dozEGxE0y9UUvb;It=Ini1fVuH+)`aC79wc
zNxIvWO8z|yfWP_1>++UBEG1_%oUo+~p>%2_LmS&_AjB;E+rw^;3WWnq=hs3)CgYYm!TY2HCU`$
zX@NNY=xgjQpOQoE=~5~+kCK;Eu$5+u`%et90R0)9nmz{0kr`Ehlz&TPHJJu+E%5#n
z5<%U^DTJZQz4nYuKNi%m+%gJdJdJd+N(70UbP&`>;_ngbaOqYU*sd(9GD00ua{4&Al_sE;TBHzJ`0^FF1)o!0tRs{d@d
z7$ES&dKBQnP99^`?!_d(mBXxFZ4v@W!Jm2d5
z@DG#OA>belTLJQV;selK$a^?8=&e@%6A%GZ_dcv3NqUF5R(Ba4{FI}(`6W8)*V1>YS;LcWBG@zcq8?0os^
zv}x6nB6Fq#!OiuWm+q;Jp-D!OOfTN~47RR50bt8$kFV8Ls3gS{rVgB?M0GI*GZ<05
z8riG~Ag?8y+#ru-M8KESJyb48SeB%CvGT!>?3gpvX%(&ve^3L3Cc?r{lc42U!MC32+aqZgQ
zwQ#k1Bq^X^{22fK{vRJ-io<@EPrytE9Ua;?O49O}>l1(LJ@CBs3GFSay4D8hfnT4(
zZY0EVb%PEBRj;ydvM~103W0h5AG*H|1aa|`AdYY+!D2<6_15k%m+C)lXKEb;4vY>p
zi@>nt^`-UZdw43rOl6XhuDvpIP=)mPsy?b1S`@8b=%H&5%t_GYZ>e_@o1^~1qdkz7
z*%hn5#+P*#_CALp|Eig~$e3uAotAuqO`1B-y!17Q;iE;h(7;y4P^RMM{ut#rLlR?=
zNUWe_$Bc7mb={%l;2DJ>8=x%Nyv4uZdIyKE;D7I5*%(fz
zU?Nuvn4oJ>UYH}7M%khh_hS8VecmbODMCk
z6G5#DIi`lc-aMP_x5;J8Xfq!{F7$CNm&Rj4O*&Bt0@V=oTff~8>(I5`M}7Pv6~>d7
z2J(#7VvN6MfB9|tDdxAViGFbMtN7=Y(B1+aCr2xXeM?E93Rr4Z;mJ1pAWCg0MV8~L
z$#f!>e3`choyyc>=1!J{`#KR&TQ!&PW>o#N|@T6b8L;CoJF
z{4PjE;aKE7eezRE2H9qCb-l{NIN2?_v}!7YAB!UY_RI=lbzEYtQ=^Kh-37G$X23ZB
z5ljCD^CV_-@zj+*xK9z^)hq1$cD=N%0Sm!3KQ7@;**skaW;}S!-JF7+29C<30Z!eu
ze`y)E;>DJdE#i0RB!TLsZik<2PX|QqXkZIa3KI%lm?Q)}4QO%?)pvhdF9ny`68*K8
zbps4{UEnw`-eo)gp~BpONzO{f5+N|3d5muir;1Y7CU~6o%rYMqDi1Rd+EM=_Iu!N*
zBj8Kg)NrGm@g$oCIg4)s|Y;>nStYq(K19P8BFJ
zUMKtnR7%tyQ+aZ?+0dhKYd}L>1xUk<-vpZbN9i=F8Gp5@K4FmHD?OF5TeW>?X~J|n
zlFg?H_+yM1_(lxTuM-s9Z&}S4LGrue(M!6tYObP3)y8avM-{VA7l&dEs(DndH0VOB
z);_S4P~0J3J5|&|=Bi^;O4TN##Sw?*EW9uV--Fmsph^~z=$SY#eL8Hz{+ppGD>E)M
z_I7`f_Ua%AgHrl0HnQAti?2Bl6m`I2s|S2bfE^V1?}iE*3^TtP55p|3mclTURm=AG
z=#RF!Z#{lj;`BeTwrI(}zmz{7o@UO>c(O4jOuGkCfWEl*=_B<^uzv)>z$#y9Gsf(jt*Jk-JUx6>*?(F
z(J9xX4j`G$$m>!Ut%jm-K%K|5`7AX@ephyopFWM3H^!_$b1@qhaArD&4H8U1HmV6I
zDk#sW&qe%**!86rA)G$@LhY{4cPi$uNc%ruh&$e=`{Q=G8PUB0QM%Kh;ewLb|9*jY
zyr}RmeVPnA-MFj=Ubszr&13u-T%F3?DF|8NKt9y=Rx}{s8`D!ru8Lrq#wTFrXq&^B
zby>lrfT2bNuIgXJvXrq0AmYn$YrN|9rXZSg`Q5p5GVA+W6LbL>`#-^Xi{mHN|`A
z@egfm0Go+d8pQ~Bf^`p|ki&!X65y+jd=iq^i<}7)C+36sO|P;2bt6apl45?SFx$Gh
z{Q-C#8fL-@FSS^u_F9u>)f7ZpfwQ7j8>xx6wee{LQtOJ^nj>N*t1~eNy(*+W+Yn@Gcm@^!7vH@
zb>jLuuNo4-?6n-TA^JUI@5ril`rh&Gf47|fyqmILcp1C_NeSqKDpm4$#JTm&MEHsf
ze1(Qy2}5o=!q>gv>xg(^+S#(X!$cxxrY$DOoIS1FybA8;nlDyDFZCa$-M?OJYSmcL
z3*aD7L62G;FQ+n2pGY_YryjDq2gHfl-ptCW1Xhy^&bK*_QVW~Wy(G1%?z2-UZO3NA
zAb;mUluCQpxI?!Tn0naZv0{L|LWNte!;l557x5&e1y#Lp;W!ZZFUnQnDRm)v1;i_g
zg3lfv5JjECfnz~jYAKO%7QT8`Fo@;PWBJo02xhYjzvOtG?R#_YKOL~98Bvq7gtNK3
zMkQc)OK=`=UT~Wb7<*h4OYk>89et0T(hxM*{n=8Z9HQOnAy3S2cPv%0)WN^3;6D
z2rrbP>}r=)znj6;
zYds1Y4+u`^YccAJzo2*+6q*2xVznR^=rF5O&PZL=SG5ls`hDf0KI$i|$nPoXB{*dx
zbNlo}r=_$2+Q0xw>h91X$byI3CnC1i`8kAHAslRzF2zGNwjt~GZi70niv`tw%jIA7!jILhov+_W#b2M-Ammr&>m~j$}GoOy`9T&Qe
z`Hz|Z??0q$6M+&p8FKCLO>x1v2agUC&@qMRg>eW09lT_S{6TvCL;iew6@HyKI&-j&
zm*tnW%3n{f#{u^}k+4oEL}LWGyJ?@Hg#h-y`kun)%c44bsE5MTX_)*pDIk>eSEF7F
zu3fRkIjg+!d@gfXNamREmr-Nw#6xG7vW!D(EG|Q!iYXUUO3|LEIAMUYIJ-JV5v=ng%d0AF9hzpaM!epMv3V(E=K+WxHhfvHzcV@n5Sj
zK6sEm$ia^Rks+oKPB((D*)6HB!tYgWoLri4mticRhfpcsb0`cjlvGf-ZNApoN#c
zPMFWo_bb+ucA#NY)BCxW&A@08ZiykhozM5nmkM;QC`Alap_nnBvA$gnthj*Lh2V$n
z7qMF3jf&-&&q8-|IfD1C3
z$-ZRE$BZ7lo+>eko(UJs0rFuU3C^DqY?KmtZvHp_nbS9r7~UZU2_QACDHvWEoSn1xLp7Py
z5pthxDq=e`PdJ!Z^%w>PaT!O8c~y-t9l)4t9Y^F0*Bt17%*#jS3l?@E!9J_JhQJ>d
zGg2S9mb5ozrp(d4pj*qAHs)k1@jw?>8_h5JQkzD`JcGZrud96KLGEH&O39iJ84sR>
z`Hu?7kv(X0LS&piO{H9Uk+t4#fn|PK)IW#5gPH$jt%UqVk*wT!)X2RsujycpOky@l
zFq&c0*T`v#QWtk3suvz9`jbg-IGxu43sEq%CgV8YjAVoDO_Zr0biixZ)pC5a_w!
z6~bm5kpZb(U9K48b(v})hyS5TD~T;j{C$Q8tpM2DOWceXFmN{3fsXpa6>VHghK>I-
z_)gEzonsXiH0vt6OATDh>D*9+4QiBVet8u5z^DLVf?5K}&1bE1jRa0%d4LGuI+qTV
zgp8hGz8Qy3;gfZ)Pw1chT<7;%C{jH2iqD%k{YqQ&x16xIzTsEoNOLX4yV#9UjL)fq
z`?Ql9;xB>=Fjfm}KSnVDmhqitBsawJBpYvsxfPcc%xyRu^E`s7E=aB$oN)-wz_TxR
zaOU<#VgGkO^?%+SH#tNyB#~1(49R0
z>w;la@D&{qp^Zmkl5+l0s(LoZRiRO_;#JM`Re8Gq!{!ghZ%Dp9oei?4Hi~LN9m?2S
zR-4}21J+VK$STBA?C&DPs0NU|TMYFWewprEVPRGr1r(wetIHI*+_K%^kAH~Uh8@Ig
zFjEmwUJs7z1uy5YS%I@N6@KZ%Aq-R6a73VBCzhm34iLPTvIpdInGxlVDH?i=*cX~K
zOx&t^z-`PjUpB#cjO)m~rkL^Vn|G0iqH6hUatW*V^)3!hyED!4x`+o{=m1@5Q7QTD
zCgm)%Qku+u5NQaWv{;e+z(5|kV`S${6Dm%btVp_
zpJb3bTNQ9osC^tJ;d|cDH-t(ZFjuwV2$7+uUUUF|&73NZ(
z#^}(8KylM#zwZDMEqD*J@6AaHywe*1cg8CO+gBQYQMA|Xa>kYD(E$=v97XTZoyiIv
zAB4^n`APqv!rQyFY=usw?s%X2B4pzp%@pSrcUzXBl|H7P27XBk(FfaECsVmD?YDiB=>25~Hw8eHk
zr;7p;;
zBoAE2CB;`jI|OqcHPbS-kxX>s#q6Yi{zuyf+^4_0H2bG)H9fBP9?fhnQ^gNI{H~gx
z%73a^&KOP(B$XG0>fTwt(JS0uac3DbVOlf;3m6rc)2!90zuUP;d;j?mR$RI(k0QGX
z+^HTXFoAa)H7{Dwp^ykj0Jx7t1${fF+TV*oPCbVE@6n?ly#W-BUNE-ZyJzlS9ZdeP
zzKuXBGh;V0Ugi~GZKki|QT}>#6Cg5%>S<)9gPK*1zM>Y705Z;^p`lQWlc0Wml$+;)
zNp$_kK&TxgGPaQk<$dh6s=g%@u+Rk9XwGmc!=MN0E%-Wo8$h$;U;quh-D|b@rnHoB
z;>>iF0O-;W=A`lh`omygho&40)E2(Q7c^hvHacS*Ja7v0RI5>gZAwDpdu#(Tgv16B
z7VOzLBhukTPbnLrF>q&FGuVVaLsNX`OU6r`(RaYaXz?$76xY`y-y00E@$T?@tK)+^
z0FT_EY@|yiGG-G!s(FKsUkwlM{HOL=5m93bt@36q<`AkY0-F?%`BjXD1PXM;pQ7RA
zpL&7>x(*!^)@thn8-7p%z+24m)PUAX8}9t4i|~Msbd>$P{6!t=A=9c#XPi#snhq=4
zGn~tH^d1B;N-ulQm+VLs5wBqxrm9N5VOHPbT(!A?rK0Zjbf&~#yUL4wVXSNJeGiXX
z%7O#E%UzdPo!P^qFbA^XqNazO9Q9DnYOMuyqu&N#BmC|h_=iFMRgG7O5OI>fxJ2;k
zU*JL4al;X96}OfV%8NhFzBU^vZPVy68WjYtUtL52C$G^{uoKBG!Mn3|Yujg8Hu$1n
z2nQ?`A;z3YUd`1=@W7JIBxulYbMZ5fS+YUlLz`2!9xX>hUeAUNYLR5mB1IV0hle^H
zg^VIelKvZLi%VA3=em2RhxrQdfE3{P#-&T~y7{D>@RRoAt2DH&%O5`U;LiE{-Opy@
zx>Fp;z}~G{TplYO3tVmOXR{Fh+$JSOf+xnA{Wep}F`>G5moJowGWl`3H&cLoy8hUi
zRhZA8AU$)oP09znejvV*T7}W~5mfx8K)yjTl5b*tq2~)blk^-G=f#U?L?2cRWAl;l
zW(!644%H$Bb+RGxo=o(1~>#)zZz=0YU*Kb;8ODhznS|OZ#};
z)vVZ3O6k30E?%>p+KNf#&JcNNme1!Z)MBwQ`13U@;Y;Y6dZRLVgd^U4c;M72xcQ^DNvT-V~;UL
z5mt)AXfsJ`x54n3nlSF~$p(rCFk6&&c^rve8ytk9rlty!{0KxFp9q5MavMlvFe||&
zW%@;l$`ao+Xf7Oyo|-A7VQ9UD5cW?OM`Uoa+i~FR{*ZWqfNe}u>OgQk?tD774aR3r
zoo)W*7VM?_KaTc*wExr28sb=RgJApzjHVYf6`?6La|-yB{~0kY%xFE*g!Le}pX1w4
zFVE4^Pq3_Jf1~{cv3|w2W)O-6t4Bnj?2Yl?VF;SdL;$>*rv|xZLE4JC0kpe3C9?qU
z82qSdu>ZWVtWPr#KJ}>W+&O0mz>DXie4HWGygKl!aJOR6m_~C<+sdJIG^6mOuM&A!
z>mj?ujM+TSF7I|&2Z9BHj|@6
zPv~u|UE=mRC&v250JDx**_QiY_VVU$S+s9+-0BTJ7f$T@8#MLFO2Txx95|0oq48F+
zps`@|thsP8EVICJ>v
zI@cOx^xygL@jNB^7GwsB99E7Vgt-XK4IDTf2!KmU^vVa`DF<2#=edR71D;T@KZC&B
z&0|wRU+V~OUPULILDj;}(3{%%&X~nsK%m9%
z-Fz(Z)IDZ@Y-*sc^8S~{?q*0ozxLUc*s0RNWyy488<6w?b(hsBOi^h^&B))zh9
zffDi=PFAS+VBgBnM;-g`FRZI;&vHX5BQvGUDV?)NsyA8$CuISLI_x2aUO}RRS!V?#
zaYso0lZ(nvB_+n6VEj;JGZ20lww=!#Tj?7laxP(-7Waia9u!+XCi2UEY;#t#eT(?6
zgUUUyrcQ!>$r}A*PZ~C33=$Ycat3Kr8(U;tf`6Y^Uk?m%|~x@&t^b-;A7
z#}oM$)}Qwn9iz(?Vf|1Ah13Pa);jY`1#@+UL?#C1UwwDjaqccEYkSyUC2Yq50c-gc{*Lu(>p-0!k9Lk8EnE;3zq?$pLrY_EBn%IkxIA$kq2BrjoI7`txsSRSclpHRsuvMWr|j;Y_TexE{4O
z9ja+2ktoHs4P{SY5YDFr5;6hUjZM8f3owz6!$}UF!j=dp4lyoO++h5I!qU1^#YX{CAAGj0;**nJX?C}R+*vf)jakSj)8q|g7AQ-Pe7|!G5h
zA!Q;X2#EtsMI#~1FKD{-52!>iqe%r=trsBJhR_;Ni<%sCA*{Tf{U2%gxU;fB`1M>W
zU@Q3FZ|k=rfKTU&$QWq&*MR$lIaPRNEA;Y6-zT$()oRAF%xvlTK5?YhIJk$4ST<4d
z^jk!!%BHXKdn@itSDj<><-q4?NW_TXXZr5ID!lceXmYur{wSz`Z0zoV1hY^;7e$*k
znp&5B62yx|*d%8}O&QPLaq?w<*}YR_T(`yr^LU6Z6*GJ{+@0wO-1M-MYtU8!%7vzCE?Tq8Mac=7sC9|mV&n69Z?TdVBmZq>g
z!`HWEwv0z!j6Y+HOXgK!MmLF?&`s?F$I$OEmzAGFr0IBT-nu<9;Punf|I_7=C8S-5
z>!U%Tm`5+d^d7DJy>`n7MKG%#{MJGoCmW~3UBd7PI3J9c?AQ%dd6-0ad(RKinL39p
zP|7=|w|u?KV-BRS0}x)Q+purnF-HXADLme8AHV#|sm#I*=)yKHZeCVP7@evfT;9XwsyzjF`!DXI~`
zBsARLss}(N;ZAg=edLSm`jL2bw3)-gXn|=k~WAa^F%jl%p6XE6i)NJRwUtIqx8q2za`##C*CK2}h4__6Og|2My`2+I)z&rj@$vC(R1O^&
z$hR{(W#c%b_-C#$2R7^0JPl&=uQz@GGbK%%J`-IJ(_6kM
zseO9=zUT&7-Gu0T}oq;?v9y)Ic@hMid<4vwCN7;xbL@b)lfr7ASGJ
z0SiGs8?-(ljTeVmNCg+gY8lL^IXsZMxyZiaoYNCb+xaWl1Z}r*_s=XL%(cd@B>!vG
zkD4AvHiRue53BV-6X@@vR0Uoxe2-R(ce`d=_!l^GF(dT&*t<8%Oj2zt%}Vxx<4@eE
z>4X|&xquyZeK6aJntvE+v!019TMI)xdsGEem|e(t0)ED(cRfy#Dz>Yb51R`(JKx$5
z1vkjW?4n$jT_V|Vvh89h@o4fvPAaT|*AgB&^7kt8_KX{=X#TQJE_iREhh|4AXcy$(
zd@b+ytjr`Kl3Y@6g~0$vz@R@cGl17b)**xHO`N=7_h2|HoUAe@==k
z$28ys8@Qp4G^3WZ&y+KTgTpHk&A8(bXNn;1>|U@0<~WLOz#y9AK>?mmUo8Hfdmg&M
zXm9pY!=2abnZdk~1+TbNuYL9YScBh)4X@`E@4MQd+?1Yy4GC1{jV4dE_qXgv{qDc-
zWUB7awBvirrTr)9+wNBG=V(oW#KQ2vCqYCcJ;m614s4vyo?qG
z2@4%;%{*Ck{0QPmL55}K^o^gPdhElRg1WZ-!7IUw3SFkE@0S)$9WJC=9x~85$ZEb{
zC0rVHtmL1ziicd`Pc)UhY7I~Yroo6Iu|c06(Rxk{ipik7)@EW
z@NaPTJE@BX&NTDywJ!!TQvTy?{_T4#m1?TAgvEywRpzWMl~TOdwu||vaB4(BVsrjh
zePJQ=Z>6I*j<*4EYu|HW)P<)x&$Qwx0T{Zx~C_H*K0nBCRjEVkiH%SXhyv6T?f?dYoUlW;XY
za>KHB&ichPKJ&!`axC4bC&B1lFvlU0eOMH>NOum`;T#DuU!p&0LtIPNj+!QEkT~@!
ziuq_xM%cy%>{3;4zyA+ObZZjr(qhyn-!0mpY~p)qJ6*LmYnCb{r4c_%PFZD$iDg
zys~dck@75E_{0|+pZ!Y-XN0oZeV8|%t-UK1xxZy!gR=cEjuBqa>CfjJkqr1AR2_PwvUKSdZzIkrz8~Y{0fJ_ul0i;a7*W)UjHV5*3s0)hKv4
zFB?U#6`x30*_gvY`~#P8LCa+%G_ch};c{mwf4Sr>telemB)a19{&|d%ly-)_OUfCC
ztBoly3>H)q=y|e-At(5N12oN^GR@3rePMSCX1S=B6@tSAKl{r(zWw7y)Fy%6rDD_c
z8Db9Kob0`u3rS49i}~V+6UgdjSRPrmS1Z5YMPR1mHo6EKqj6S}sDq0u3Y<)&-jS0t
zyP(+}xu4|(mU-}|(t?A*emzHOoE#kevHVvKa?_{A<
zUC=VlIOoQ4$i?K=xBF!vXO!&?bv%pm70U*4nWjWnLn*IEywTrH{T#W__~5E8?1||P
z+%X_zHy)$|tk~?!-EZGcy>S@${_xVFZ98uG&%{~&q%_5-geK0ly?kf)tm(rM}>vDK;qwL
zn{fF#m&(#=@idQ0sZhALx5ek6iE^7)!ZUlNj;bHatu4iG4r?lRbnhA8y#V3ULrWgF
z{xxhaFE3A>jzN^zVfFjAODTL>YA1}}#jLh!*2(t)lH1GskJ=q%w86Npx>Mn28Thp<
z)~g*dKUe4S?MUPV$CSZI@_?^M;BwjAPp^;r5`x%DIQgf~XWNqH3tU-ib7qN}9#Ts)
zy7{rS+a>A=hNq<4)vU-ONA};S#XNm``4x2USk~sZ*r=-wE5r+ovPqAT32|T+yd`bt
zMdGbmtOEJ92sZ}2WiDKAv9bHXhM%+1eHotxygIKZ?RN-i7szcPq937X
zohy6+yqbaYT&v$pfGk_hy1y%|yY}Z4+Plzz(}UQtaM8jMR`c{ms1Woh=Lqif
z^m`;juK!9b*neJBa@It>vPgMwJCyke>uJJCKv;X-!pEu)W@>Y6ud4HcPuYjrpKjZK
z#OrG>JrFqb1Y?7RuR@amFgyF1d;cJ-+q_0N`pd9M{QH55rObGnos-{lDz+;;jY!Zg
z8a30+B}T}n(&(sT$=ZR>tB4MG{p#Sbs)8?rtfxL#W2NVnC8RUk&tK@|oN!^&C-ij&
zo}VueOitE^WxvsswF#o0*JU&was#kQ8kDypUrrg?IkNE3L&xL4p~@{|5QhjSO|=Uq
zK&&^!KTT6Tf=XW=p11w=c2Dh&M*YAeGG@Q6Jj#yPU#lQ@zl$d)WCVGRBwC)?*HrS#
zAoJk&;0*@-@i{p~<&vnwH^yTv%$zNr%0PoxHseC5BL2W*2mQu&_m_x6f;*lT*m!qd
ziYwnV8@hD4kyV+D=y)Ll()m^?FF9VWh!I4ePshhp=Nve2pt+!j!{JCjRnm93Y5q9v
zHd^2FgP$S#4)b(YjmU$2;j6W2z^b}!ewkc-5=GMDg`Wi}o(6fX3^seT{9soeIM#Xcgzez93mcq;mVN?L**J9V>^^J`L_7D$EM98I+_jvG7~!%9zdoO`
zk&FYMjQVi;*mi?ev159Zah0EtH74o|@7I68sh>cDQEN*fmvL($5o=#BfJ42JmS&oIY&V4DfVAu_3RC9^A}6JP7m{(xSuyls1cUm23r(q>iO0#cQ#(G
z-*2TGHwPYE&eUZX-SJ}=g%tcqcA34mdfe|^K$+LbA464cuWtvuHO7_i=_~cD
z>;)ydGY?_&4?scNPu|z>6{)Tg0~=HJ`jcdDylcZzLH=(W;?y2uh$
zBS(c@t5WaH&g?VLs=qI(ajf(OggZcbCM;f%Gpu8(N^F2Hks_SFTNpD-A2Q4B|LilE
z4N+6kB$wIw4aq=Ui|6jR7gH-L=6tg0J1smsE{u%t5_rWtbnoQ=y
zsZhw!3A#ZHZ{`orrMNuL1+T4a<=;*Q@HrhYF$s+tIpaGQ6-119gO|P$qX)p`$u`}j
zz>H~|{70{o0Wm%`VKlQ+Y=8KE?XEikWFHZbgw8RNuiL_Cr76P2ye~w0JYXh5bM#Zo
zzLvy(oM3aVZr@JDd$aSM=~Ol3#(N&8pI9=+TlO;)<2J=iWE7VQbU#B|uK@o?;69O{
zv(WYNx8rH@XbTgcs}U2p+w$PeoYRnM>y4*!>5+O;Oxabzd(u5oW!cZ+JmQl&C~qbXvEkJS*=-(Ee(qQgdtyPnv@BznOvyYeQJ~?NG_R5o;Em
zj-RMACn&=Yb$x#qv3#gJwq`i>f5K@18O%E#G7OTITjLe6_#Q9Z|jhU4L~n{jn(L
zZt_sSuTkf!q|aVuiV);
zdQ0j2!C#^`yAJ*F)bL{{@Y(+hx^ks$J-pAHlv=qpBg>H|imfsp5cGeYH4xdiCGs8s
zatjE{#L7L12cuanVpo)!zShgskes~g@)L&QaX78g=K>>0l|bCU02Zgo6z
zZCr^Jhs8Pi{z9L@1~X!CyPg4RWaPFzApNzdl}DoOx604%Nv-wTQ&9S=J*?=cNX`8~
zo1b2BKn1~yQuZPD57&pnFfq5oiz5=>i&gK%>^L5deEDSepUWWnyBp{!`LV|QRqNl9
zw_M{n(b(X_dP^<4@7#4j%+7$5b};gr5$`kE*JqqkA!Di_cU5KgtY6qM+4nr+2Q?gY
zlQD7TjY5+0@0(VfO0_x69o(AQ5pNexeni=P>MMvG`+G5%@a8l*+*Fo14q}|w6uZ4_PozAL9IlCm4*StXn
z-tHp(m~AMZ0#cmc6H&p72icjR2M$>Kudk4tY!a{NQlrruN^EdzYq$e{&l9uyczFmX
z^P8X-r7^Ict@}G!vky;ZahiY9)Qk^4ziHuk9_5ltpn=ZUc%c76qDSvkuDm~=;+MZ0
zHuth1&$DF`Iq+sg|xur{x1u=_wN>ut!_zhHN)y89P7nggk6LAe3lG*>Fb
zrsRbMG+#IddLjn}s^XmuvR$bvDI3#}*Yf>zyW%8AjLLk#0;3B0xuBpSDX7{nQHE%@hJ$!r9W>svg5#9gI`z16}PLBU$m>z4u=U3?rMBu
z;2Y9*BSido(z2nrn0V!na&6br@6Qq+;dKrT+POaYTBoeaWAz6=PHoywkX95BJRKCrd+OoEt)yU!>X}DLY_dv;#nyZ)
z_s?Sp2%0@HvhVq$rJ8q|n9<5{A-$1xg*)ZT?iC&O{=gsHxG6E7HA$^1Y}09x(vOY5
zEVZd``lUD&vnsztGYc6kbd;TbNSK|%!bel+nr{cW<>sPNLPR7Htvo;O3(`?it3!BH
zoflK|?#sS+Q$<5AMtc5@MAVDtT(E@744{{Jt+7p>{#jm%(dPsp{)f;22b~{Fc@>l;
zq@X;SRi*pK(hmWj>ougd@4o1uyVXvunmyN2@171Ee<4n(gYdsw2QFkTfQ$B<=vN9N
zb(g=(+W4nU=R7EU9MOt{!z&;7P>cKTQ0CNvkgwpeVY^fvS6)xbzJ7#J<2^b1#wt4T
zcwFOn#{*^#)#%|h9{s9~Uc8Q;!T~&Zso(!J@4&jVq12JXR^DfiZFC{5I1~Lp)>;NH
z$bSG&dphNDCRBBS=TvrR%ZTj@OV6vnx%0-NNNr9Ue0$t!%HHR{r(T_b9%Y`jEXX#Z
zlQ`$wKFyw%B2wPL&&|)hx-f}XPw<4Vjz7F6j`~3D9oF($JN?CTA>Pbh`bZro`;h#o
zk}gDyM!0)_ZgA(Fv4F6M)oa`P2t??x;&k<2U&B?#tmGuV4v3}n?s<1(gtUl)W*G4v
z&yFOzf3M>AIZxr$)mjc{wh>$Vs(fS4N383A1s?s=B`3yZhZugExzkeJahu9wmUi+=
zhCameyIGXsM~G)WDvUKOaR6&6ZHa>aUr+vXaYcj*x;JEG2yX
zO5Uy^RiMc89RPGNh@nN6wcbo~!GLA<1)_jQ8!Go%Vx6TmpDVfZ?^ytwtL>WZ!7>RE
ziZ|@=0jae&>lW+a73Pn+`bA}78U?kHt9HxfRMtSjNft1z#3MZ3-10-5^s#Lsi5hM@
zYqxJc-?ztlG+FWZePd(OmnR#&Yd|t+1+U*;#=C08^$!1s(5#Ytk%-=$l8*JOCYJRj
z6Cq*WLUtKz)u&qU)-UYTnzzQ$ipZ}F%Jz)Ez!ZzGMdD7S*(wQhkgU=Ja~z&V&vdYu
z|BTG~oJ?@|^N`*g%Q=wc#Y6*l)|)xaeIDlRYnp2P9s|-T?Z@U*|0uAyBX+mx?JEP>
zLp8*fPzYKJ&n#$Bm-HBz(|Lbu--)}na84&SH#@tjdMy+B=LESQ1>_}fa@9@a)psZC
zm0;@uR37WJcg%q${tOf|c%-kIJsGCVZ5?#PjU3^8_|R@K#NYemsH0+kzmC_=N9%__ez0lN8clic;B;e?
zJ|7kFmkKun$NvyxcCl$&g~2{)D3~sE^LfB|arI!GmA_;}|A2zaM6UDJRDE8r`CImd
zTt7MWJzfU66zTKq0<+e@Z&&4-5^{)2=~#Cu;LxDM@tquq*QRu|@@Hb4B})FAL!^Z^uuu->(NYgq%(%4RZbX
zzrO0}l=mb^Y5YHAy>(O*ZruJ)$>mox{EN`@XK%b=@L24Ng7>Hff@+-}cr=trrDxv4tR^1c`C7}~0&(Ny{~EkLmgtxE|z1YGs;u&oSlBEIXDoRu+E
zP=@3A)Gt)8jO;e9_hO8$S#?XRZ>CYuCbeBO)6
zXKp~L{E6pRP@AiK@0=8~zAT*44NIsY^CKV{AIL5ZqgZZV4%8D2BBu(Dl-qrfBI2^l
zs5kf&=(xuFDfSI^yqtl@csOAK2z(-N`Nuj9{~hg_qq4SL2>Rrkp9qINw*tb(l>`#t
zbNwOri$8i$XdaKgO9@QfYB6_&v4x4{_WCU=nnJi+Ybq?#(VTjU>%qAD9XVM$$U2&I
zEVpO};HTUx^Fj06gV3u$8_zX<11yT?T+Hh`0yepaa9xs@e6O7Qj!x{E3*iU-^w&Rg
zemiB4=qh@PP6LR5Yd4bT7X{o1=nYI~9z5m{F=FLVXn&JBjsMG6(Cv%(sR%P-1?sZf
zY-9^$1v&S0J0z*jLPr3RvZh(PcSd4F$2omYP1vODzH5NIZdd!s(HX@NQK2D2$&aY3
z7-)9t0&Uq0480C20mwzD^ZNxlBK`iDA0h2nH-FdEP~D>*U>AL8%CISkSvZsZ_gyc0fGur;
zx&}9>?8&wFRI5gs7$=g~e3Pk{B=JX`1m=#EEjUn0?N=x_&tv^aj;yBVx2vWkj}YfZ
zNRW<`#OinYucUk+A6tCuM@MD(m0SmG4id78cSe
zg=y*YlmZ8kAspgXkxp~l0Ic#{DJ{d!uC!;~Z3Hq(lobz=kjP7lK}d{zx=n&LSU^%
z-~_izE`5z!bKmH=C%$Yhhb|v4fSkHaJk(4OQ5Z^odFJ@O74G5M&Yw|Z9kuSO8>0&A
z{{r9L{`1XhoQbo4T1FqzEq6bu%V<-yH-OG_;Fals8k3Qx#cOt=Pq$QRCJS9LMb%~;
zf<{lWG)3=O67karDwdIP4on;ME{_>~o>1jro8-sV^pv~q0JNca
zf|c{+uA$e9w%(Mc_OJcb*JAUowpP+=o^*a&T%FOS!g`=f(qoyg_$O=JfC268@3&Ruy*@aA=}dYI
zjz+*ImBn+Re4YAXNC^%qj*1S+w$n&zq1WyBh7$#-A;!7##xoQ~8Af1IVXq}ws`O5H
zmAg7c*|TC~sJp}rLP^%5V`ylZcI8}Exa6NVO~U>`JLzUWqFq(hLr~`LO@7*b!`FGY
zDxIcNHe2jtZ5UgE*SJ{M-T}Jj-QlB=>tcs36iqegam{d9`hp$wJWh)rG>7EC%^yUZ
zPID{|g4UHNvQohPHdM3Ba&c){J*OL!^ZPM9Y*5{rypP0=`^~Pz?j^*=!+r8IxqRbV
zQvf2GzrK|xYponT*#Ns-`hmT#EP!cwCMh>7_0RQ3gD4`(?J@uV9UXXfwn`tPgw
z3oc32BYy5zJ7mYJ?K;RRmepBRUFcVzPICVBrKK&Q!$T&QphTv-1VMYc(vkaswZ2#&
zA)tR+`awh)Z?!nxtKPea*V|k0nm6YG?Y+dSEzE|L(p>0Z8F_aR+m)M`kI^RWTcH>q4ApYVRSNLg1yQO2nux9^aZ;
z&QdKeQKsMd!*V;nYahzbWc#kj&0nC_BsWJ@I_X1V7tA->XcY-MH9PDNWBOV+JrxI)
z$P`>QA9#O>2rpMa`BqQ0&FDf->!~wgNs}5WoSWXLk(kHsPV?h^PGB0Pz(f|Lt<6I~
zaz9(TK2|#G$9~T@S`S+LUK8N=AE22Loz~MI)KHl|hj7o~)~AkJbJf2&s|hW`=S|V`r
zXHu^k1EXh_Q@H%-PqIHd5#g6=bS@teO`QF&_bMmr2|xW4#lYiqB(D3baQ
ziNK3{j6?zDf5U~9U|4w}uz>B}^}!T^-ud)zxF9h7LUZ$`Md-uQyBi(7YT4SEzY%r5
zYENgnR20_{XHww3Ws^`}v!K(8Qg>h43p^xa*s>-$UMK0W*ZihbYT%!C*vLtEv&cZA
z7hcE1)(v-Qy?qX~wG!q0RY8GnZoinJKamYNcOLukANDEXg`
z4OUV$T>eUDU33#pn}$#y*00g{<66!;8_OCoyKxyg?^Zr?<3GiHX`<#wT&qZ^?r7eN
zL}dc{7=QN6F!4H%FbqL(ca`-Suv}63h`iP`{%EFb-C!<
zsma*IzTAs++{E5BOCCqFvB~OY0uIlo;9ZgnZcZy6Ey(?y*u5Ip%?UvHMM|i`&fBy%
z7;@zx-dH$|PnF8?#PxYH(%_3dO9Fhwg&|*IFh0
zWZOm;k}ye)6PQYC!~L_`A)(5z0nrB({oS;-lI=|5cPe^1Bf_T)Lviy#3hRbkB{P!k
zoPN&kb|A65_ibjOn~VO@rF1hE-|fvxHICb(2G2U74wj_=j<@>m53e}(-GVw8@9RpP
zGU|S-XA+vNnW%d|<3yrjR!k-f5Gg`)5y0Nr=ba^-(SyZ*h295`6n~m=`4J1+@T*Uy
z@>Fd|l@VQ2*yAV9ax-em0<=!J
zxL^OTk%-bHAydF>n^*hs5g-H}lv!6??if>5z`Sn4+scLkQ85IFrt1S;vJ}_Uus7%2
zE(AV!)MCUD(mP^jO;At|EA>
z+kxlw#&OrmMWA!Nd7CX^-do%V?jlxnt2hvCi#~}Bl>0N0_0M1T=W4Iboy~Av&_*6(
zkG7)WB27cemX?%nO6Z+xDiL)RUR;^1#r1F9$Vv*#L*cVUa6RlYKD|
zh3!RtY3;Sf!6>fZa(W?Bq1qKn3d%(pwA%(GOQ-E1I{epX$g%08$B#EmF7#RheErOG
z9tJ_kWG+l*8nodvtEAfsy6k&XrSnIvI@rs5=NADWzTFhE{S#!Orq8a6%}F~Ot~j8}
zgc@&h@hSNJ+D>fyrqJt~R|+_aj`C^OY{UHMDwbvmp&FP!S5#QDN)-O3jA@Ytui??4
zau54x_}NEQ%+8i#U^CpARF*YrTB{8ccgtJ?@giyW4g=^QfsQme%0iJqhw@1llWI}o
z(*gDjk~u*Gt}eDv#kSs-JDZ(d0Rz@Bqm{TGB?#by@NR!FuZSr`d^>bBf_-jvI$&LC
zHGc>~hU&`2W6f9@Kmq_{xt%jOePnDN4G%2Po;7^%NF>#n_?qyA&Dde|IR$06xmHS%
zEjxON>Og+@^J%8^E557XWPN~)`|jQz^fZmK)5rE^U$3d1RI0vuH0`vnQ)gV?i{P0FI5e<#*csQZU$nKpT-h20nDe5J`bN}C(tnUjPQ~GaHRIKf`7?{Mj=g&L6KqqK_H9puXo`2V92)7wzqeDP%z3NR{Z>V9!4Fo++^j3A2E9BMB=!_+X52Zf#I_Y8V>vH;u-0dKZmHXQdZ=UH}>Ox{R
zr4B%?C!dMOcdPnDEY(`rv^H--+asMaFSFz1f!CPn4y7N=6v~PXNZ_H1EyoonE*5tJ
z*%!H473oyyFVPLazp%TpMt7R$_fOtVA!}a24d;+&x)MZCJZe8`NcMZh*zr7%i+^6t
zv8I0_ehqE@l4DMNhtJju@eZ+Y3}Dikxi%!2HXO!ib4g^Ju_Y{V6#6TZD9fj#dX{e?eA5
z!qH^AilG||vuP_{e+VwThm-K?Bn~{@@*9xDjPYEeIwlg}o6pPDtiESm_UD4W-)@`Y
zfgOLZ6a1jGahIw6I~O4r*R5;<+N^e48rV!>B29U33g~xGvLxtEumEo9M|IT9i($E!7sK3B2GKD8a=4Jfc6n3Ha3CDf)627E$`BHem1wwBVm1a
z_Q=5fjop
zX>$h-wx3%k7e%0X9{YK4KmPGJu~7edP$E_UNV|OhnTSsX(JbIesAB`)?SeX(A%*zM
znlLg0{E@j%yKa_ib0`Kx?$0{}4>E9YSKSHlATd<53ZaQ%#oIi^sVaQ8*5m0y6iJBa
zvr99Fa*C_pSUpPK649izmQ*B$=sb&NdCh4pulL~U*9j@3^V1$BKwetRx#K|xZC?Jr
z|F?fpUHq&y7{MQ$=!JeiYExdJ>DPN1l@yiUe%U
zBWdu;daL|Zy8Av4=<@kYOfVGCZ`@!JhQ-`mIel`5c?6(`U{y(2)RbWtLcvP;pONX5
zdc+ztk>T6syPZ=}+m(^odo?IA@~E|LPKAVx-FH2nn@34oJFyfUu~v6!*hslK0-Wc#
zUeJzYAmCnIk>2B)3CfN<9fm+iw(xLSzYDLzHhOuci+MIKywTkK@O#
zw^EQ?@CbKbGi2|2Et>JXWA)g+ugT9h7z4RofP?g5`
zCAp@+H;lT7l~LgZcQ&_U>>rYzc7_m1@n=?@RLvrSq*N`MykTstRgZl17Ylo*9*G>4
z_%2%9D90X78`iN5c0ul9*Vq={SH_9o8A!vi$qV5UL00Lz6Y|GFC8XFYL#i6x$MfWJ
zdo+IQPRFGLGrieuzbz<3LZ4b*9f4eqCwXXI2ForWx0&OM%t`17BYrUs$$#1N%J0fdxnaP{$VCglf(#$<4yw__@Koe)d?yWhGW
zJ^bGrW+NLBxMx<4aPFfYC(POwbYv>Za|_PhCL$-m26
zi~~F;+_0XopogC)F5hwaZHpu4)SCXc*tr~0+hZ%>tNrTF9!qyaIClfGj1f8=ImP2+b
z#+Ci%*A_$E@frvRhMr|ZsjWogcuw51{JZH?KQt`AARujfja&9Fb5-s~0J(wlAqW4ewpl
za1a`YR{7^MXFPw#PeiN0UtEjXAFh~O__S?JV|}GPcvJ90df$`WR3N5oIsP=E4|(y-e$6P7gG%mx1c(z;
zq~aGe+yy_e<7{3H)pdU-PKuv=tpRx7xYuY-40JGqW8XW^SzzD%^-OJw!g(;(=s9*=
zl?_e?bA`YXdam_2EsvAa(Z0HB!hR5!__Cfi%3lQEP)-cIN)wwc1b&$t{EFW;zu|!j
z)Yw#ny7;5yH<+;^*PX&I3cu=wSeH1gMs3pHe9&OuF3j?KiOK<42xGW0kHRAaGov_a
za=2mvw|iRmC7^(F5yUOCg-eZrte2xoHmnNl_emJ+tObVv*H>MXxWr3Hx8P{d=bM+?
z$~JIS9I13iIM<}-c)!`5J>Xw+*Mp8t+Rw|)OA*lyh<9X&_~3Tg(E9z=&UKm_^58pz
zo((>@X5(80=Ki;iXzCkp9DuVfE!;C)f*rmp$E4_w!0LeaQ_ad_TXwtl9@EC&VNz*-
zr#c!ViI8s^9w}Fm(hA0wdS}R5TQfW`b
zstH;|YAVzO&L{`f?E*dDdWDVr6w76`3P9ZH`3E60z)@
znpJ9lVa9cnFdcUm0dWY8!!@S!#Js#(nTR&~fP@V^f(T_1q^A-%0dcOLZswwWHxJa|
zv>NAicm03YR^EL9`|C=%l{?ILL=Jogn+Kc;@R|82=yP;%Z3ry
zut~s5*m7L#CF_Lu3;cwG?P_OzcZA&iv5b!~jqGzE?{cTvNJML8%FW&!9^v4SrFz#-
z5Y*dtLic$#7nc}W%qhqw0pAd;fX}t(t#C!}{G;24y5uQU*r8mUA%wzB`&G11MMQ}8
z&D7C1G0RSu09<9a(?&4t*=>OQObwLvXV@H1MNyQ;
z1DiIu6i4sQYNO=vQFId3Dh`Tn>@Qn6t}p&yFW^r-dtXG1urJBPhN@Z4Rw_WB|2d`Q
zny1$9nWKND%yojiIY4eIUf}UKlSe*6ed~;wvI1sOhgxTu!2})2`km(kUG_1{i+>XU
z8y)<+_wr;CyBSEU0>2eU>_x8`=4)hxHeR=ZY3zQh4jRQ#XE%BNmn9-uh5xm?J*CG#sN60^iE7PoADNvkTJ
zmxtG#a3qb=IB)uc2{T>2gTjI+%2(}xqrv@}T4~xLsA<@p@Fv(zZ>6&sfpy8R>mCUp
z;_&-Ox|mw{0+X9~TpG8EiF+6TM9MW^d$gd+6BHrFERyp0IzwB@gM~woe-$
zR%&!n!j}}p!cQAoNv
z-8=A@ORRk}{HluQzZr;6=s4SQ9N6)aWz*6{OPY6npmm`=JvB3!?RPO?yrQ}J;mp`R
zMqg^uB13ZLRB}lYBI!JvU>VD4vVk!uKGt#NUez#;^e5x|@t31tMZ1+P=fzV|GiVin
zC-Q>xU3!eb!7;_
zS4^N^jD&Qy`QO0S0%XVPUPgYjH8--vqv&>huzkyuQNpwQ!|N?BZHieFym0r!?-PK_
z#p!xO1n%0NKJ3NCEVCY`BJ=RUAf5pkGGd;1T+SQE$4wp_LPq-`&;J`<#kx&pvzkKQ
zc0{gow~K!IL5T9#d__&uo1rC_E^6+XApaI1-YCE2InHWpZ;vmE_!0D8a@4TgvcwSo
zGEKVpriYb@zX$KqFIBGkM)6j&pK^IqD&F$x1$kL2aD(Qs*`f}JIz9`oV2rVsh~;_j
z$eAB7YeMb0J`oGwO?@bTiOIN!&!fo1E49Vwm0wumBm`ntlp;`$!d{ADs4ikLK5T+k
zQNwt-*qKD#1aF^b;VtW1WCR5C=3p$UQtx@xtF$+sy%zAfAcEI@Ha_{G2?{=7mc)Sa
za0UV2O7`Av5tqlzaI4X&Z@TW7l)|%iRX!rjH-1)=Av(+B*UJ4LmYIuavTl^RG4E`t
zOsw`-ey2y!e^sls?3*piSHGt0;@AXt21f)DiF6@fP8!ls6W&tTmlgQ{V!jwXr+=ly
z@97gFk--w4p(9&XH1WI4K7F~8(!FHLm8vXYe_%Hk(f64=O`#r6M4N}CwgOv%~|ji_bBs-Uiy1TL+Fk*PmE8A3J<;<#muH>hrY;)r#h@D=&9K^eJ;GRRb6vv8B+)
zu)MM5VY|+Wj56HV!EX5lbh>Du92K|;m?EW~mnT{Y
zY^jnl3Z47zwmFLBDGxkdvhg3P)ISVnqGcr#KBb~t{5qoI
zs%3r(62~9SsWx1&QmTQ}JViy_LnS!$Mf-exK-RX<57)KckF`S^Ouz^Ai^z7B_t#ua
zLqrB<>1DHCi1ZcP>Gq?+&iYXb)ZnPQKHf9g;$_Bl7(=Q*5E5*-8zAT2?`G^{%;#K5L&>d?{j9
z9m^Ve|3xGJq=J8|6%l>?^q@G1P~2|#d=KWgQ%a=yOgv|vD-OmKg-f(d11*raE(3e&
z9^f7TH(a$8>w8%WV_h*RDgyA`)xL7|w(F+BX*tX&^=sRbIC!r`0$!u~U5_f}{GAA2
zkBXJSRKAn>0|2VZZ4ND9?!_u>DjGfbxfjIXckdrsRA?p%l3UU1tUm9jEM``gg
zg6W53R343erUDqnlPKLEqI+to_^q!4tVPJnKXtUI6h0LL9TLbh^-?d?B*AVE*BMqF
zLwhhQp?O*$cFgjG-=AlQoA}W_IUn9EKhJvrwtAS3ly9n&DR{(z6y?5E*L1k}k#Utm
z`8ZhO;(>%Y*%AojGH`V)G`DQB(t8ef%jQboj{A#hn9&y%^T9NzGZTLUZ0BCXl@;K6YCNl!|!}d3r)3Qib(kVs^aT
zNj6KfS>}kyPb06IS3Rct;F92+CTi`{o73i8SGMP+nClPsEg=sPf-YsOCJ9mh>$a8SL5w7p9=hH3IzS9=lzei*+SNhtR+dS
zi_p0+^h9FNbxzjk9BFr7yW#nE>|!vaJrwjWvWt!sgg8sOa6DZ(f1h_UV;PIj^7jSK
zFlfn{k6=Qv-b=px$H(ZE@}o8+XTUPyRU%#-l@&M=+J64z=RZz`F@|9H^0#o?0w>;H
zsrnxGF(~_CRR!M1sj;6>X5*j4-W+A_J-JTWS8PMnKC$xh#rp^3BY@Hrp)N6l62_?e#0xwzFCK
zJ-lZ^5p()4Ofw@%(J63&zx>VAs|Fw8myDs$jPwZmiGqs9#FixbG53|GR+ar86diRl
zXw~4)skHr7q?*h_z%*0q$fdAO`%*`3fq1Pg@xzn0)#23~!7`c4Cq<{BuUk#7+x|}e
zjP+%&IaAD%JdIP~cq4$T?i~}zD!F(kj#vJjetd(^Qh}=Sb@9_0-NKAo$MLmz(x9YyD5#j;+~EtX|`RNa^E#PAivo
z;-bgcO>f+