From 713dd2cd90fb8256d786cefd5304daa9a0775764 Mon Sep 17 00:00:00 2001
From: xxl <505279206@qq.com>
Date: Tue, 31 Dec 2024 11:47:03 +0800
Subject: [PATCH] first commit
---
README.md | 675 +++++++-
added_tokens.json | 11 +
config.json | 145 ++
configuration.json | 1 +
configuration_intern_vit.py | 120 ++
configuration_internlm2.py | 150 ++
configuration_internvl_chat.py | 96 ++
conversation.py | 391 +++++
eval_llm_benchmark.log | 61 +
examples/image1.jpg | Bin 0 -> 78073 bytes
examples/image2.jpg | Bin 0 -> 125656 bytes
examples/red-panda.mp4 | Bin 0 -> 1867237 bytes
generation_config.json | 8 +
model-00001-of-00004.safetensors | 3 +
model-00002-of-00004.safetensors | 3 +
model-00003-of-00004.safetensors | 3 +
model-00004-of-00004.safetensors | 3 +
model.safetensors.index.json | 580 +++++++
modeling_intern_vit.py | 430 +++++
modeling_internlm2.py | 1415 +++++++++++++++++
modeling_internvl_chat.py | 349 ++++
preprocessor_config.json | 19 +
...ents.1731928182.HOST-10-140-60-23.129254.0 | 3 +
special_tokens_map.json | 47 +
tokenization_internlm2.py | 235 +++
tokenization_internlm2_fast.py | 211 +++
tokenizer.model | 3 +
tokenizer_config.json | 179 +++
28 files changed, 5139 insertions(+), 2 deletions(-)
create mode 100644 added_tokens.json
create mode 100644 config.json
create mode 100644 configuration.json
create mode 100644 configuration_intern_vit.py
create mode 100644 configuration_internlm2.py
create mode 100644 configuration_internvl_chat.py
create mode 100644 conversation.py
create mode 100644 eval_llm_benchmark.log
create mode 100644 examples/image1.jpg
create mode 100644 examples/image2.jpg
create mode 100644 examples/red-panda.mp4
create mode 100644 generation_config.json
create mode 100644 model-00001-of-00004.safetensors
create mode 100644 model-00002-of-00004.safetensors
create mode 100644 model-00003-of-00004.safetensors
create mode 100644 model-00004-of-00004.safetensors
create mode 100644 model.safetensors.index.json
create mode 100644 modeling_intern_vit.py
create mode 100644 modeling_internlm2.py
create mode 100644 modeling_internvl_chat.py
create mode 100644 preprocessor_config.json
create mode 100644 runs/Nov18_19-03-50_HOST-10-140-60-23/events.out.tfevents.1731928182.HOST-10-140-60-23.129254.0
create mode 100644 special_tokens_map.json
create mode 100644 tokenization_internlm2.py
create mode 100644 tokenization_internlm2_fast.py
create mode 100644 tokenizer.model
create mode 100644 tokenizer_config.json
diff --git a/README.md b/README.md
index 2ceb821..35e30c4 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,674 @@
-# InternVL2_5-8B_a14123014239023104738976
+---
+license: mit
+pipeline_tag: image-text-to-text
+library_name: transformers
+base_model:
+ - OpenGVLab/InternViT-300M-448px-V2_5
+ - internlm/internlm2_5-7b-chat
+base_model_relation: merge
+language:
+ - multilingual
+tags:
+ - internvl
+ - custom_code
+---
-InternVL2_5-8B
\ No newline at end of file
+# InternVL2_5-8B
+
+[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[📜 InternVL 1.0\]](https://huggingface.co/papers/2312.14238) [\[📜 InternVL 1.5\]](https://huggingface.co/papers/2404.16821) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261) [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271)
+
+[\[🆕 Blog\]](https://internvl.github.io/blog/) [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
+
+
+
+
+
+## Introduction
+
+We are excited to introduce **InternVL 2.5**, an advanced multimodal large language model (MLLM) series that builds upon InternVL 2.0, maintaining its core model architecture while introducing significant enhancements in training and testing strategies as well as data quality.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/5HDAGOQOZvS1EtI107Ac-.png)
+
+## InternVL 2.5 Family
+
+In the following table, we provide an overview of the InternVL 2.5 series.
+
+| Model Name | Vision Part | Language Part | HF Link |
+| :-------------: | :-------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------: |
+| InternVL2_5-1B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-1B) |
+| InternVL2_5-2B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [internlm2_5-1_8b-chat](https://huggingface.co/internlm/internlm2_5-1_8b-chat) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-2B) |
+| InternVL2_5-4B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-4B) |
+| InternVL2_5-8B | [InternViT-300M-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-300M-448px-V2_5) | [internlm2_5-7b-chat](https://huggingface.co/internlm/internlm2_5-7b-chat) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-8B) |
+| InternVL2_5-26B | [InternViT-6B-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V2_5) | [internlm2_5-20b-chat](https://huggingface.co/internlm/internlm2_5-20b-chat) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-26B) |
+| InternVL2_5-38B | [InternViT-6B-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V2_5) | [Qwen2.5-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-38B) |
+| InternVL2_5-78B | [InternViT-6B-448px-V2_5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V2_5) | [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) | [🤗 link](https://huggingface.co/OpenGVLab/InternVL2_5-78B) |
+
+## Model Architecture
+
+As shown in the following figure, InternVL 2.5 retains the same model architecture as its predecessors, InternVL 1.5 and 2.0, following the "ViT-MLP-LLM" paradigm. In this new version, we integrate a newly incrementally pre-trained InternViT with various pre-trained LLMs, including InternLM 2.5 and Qwen 2.5, using a randomly initialized MLP projector.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/BiiyXN6NOk0p-3rl3ueyL.png)
+
+As in the previous version, we applied a pixel unshuffle operation, reducing the number of visual tokens to one-quarter of the original. Besides, we adopted a similar dynamic resolution strategy as InternVL 1.5, dividing images into tiles of 448×448 pixels. The key difference, starting from InternVL 2.0, is that we additionally introduced support for multi-image and video data.
+
+## Training Strategy
+
+### Dynamic High-Resolution for Multimodal Data
+
+In InternVL 2.0 and 2.5, we extend the dynamic high-resolution training approach, enhancing its capabilities to handle multi-image and video datasets.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/xoMY6rwRrNxbAGYPNyU8g.png)
+
+- For single-image datasets, the total number of tiles `n_max` are allocated to a single image for maximum resolution. Visual tokens are enclosed in `<img>` and `</img>` tags.
+
+- For multi-image datasets, the total number of tiles `n_max` are distributed across all images in a sample. Each image is labeled with auxiliary tags like `Image-1` and enclosed in `<img>` and `</img>` tags.
+
+- For videos, each frame is resized to 448×448. Frames are labeled with tags like `Frame-1` and enclosed in `<img>` and `</img>` tags, similar to images.
+
+### Single Model Training Pipeline
+
+The training pipeline for a single model in InternVL 2.5 is structured across three stages, designed to enhance the model's visual perception and multimodal capabilities.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/5NduZeCPLgPJTFr0RGTq3.png)
+
+- **Stage 1: MLP Warmup.** In this stage, only the MLP projector is trained while the vision encoder and language model are frozen. A dynamic high-resolution training strategy is applied for better performance, despite increased cost. This phase ensures robust cross-modal alignment and prepares the model for stable multimodal training.
+
+- **Stage 1.5: ViT Incremental Learning (Optional).** This stage allows incremental training of the vision encoder and MLP projector using the same data as Stage 1. It enhances the encoder’s ability to handle rare domains like multilingual OCR and mathematical charts. Once trained, the encoder can be reused across LLMs without retraining, making this stage optional unless new domains are introduced.
+
+- **Stage 2: Full Model Instruction Tuning.** The entire model is trained on high-quality multimodal instruction datasets. Strict data quality controls are enforced to prevent degradation of the LLM, as noisy data can cause issues like repetitive or incorrect outputs. After this stage, the training process is complete.
+
+### Progressive Scaling Strategy
+
+We introduce a progressive scaling strategy to align the vision encoder with LLMs efficiently. This approach trains with smaller LLMs first (e.g., 20B) to optimize foundational visual capabilities and cross-modal alignment before transferring the vision encoder to larger LLMs (e.g., 72B) without retraining. This reuse skips intermediate stages for larger models.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64006c09330a45b03605bba3/UoNUyS7ctN5pBxNv9KnzH.png)
+
+Compared to Qwen2-VL's 1.4 trillion tokens, InternVL2.5-78B uses only 120 billion tokens—less than one-tenth. This strategy minimizes redundancy, maximizes pre-trained component reuse, and enables efficient training for complex vision-language tasks.
+
+### Training Enhancements
+
+To improve real-world adaptability and performance, we introduce two key techniques:
+
+- **Random JPEG Compression**: Random JPEG compression with quality levels between 75 and 100 is applied as a data augmentation technique. This simulates image degradation from internet sources, enhancing the model's robustness to noisy images.
+
+- **Loss Reweighting**: To balance the NTP loss across responses of different lengths, we use a reweighting strategy called **square averaging**. This method balances contributions from responses of varying lengths, mitigating biases toward longer or shorter responses.
+
+### Data Organization
+
+#### Dataset Configuration
+
+In InternVL 2.0 and 2.5, the organization of the training data is controlled by several key parameters to optimize the balance and distribution of datasets during training.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/2LJe24b1ua3gjI9gDitVl.png)
+
+- **Data Augmentation:** JPEG compression is applied conditionally: enabled for image datasets to enhance robustness and disabled for video datasets to maintain consistent frame quality.
+
+- **Maximum Tile Number:** The parameter `n_max` controls the maximum tiles per dataset. For example, higher values (24–36) are used for multi-image or high-resolution data, lower values (6–12) for standard images, and 1 for videos.
+
+- **Repeat Factor:** The repeat factor `r` adjusts dataset sampling frequency. Values below 1 reduce a dataset's weight, while values above 1 increase it. This ensures balanced training across tasks and prevents overfitting or underfitting.
+
+#### Data Filtering Pipeline
+
+During development, we found that LLMs are highly sensitive to data noise, with even small anomalies—like outliers or repetitive data—causing abnormal behavior during inference. Repetitive generation, especially in long-form or CoT reasoning tasks, proved particularly harmful.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/aka8ZRiKF3ajdyZBnNFZI.png)
+
+To address this challenge and support future research, we designed an efficient data filtering pipeline to remove low-quality samples.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/70l1UxnX-Arn0NoOGwpth.png)
+
+The pipeline includes two modules: for **pure-text data**, three key strategies are used:
+
+1. **LLM-Based Quality Scoring**: Each sample is scored (0–10) using a pre-trained LLM with domain-specific prompts. Samples scoring below a threshold (e.g., 7) are removed to ensure high-quality data.
+2. **Repetition Detection**: Repetitive samples are flagged using LLM-based prompts and manually reviewed. Samples scoring below a stricter threshold (e.g., 3) are excluded to avoid repetitive patterns.
+3. **Heuristic Rule-Based Filtering**: Anomalies like abnormal sentence lengths or duplicate lines are detected using rules. Flagged samples undergo manual verification to ensure accuracy before removal.
+
+For **multimodal data**, two strategies are used:
+
+1. **Repetition Detection**: Repetitive samples in non-academic datasets are flagged and manually reviewed to prevent pattern loops. High-quality datasets are exempt from this process.
+2. **Heuristic Rule-Based Filtering**: Similar rules are applied to detect visual anomalies, with flagged data verified manually to maintain integrity.
+
+#### Training Data
+
+As shown in the following figure, from InternVL 1.5 to 2.0 and then to 2.5, the fine-tuning data mixture has undergone iterative improvements in scale, quality, and diversity. For more information about the training data, please refer to our technical report.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/GaTY9Lde02YzclASMthDa.png)
+
+## Evaluation on Multimodal Capability
+
+### Multimodal Reasoning and Mathematics
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/ihFWMRHbF0lpFTkLqnnj1.png)
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/Nrzq0kjlitjp_jrJCqtwX.png)
+
+### OCR, Chart, and Document Understanding
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/3yCMoLjlbsqY7ZJViGzih.png)
+
+### Multi-Image & Real-World Comprehension
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/DSnalmEyhDVQ9GE0GPCla.png)
+
+### Comprehensive Multimodal & Hallucination Evaluation
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/Z7Raj3TGDiV1H81pDHtoG.png)
+
+### Visual Grounding
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/lPcIrng8MPSg_PM1hpDPt.png)
+
+### Multimodal Multilingual Understanding
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/BPpbAOX36RV8RTnm3j-gs.png)
+
+### Video Understanding
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64006c09330a45b03605bba3/tcwH-i1qc8H16En-7AZ5M.png)
+
+## Evaluation on Language Capability
+
+Training InternVL 2.0 models led to a decline in pure language capabilities. InternVL 2.5 addresses this by collecting more high-quality open-source data and filtering out low-quality data, achieving better preservation of pure language performance.
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/mxuSKvSY-kfI8zePpXj6y.png)
+
+## Quick Start
+
+We provide an example code to run `InternVL2_5-8B` using `transformers`.
+
+> Please use transformers>=4.37.2 to ensure the model works normally.
+
+### Model Loading
+
+#### 16-bit (bf16 / fp16)
+
+```python
+import torch
+from transformers import AutoTokenizer, AutoModel
+path = "OpenGVLab/InternVL2_5-8B"
+model = AutoModel.from_pretrained(
+ path,
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ use_flash_attn=True,
+ trust_remote_code=True).eval().cuda()
+```
+
+#### BNB 8-bit Quantization
+
+```python
+import torch
+from transformers import AutoTokenizer, AutoModel
+path = "OpenGVLab/InternVL2_5-8B"
+model = AutoModel.from_pretrained(
+ path,
+ torch_dtype=torch.bfloat16,
+ load_in_8bit=True,
+ low_cpu_mem_usage=True,
+ use_flash_attn=True,
+ trust_remote_code=True).eval()
+```
+
+#### Multiple GPUs
+
+The reason for writing the code this way is to avoid errors that occur during multi-GPU inference due to tensors not being on the same device. By ensuring that the first and last layers of the large language model (LLM) are on the same device, we prevent such errors.
+
+```python
+import math
+import torch
+from transformers import AutoTokenizer, AutoModel
+
+def split_model(model_name):
+ device_map = {}
+ world_size = torch.cuda.device_count()
+ num_layers = {
+ 'InternVL2_5-1B': 24, 'InternVL2_5-2B': 24, 'InternVL2_5-4B': 36, 'InternVL2_5-8B': 32,
+ 'InternVL2_5-26B': 48, 'InternVL2_5-38B': 64, 'InternVL2_5-78B': 80}[model_name]
+ # Since the first GPU will be used for ViT, treat it as half a GPU.
+ num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+ num_layers_per_gpu = [num_layers_per_gpu] * world_size
+ num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+ layer_cnt = 0
+ for i, num_layer in enumerate(num_layers_per_gpu):
+ for j in range(num_layer):
+ device_map[f'language_model.model.layers.{layer_cnt}'] = i
+ layer_cnt += 1
+ device_map['vision_model'] = 0
+ device_map['mlp1'] = 0
+ device_map['language_model.model.tok_embeddings'] = 0
+ device_map['language_model.model.embed_tokens'] = 0
+ device_map['language_model.output'] = 0
+ device_map['language_model.model.norm'] = 0
+ device_map['language_model.lm_head'] = 0
+ device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+ return device_map
+
+path = "OpenGVLab/InternVL2_5-8B"
+device_map = split_model('InternVL2_5-8B')
+model = AutoModel.from_pretrained(
+ path,
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ use_flash_attn=True,
+ trust_remote_code=True,
+ device_map=device_map).eval()
+```
+
+### Inference with Transformers
+
+```python
+import numpy as np
+import torch
+import torchvision.transforms as T
+from decord import VideoReader, cpu
+from PIL import Image
+from torchvision.transforms.functional import InterpolationMode
+from transformers import AutoModel, AutoTokenizer
+
+IMAGENET_MEAN = (0.485, 0.456, 0.406)
+IMAGENET_STD = (0.229, 0.224, 0.225)
+
+def build_transform(input_size):
+ MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
+ transform = T.Compose([
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+ T.ToTensor(),
+ T.Normalize(mean=MEAN, std=STD)
+ ])
+ return transform
+
+def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+ best_ratio_diff = float('inf')
+ best_ratio = (1, 1)
+ area = width * height
+ for ratio in target_ratios:
+ target_aspect_ratio = ratio[0] / ratio[1]
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
+ if ratio_diff < best_ratio_diff:
+ best_ratio_diff = ratio_diff
+ best_ratio = ratio
+ elif ratio_diff == best_ratio_diff:
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
+ best_ratio = ratio
+ return best_ratio
+
+def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
+ orig_width, orig_height = image.size
+ aspect_ratio = orig_width / orig_height
+
+ # calculate the existing image aspect ratio
+ target_ratios = set(
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
+ i * j <= max_num and i * j >= min_num)
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+ # find the closest aspect ratio to the target
+ target_aspect_ratio = find_closest_aspect_ratio(
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size)
+
+ # calculate the target width and height
+ target_width = image_size * target_aspect_ratio[0]
+ target_height = image_size * target_aspect_ratio[1]
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+
+ # resize the image
+ resized_img = image.resize((target_width, target_height))
+ processed_images = []
+ for i in range(blocks):
+ box = (
+ (i % (target_width // image_size)) * image_size,
+ (i // (target_width // image_size)) * image_size,
+ ((i % (target_width // image_size)) + 1) * image_size,
+ ((i // (target_width // image_size)) + 1) * image_size
+ )
+ # split the image
+ split_img = resized_img.crop(box)
+ processed_images.append(split_img)
+ assert len(processed_images) == blocks
+ if use_thumbnail and len(processed_images) != 1:
+ thumbnail_img = image.resize((image_size, image_size))
+ processed_images.append(thumbnail_img)
+ return processed_images
+
+def load_image(image_file, input_size=448, max_num=12):
+ image = Image.open(image_file).convert('RGB')
+ transform = build_transform(input_size=input_size)
+ images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
+ pixel_values = [transform(image) for image in images]
+ pixel_values = torch.stack(pixel_values)
+ return pixel_values
+
+# If you want to load a model using multiple GPUs, please refer to the `Multiple GPUs` section.
+path = 'OpenGVLab/InternVL2_5-8B'
+model = AutoModel.from_pretrained(
+ path,
+ torch_dtype=torch.bfloat16,
+ low_cpu_mem_usage=True,
+ use_flash_attn=True,
+ trust_remote_code=True).eval().cuda()
+tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
+
+# set the max number of tiles in `max_num`
+pixel_values = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
+generation_config = dict(max_new_tokens=1024, do_sample=True)
+
+# pure-text conversation (纯文本对话)
+question = 'Hello, who are you?'
+response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+question = 'Can you tell me a story?'
+response, history = model.chat(tokenizer, None, question, generation_config, history=history, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+# single-image single-round conversation (单图单轮对话)
+question = '<image>\nPlease describe the image shortly.'
+response = model.chat(tokenizer, pixel_values, question, generation_config)
+print(f'User: {question}\nAssistant: {response}')
+
+# single-image multi-round conversation (单图多轮对话)
+question = '<image>\nPlease describe the image in detail.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+question = 'Please write a poem according to the image.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+# multi-image multi-round conversation, combined images (多图多轮对话,拼接图像)
+pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
+pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
+pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
+
+question = '<image>\nDescribe the two images in detail.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+ history=None, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+question = 'What are the similarities and differences between these two images.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+ history=history, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+# multi-image multi-round conversation, separate images (多图多轮对话,独立图像)
+pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
+pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
+pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
+num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
+
+question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+ num_patches_list=num_patches_list,
+ history=None, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+question = 'What are the similarities and differences between these two images.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+ num_patches_list=num_patches_list,
+ history=history, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+# batch inference, single image per sample (单图批处理)
+pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
+pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
+num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
+pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
+
+questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
+responses = model.batch_chat(tokenizer, pixel_values,
+ num_patches_list=num_patches_list,
+ questions=questions,
+ generation_config=generation_config)
+for question, response in zip(questions, responses):
+ print(f'User: {question}\nAssistant: {response}')
+
+# video multi-round conversation (视频多轮对话)
+def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
+ if bound:
+ start, end = bound[0], bound[1]
+ else:
+ start, end = -100000, 100000
+ start_idx = max(first_idx, round(start * fps))
+ end_idx = min(round(end * fps), max_frame)
+ seg_size = float(end_idx - start_idx) / num_segments
+ frame_indices = np.array([
+ int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
+ for idx in range(num_segments)
+ ])
+ return frame_indices
+
+def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
+ vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
+ max_frame = len(vr) - 1
+ fps = float(vr.get_avg_fps())
+
+ pixel_values_list, num_patches_list = [], []
+ transform = build_transform(input_size=input_size)
+ frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
+ for frame_index in frame_indices:
+ img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
+ img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
+ pixel_values = [transform(tile) for tile in img]
+ pixel_values = torch.stack(pixel_values)
+ num_patches_list.append(pixel_values.shape[0])
+ pixel_values_list.append(pixel_values)
+ pixel_values = torch.cat(pixel_values_list)
+ return pixel_values, num_patches_list
+
+video_path = './examples/red-panda.mp4'
+pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
+pixel_values = pixel_values.to(torch.bfloat16).cuda()
+video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
+question = video_prefix + 'What is the red panda doing?'
+# Frame1: <image>\nFrame2: <image>\n...\nFrame8: <image>\n{question}
+response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+ num_patches_list=num_patches_list, history=None, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+question = 'Describe this video in detail.'
+response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+ num_patches_list=num_patches_list, history=history, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+```
+
+#### Streaming Output
+
+Besides this method, you can also use the following code to get streamed output.
+
+```python
+from transformers import TextIteratorStreamer
+from threading import Thread
+
+# Initialize the streamer
+streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=10)
+# Define the generation configuration
+generation_config = dict(max_new_tokens=1024, do_sample=False, streamer=streamer)
+# Start the model chat in a separate thread
+thread = Thread(target=model.chat, kwargs=dict(
+ tokenizer=tokenizer, pixel_values=pixel_values, question=question,
+ history=None, return_history=False, generation_config=generation_config,
+))
+thread.start()
+
+# Initialize an empty string to store the generated text
+generated_text = ''
+# Loop through the streamer to get the new text as it is generated
+for new_text in streamer:
+ if new_text == model.conv_template.sep:
+ break
+ generated_text += new_text
+ print(new_text, end='', flush=True) # Print each new chunk of generated text on the same line
+```
+
+## Finetune
+
+Many repositories now support fine-tuning of the InternVL series models, including [InternVL](https://github.com/OpenGVLab/InternVL), [SWIFT](https://github.com/modelscope/ms-swift), [XTurner](https://github.com/InternLM/xtuner), and others. Please refer to their documentation for more details on fine-tuning.
+
+## Deployment
+
+### LMDeploy
+
+LMDeploy is a toolkit for compressing, deploying, and serving LLMs & VLMs.
+
+```sh
+pip install lmdeploy>=0.6.4
+```
+
+LMDeploy abstracts the complex inference process of multi-modal Vision-Language Models (VLM) into an easy-to-use pipeline, similar to the Large Language Model (LLM) inference pipeline.
+
+#### A 'Hello, world' Example
+
+```python
+from lmdeploy import pipeline, TurbomindEngineConfig
+from lmdeploy.vl import load_image
+
+model = 'OpenGVLab/InternVL2_5-8B'
+image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+response = pipe(('describe this image', image))
+print(response.text)
+```
+
+If `ImportError` occurs while executing this case, please install the required dependency packages as prompted.
+
+#### Multi-images Inference
+
+When dealing with multiple images, you can put them all in one list. Keep in mind that multiple images will lead to a higher number of input tokens, and as a result, the size of the context window typically needs to be increased.
+
+```python
+from lmdeploy import pipeline, TurbomindEngineConfig
+from lmdeploy.vl import load_image
+from lmdeploy.vl.constants import IMAGE_TOKEN
+
+model = 'OpenGVLab/InternVL2_5-8B'
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+
+image_urls=[
+ 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
+ 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg'
+]
+
+images = [load_image(img_url) for img_url in image_urls]
+# Numbering images improves multi-image conversations
+response = pipe((f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images', images))
+print(response.text)
+```
+
+#### Batch Prompts Inference
+
+Conducting inference with batch prompts is quite straightforward; just place them within a list structure:
+
+```python
+from lmdeploy import pipeline, TurbomindEngineConfig
+from lmdeploy.vl import load_image
+
+model = 'OpenGVLab/InternVL2_5-8B'
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+
+image_urls=[
+ "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
+ "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg"
+]
+prompts = [('describe this image', load_image(img_url)) for img_url in image_urls]
+response = pipe(prompts)
+print(response)
+```
+
+#### Multi-turn Conversation
+
+There are two ways to do the multi-turn conversations with the pipeline. One is to construct messages according to the format of OpenAI and use above introduced method, the other is to use the `pipeline.chat` interface.
+
+```python
+from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig
+from lmdeploy.vl import load_image
+
+model = 'OpenGVLab/InternVL2_5-8B'
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+
+image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
+gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)
+sess = pipe.chat(('describe this image', image), gen_config=gen_config)
+print(sess.response.text)
+sess = pipe.chat('What is the woman doing?', session=sess, gen_config=gen_config)
+print(sess.response.text)
+```
+
+#### Service
+
+LMDeploy's `api_server` enables models to be easily packed into services with a single command. The provided RESTful APIs are compatible with OpenAI's interfaces. Below is an example of service startup:
+
+```shell
+lmdeploy serve api_server OpenGVLab/InternVL2_5-8B --server-port 23333
+```
+
+To use the OpenAI-style interface, you need to install OpenAI:
+
+```shell
+pip install openai
+```
+
+Then, use the code below to make the API call:
+
+```python
+from openai import OpenAI
+
+client = OpenAI(api_key='YOUR_API_KEY', base_url='http://0.0.0.0:23333/v1')
+model_name = client.models.list().data[0].id
+response = client.chat.completions.create(
+ model=model_name,
+ messages=[{
+ 'role':
+ 'user',
+ 'content': [{
+ 'type': 'text',
+ 'text': 'describe this image',
+ }, {
+ 'type': 'image_url',
+ 'image_url': {
+ 'url':
+ 'https://modelscope.oss-cn-beijing.aliyuncs.com/resource/tiger.jpeg',
+ },
+ }],
+ }],
+ temperature=0.8,
+ top_p=0.8)
+print(response)
+```
+
+## License
+
+This project is released under the MIT License. This project uses the pre-trained internlm2_5-7b-chat as a component, which is licensed under the Apache License 2.0.
+
+## Citation
+
+If you find this project useful in your research, please consider citing:
+
+```BibTeX
+@article{chen2024expanding,
+ title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
+ author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
+ journal={arXiv preprint arXiv:2412.05271},
+ year={2024}
+}
+@article{gao2024mini,
+ title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
+ author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
+ journal={arXiv preprint arXiv:2410.16261},
+ year={2024}
+}
+@article{chen2024far,
+ title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites},
+ author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others},
+ journal={arXiv preprint arXiv:2404.16821},
+ year={2024}
+}
+@inproceedings{chen2024internvl,
+ title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
+ author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ pages={24185--24198},
+ year={2024}
+}
+```
diff --git a/added_tokens.json b/added_tokens.json
new file mode 100644
index 0000000..35f5893
--- /dev/null
+++ b/added_tokens.json
@@ -0,0 +1,11 @@
{
  "</box>": 92552,
  "</img>": 92545,
  "</quad>": 92548,
  "</ref>": 92550,
  "<IMG_CONTEXT>": 92546,
  "<box>": 92551,
  "<img>": 92544,
  "<quad>": 92547,
  "<ref>": 92549
}
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..90be7fc
--- /dev/null
+++ b/config.json
@@ -0,0 +1,145 @@
+{
+ "_commit_hash": null,
+ "architectures": [
+ "InternVLChatModel"
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
+ "AutoModel": "modeling_internvl_chat.InternVLChatModel",
+ "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
+ },
+ "downsample_ratio": 0.5,
+ "dynamic_image_size": true,
+ "force_image_size": 448,
+ "llm_config": {
+ "_name_or_path": "internlm/internlm2_5-7b-chat",
+ "add_cross_attention": false,
+ "architectures": [
+ "InternLM2ForCausalLM"
+ ],
+ "attn_implementation": "flash_attention_2",
+ "auto_map": {
+ "AutoConfig": "configuration_internlm2.InternLM2Config",
+ "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+ "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM",
+ "AutoModelForSequenceClassification": "modeling_internlm2.InternLM2ForSequenceClassification"
+ },
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bias": false,
+ "bos_token_id": 1,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 32768,
+ "min_length": 0,
+ "model_type": "internlm2",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 32,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 2,
+ "prefix": null,
+ "pretraining_tp": 1,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 2.0,
+ "type": "dynamic"
+ },
+ "rope_theta": 1000000,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": false,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "bfloat16",
+ "torchscript": false,
+ "transformers_version": "4.37.2",
+ "typical_p": 1.0,
+ "use_bfloat16": true,
+ "use_cache": true,
+ "vocab_size": 92553
+ },
+ "max_dynamic_patch": 12,
+ "min_dynamic_patch": 1,
+ "model_type": "internvl_chat",
+ "ps_version": "v2",
+ "select_layer": -1,
+ "template": "internvl2_5",
+ "torch_dtype": "bfloat16",
+ "use_backbone_lora": 0,
+ "use_llm_lora": 0,
+ "use_thumbnail": true,
+ "vision_config": {
+ "architectures": [
+ "InternVisionModel"
+ ],
+ "attention_dropout": 0.0,
+ "drop_path_rate": 0.0,
+ "dropout": 0.0,
+ "hidden_act": "gelu",
+ "hidden_size": 1024,
+ "image_size": 448,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-06,
+ "model_type": "intern_vit_6b",
+ "norm_type": "layer_norm",
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "patch_size": 14,
+ "qk_normalization": false,
+ "qkv_bias": true,
+ "return_dict": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "use_bfloat16": true,
+ "use_flash_attn": true
+ }
+}
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..4aef15d
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "image-text-to-text", "allow_remote": true}
\ No newline at end of file
diff --git a/configuration_intern_vit.py b/configuration_intern_vit.py
new file mode 100644
index 0000000..7e630c4
--- /dev/null
+++ b/configuration_intern_vit.py
@@ -0,0 +1,120 @@
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2024 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+
+import os
+from typing import Union
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        norm_type (`str`, *optional*, defaults to `"rms_norm"`):
            The type of normalization layer used inside the encoder blocks
            (the shipped config.json overrides this to `"layer_norm"`).
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_6b'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        """Load the vision config, unwrapping a composite InternVLChat config if given one."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When pointed at a full InternVLChat checkpoint, extract the nested vision section.
        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
diff --git a/configuration_internlm2.py b/configuration_internlm2.py
new file mode 100644
index 0000000..282b13b
--- /dev/null
+++ b/configuration_internlm2.py
@@ -0,0 +1,150 @@
+# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on transformers/src/transformers/models/llama/configuration_llama.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" InternLM2 model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+# Modified from transformers.model.llama.configuration_llama.LlamaConfig
# Modified from transformers.model.llama.configuration_llama.LlamaConfig
class InternLM2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
    an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 103168):
            Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`InternLM2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            Id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            Id of the end-of-sequence token.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the attention projection layers.
        rope_theta (`float`, *optional*, defaults to 10000):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            RoPE scaling configuration with exactly two fields, `type` (`"linear"` or `"dynamic"`)
            and `factor` (a number >= 1). Validated by `_rope_scaling_validation`.
        attn_implementation (`str`, *optional*, defaults to `"eager"`):
            The attention implementation to use (e.g. `"eager"` or `"flash_attention_2"`).
    Example:

    """
    model_type = 'internlm2'
    _auto_class = 'AutoConfig'

    def __init__(  # pylint: disable=W0102
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        rope_theta=10000,
        rope_scaling=None,
        attn_implementation='eager',
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.bias = bias

        # GQA: default to MHA when the number of KV heads is not given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        self.attn_implementation = attn_implementation
        if self.attn_implementation is None:
            self.attn_implementation = 'eager'
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.

        Raises:
            ValueError: if `rope_scaling` is not a two-field dict with a valid
                `type` (`"linear"`/`"dynamic"`) and a numeric `factor` >= 1.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        # Accept both int and float factors (e.g. 2 as well as 2.0); both behave identically.
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, (int, float)) or rope_scaling_factor < 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a number >= 1, got {rope_scaling_factor}")
diff --git a/configuration_internvl_chat.py b/configuration_internvl_chat.py
new file mode 100644
index 0000000..56c628e
--- /dev/null
+++ b/configuration_internvl_chat.py
@@ -0,0 +1,96 @@
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2024 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+
+import copy
+
+from transformers import AutoConfig, LlamaConfig
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+from .configuration_intern_vit import InternVisionConfig
+from .configuration_internlm2 import InternLM2Config
+
+logger = logging.get_logger(__name__)
+
+
class InternVLChatConfig(PretrainedConfig):
    """Composite configuration for the InternVL chat model.

    Bundles a vision-encoder config (`InternVisionConfig`) and an LLM config
    (`InternLM2Config` or `LlamaConfig`, chosen from the declared
    `architectures`) together with the multimodal glue parameters
    (dynamic tiling, pixel-shuffle downsampling, LoRA switches, ...).
    """

    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version='v1',
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['InternLM2ForCausalLM']}
            # The default LLM is InternLM2, not Llama (the old log message was wrong).
            logger.info('llm_config is None. Initializing the InternLM2Config with default values.')

        self.vision_config = InternVisionConfig(**vision_config)
        # Select the LLM config class from the declared architecture. Guard against a
        # missing/empty `architectures` entry, which previously crashed with
        # `TypeError: 'NoneType' object is not subscriptable` instead of a clear error.
        llm_architectures = llm_config.get('architectures') or [None]
        if llm_architectures[0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_architectures[0] == 'InternLM2ForCausalLM':
            self.llm_config = InternLM2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_architectures[0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer  # which vision layer to take features from (-1 = last)
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template  # conversation template name (see conversation.py)
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        output = copy.deepcopy(self.__dict__)
        # The nested config objects must be expanded into plain dicts explicitly.
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
diff --git a/conversation.py b/conversation.py
new file mode 100644
index 0000000..5a77176
--- /dev/null
+++ b/conversation.py
@@ -0,0 +1,391 @@
+"""
+Conversation prompt templates.
+
+We kindly request that you import fastchat instead of copying this file if you wish to use it.
+If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
+
+Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
+"""
+
+import dataclasses
+from enum import IntEnum, auto
+from typing import Dict, List, Tuple, Union
+
+
class SeparatorStyle(IntEnum):
    """Separator styles.

    Each member selects one prompt-assembly branch in ``Conversation.get_prompt``.
    The values are written out explicitly (matching the ``auto()`` numbering of
    the original declaration order, starting at 1) so each style's integer
    identity is obvious and stable.
    """

    ADD_COLON_SINGLE = 1
    ADD_COLON_TWO = 2
    ADD_COLON_SPACE_SINGLE = 3
    NO_COLON_SINGLE = 4
    NO_COLON_TWO = 5
    ADD_NEW_LINE_SINGLE = 6
    LLAMA2 = 7
    CHATGLM = 8
    CHATML = 9
    CHATINTERN = 10
    DOLLY = 11
    RWKV = 12
    PHOENIX = 13
    ROBIN = 14
    FALCON_CHAT = 15
    CHATGLM3 = 16
    INTERNVL_ZH = 17
    MPT = 18
+
+
@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = '{system_message}'
    # The system message
    system_message: str = ''
    # The names of two roles
    roles: Tuple[str] = ('USER', 'ASSISTANT')
    # All messages. Each item is (role, message).
    # Fixed: the previous default was the shared tuple `()`, so calling
    # `append_message` on a directly constructed Conversation raised
    # AttributeError. A per-instance list matches the declared type and works.
    messages: List[List[str]] = dataclasses.field(default_factory=list)
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = '\n'
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ': '  # must be end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = '' if system_prompt == '' else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ': '
                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
                    )
                    ret += '\n\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = '[INST] '
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + ' '
                    else:
                        ret += tag + ' ' + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == 'chatglm2' else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ''

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'

                if message:
                    ret += f'{role}:{message}{self.sep}'
                else:
                    ret += f'{role}:'
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep + '\n'
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ''
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + ' ' + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                # if i % 2 == 0:
                #     ret += "]"
                if message:
                    ret += role + ':' + message + seps[i % 2] + '\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ':\n' + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += '\n\n'
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            # Restored the '<s>'/'</s>' literals, which had been stripped from this
            # file by an HTML sanitizer (matches the upstream FastChat PHOENIX style).
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + '<s>' + message + '</s>'
                else:
                    ret += role + ': ' + '<s>'
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ':\n' + message + self.sep
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ''
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'

            return ret
        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
            seps = [self.sep, self.sep2]
            ret = self.system_message + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{'role': 'system', 'content': self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({'role': 'user', 'content': msg})
            else:
                if msg is not None:
                    ret.append({'role': 'assistant', 'content': msg})
        return ret

    def copy(self):
        """Return a deep-enough copy: message pairs are re-created as fresh lists."""
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        """Return a plain-dict summary of the template state (not JSON-complete)."""
        return {
            'template_name': self.name,
            'system_message': self.system_message,
            'roles': self.roles,
            'messages': self.messages,
            'offset': self.offset,
        }
+
+
# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template.

    Raises:
        ValueError: if a template with the same name is already registered and
            `override` is False. (Previously an `assert`, which is silently
            stripped when Python runs with `-O`.)
    """
    if not override and template.name in conv_templates:
        raise ValueError(f'{template.name} has been registered.')

    conv_templates[template.name] = template
+
+
def get_conv_template(name: str) -> Conversation:
    """Return a fresh copy of the registered template called *name*.

    A copy is returned so callers can append messages without mutating the
    shared registry entry. Raises KeyError for an unknown template name.
    """
    template = conv_templates[name]
    return template.copy()
+
+
# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
# is that during training, the preprocessing function for the Hermes-2 template doesn't add
# a BOS token (presumably `<s>` — the literal appears to have been stripped from this
# comment in transit; confirm against the upstream InternVL repository)
# at the beginning of the tokenized sequence, while the internlm2-chat template does.
# Therefore, they are completely equivalent during inference.
register_conv_template(
    Conversation(
        name='Hermes-2',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # The commented-out line below is the alternative (newer) Chinese system prompt, kept for reference.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
        stop_str='<|endoftext|>',
    )
)


register_conv_template(
    Conversation(
        name='internlm2-chat',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # The commented-out line below is the alternative (newer) Chinese system prompt, kept for reference.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
    )
)


register_conv_template(
    Conversation(
        name='phi3-chat',
        system_template='<|system|>\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # The commented-out line below is the alternative (newer) Chinese system prompt, kept for reference.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|user|>\n', '<|assistant|>\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|end|>',
    )
)


# Template used by this checkpoint (config.json sets "template": "internvl2_5").
# Note it uses the new system prompt and ends turns with '<|im_end|>\n' (trailing newline).
register_conv_template(
    Conversation(
        name='internvl2_5',
        system_template='<|im_start|>system\n{system_message}',
        system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>\n',
    )
)
diff --git a/eval_llm_benchmark.log b/eval_llm_benchmark.log
new file mode 100644
index 0000000..bd17c0d
--- /dev/null
+++ b/eval_llm_benchmark.log
@@ -0,0 +1,61 @@
+/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl_eval2/lib/python3.10/site-packages/bitsandbytes/cextension.py:34: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
+ warn("The installed version of bitsandbytes was compiled without GPU support. "
+/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl_eval2/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so: undefined symbol: cadam32bit_grad_fp32
+model path is /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/work_dirs/internvl_chat_v2_5/internvl_chat_v2_5_internlm2_5_7b_dynamic_res_finetune_datav162
+11/19 10:57:39 - OpenCompass - WARNING - No previous results to reuse!
+11/19 10:57:39 - OpenCompass - INFO - Reusing experiements from 20241119_105739
+11/19 10:57:39 - OpenCompass - INFO - Current exp folder: /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/work_dirs/internvl_chat_v2_5/internvl_chat_v2_5_internlm2_5_7b_dynamic_res_finetune_datav162/20241119_105739
+11/19 10:57:42 - OpenCompass - INFO - Partitioned into 256 tasks.
+[ ] 0/256, elapsed: 0s, ETA:
[ ] 1/256, 0.0 task/s, elapsed: 356s, ETA: 90694s
[ ] 2/256, 0.0 task/s, elapsed: 375s, ETA: 47629s
[ ] 3/256, 0.0 task/s, elapsed: 375s, ETA: 31636s
[ ] 4/256, 0.0 task/s, elapsed: 381s, ETA: 24028s
[ ] 5/256, 0.0 task/s, elapsed: 391s, ETA: 19650s
[ ] 6/256, 0.0 task/s, elapsed: 394s, ETA: 16397s
[ ] 7/256, 0.0 task/s, elapsed: 395s, ETA: 14057s
[ ] 8/256, 0.0 task/s, elapsed: 395s, ETA: 12252s
[> ] 9/256, 0.0 task/s, elapsed: 396s, ETA: 10860s
[> ] 10/256, 0.0 task/s, elapsed: 397s, ETA: 9765s
[> ] 11/256, 0.0 task/s, elapsed: 397s, ETA: 8844s
[> ] 12/256, 0.0 task/s, elapsed: 397s, ETA: 8075s
[> ] 13/256, 0.0 task/s, elapsed: 400s, ETA: 7480s
[> ] 14/256, 0.0 task/s, elapsed: 403s, ETA: 6967s
[> ] 15/256, 0.0 task/s, elapsed: 405s, ETA: 6509s
[> ] 16/256, 0.0 task/s, elapsed: 406s, ETA: 6090s
[> ] 17/256, 0.0 task/s, elapsed: 407s, ETA: 5723s
[>> ] 18/256, 0.0 task/s, elapsed: 408s, ETA: 5390s
[>> ] 19/256, 0.0 task/s, elapsed: 408s, ETA: 5085s
[>> ] 20/256, 0.0 task/s, elapsed: 408s, ETA: 4811s
[>> ] 21/256, 0.1 task/s, elapsed: 410s, ETA: 4584s
[>> ] 22/256, 0.1 task/s, elapsed: 411s, ETA: 4371s
[>> ] 23/256, 0.1 task/s, elapsed: 417s, ETA: 4229s
[>> ] 24/256, 0.1 task/s, elapsed: 418s, ETA: 4037s
[>> ] 25/256, 0.1 task/s, elapsed: 419s, ETA: 3868s
[>>> ] 26/256, 0.1 task/s, elapsed: 420s, ETA: 3712s
[>>> ] 27/256, 0.1 task/s, elapsed: 420s, ETA: 3561s
[>>> ] 28/256, 0.1 task/s, elapsed: 426s, ETA: 3473s
[>>> ] 29/256, 0.1 task/s, elapsed: 427s, ETA: 3343s
[>>> ] 30/256, 0.1 task/s, elapsed: 431s, ETA: 3250s
[>>> ] 31/256, 0.1 task/s, elapsed: 433s, ETA: 3143s
[>>> ] 32/256, 0.1 task/s, elapsed: 434s, ETA: 3035s
[>>> ] 33/256, 0.1 task/s, elapsed: 435s, ETA: 2941s
[>>> ] 34/256, 0.1 task/s, elapsed: 438s, ETA: 2858s
[>>>> ] 35/256, 0.1 task/s, elapsed: 438s, ETA: 2764s
[>>>> ] 36/256, 0.1 task/s, elapsed: 438s, ETA: 2675s
[>>>> ] 37/256, 0.1 task/s, elapsed: 441s, ETA: 2609s
[>>>> ] 38/256, 0.1 task/s, elapsed: 442s, ETA: 2536s
[>>>> ] 39/256, 0.1 task/s, elapsed: 442s, ETA: 2460s
[>>>> ] 40/256, 0.1 task/s, elapsed: 443s, ETA: 2394s
[>>>> ] 41/256, 0.1 task/s, elapsed: 443s, ETA: 2325s
[>>>> ] 42/256, 0.1 task/s, elapsed: 446s, ETA: 2275s
[>>>>> ] 43/256, 0.1 task/s, elapsed: 447s, ETA: 2212s
[>>>>> ] 44/256, 0.1 task/s, elapsed: 448s, ETA: 2159s
[>>>>> ] 45/256, 0.1 task/s, elapsed: 450s, ETA: 2108s
[>>>>> ] 46/256, 0.1 task/s, elapsed: 450s, ETA: 2053s
[>>>>> ] 47/256, 0.1 task/s, elapsed: 450s, ETA: 2002s
[>>>>> ] 48/256, 0.1 task/s, elapsed: 451s, ETA: 1954s
[>>>>> ] 49/256, 0.1 task/s, elapsed: 453s, ETA: 1915s
[>>>>> ] 50/256, 0.1 task/s, elapsed: 457s, ETA: 1882s
[>>>>> ] 51/256, 0.1 task/s, elapsed: 458s, ETA: 1839s
[>>>>>> ] 52/256, 0.1 task/s, elapsed: 458s, ETA: 1796s
[>>>>>> ] 53/256, 0.1 task/s, elapsed: 459s, ETA: 1757s
[>>>>>> ] 54/256, 0.1 task/s, elapsed: 461s, ETA: 1725s
[>>>>>> ] 55/256, 0.1 task/s, elapsed: 462s, ETA: 1690s
[>>>>>> ] 56/256, 0.1 task/s, elapsed: 463s, ETA: 1652s
[>>>>>> ] 57/256, 0.1 task/s, elapsed: 463s, ETA: 1617s
[>>>>>> ] 58/256, 0.1 task/s, elapsed: 464s, ETA: 1583s
[>>>>>> ] 59/256, 0.1 task/s, elapsed: 466s, ETA: 1555s
[>>>>>>> ] 60/256, 0.1 task/s, elapsed: 466s, ETA: 1523s
[>>>>>>> ] 61/256, 0.1 task/s, elapsed: 467s, ETA: 1491s
[>>>>>>> ] 62/256, 0.1 task/s, elapsed: 467s, ETA: 1461s
[>>>>>>> ] 63/256, 0.1 task/s, elapsed: 467s, ETA: 1432s
[>>>>>>> ] 64/256, 0.1 task/s, elapsed: 470s, ETA: 1410s
[>>>>>>> ] 65/256, 0.1 task/s, elapsed: 470s, ETA: 1381s
[>>>>>>> ] 66/256, 0.1 task/s, elapsed: 470s, ETA: 1353s
[>>>>>>> ] 67/256, 0.1 task/s, elapsed: 471s, ETA: 1328s
[>>>>>>> ] 68/256, 0.1 task/s, elapsed: 471s, ETA: 1302s
[>>>>>>>> ] 69/256, 0.1 task/s, elapsed: 471s, ETA: 1278s
[>>>>>>>> ] 70/256, 0.1 task/s, elapsed: 472s, ETA: 1253s
[>>>>>>>> ] 71/256, 0.2 task/s, elapsed: 473s, ETA: 1233s
[>>>>>>>> ] 72/256, 0.2 task/s, elapsed: 475s, ETA: 1214s
[>>>>>>>> ] 73/256, 0.2 task/s, elapsed: 477s, ETA: 1196s
[>>>>>>>> ] 74/256, 0.2 task/s, elapsed: 478s, ETA: 1174s
[>>>>>>>> ] 75/256, 0.2 task/s, elapsed: 480s, ETA: 1159s
[>>>>>>>> ] 76/256, 0.2 task/s, elapsed: 481s, ETA: 1139s
[>>>>>>>>> ] 77/256, 0.2 task/s, elapsed: 482s, ETA: 1121s
[>>>>>>>>> ] 78/256, 0.2 task/s, elapsed: 483s, ETA: 1102s
[>>>>>>>>> ] 79/256, 0.2 task/s, elapsed: 483s, ETA: 1082s
[>>>>>>>>> ] 80/256, 0.2 task/s, elapsed: 484s, ETA: 1064s
[>>>>>>>>> ] 81/256, 0.2 task/s, elapsed: 484s, ETA: 1046s
[>>>>>>>>> ] 82/256, 0.2 task/s, elapsed: 485s, ETA: 1029s
[>>>>>>>>> ] 83/256, 0.2 task/s, elapsed: 485s, ETA: 1011s
[>>>>>>>>> ] 84/256, 0.2 task/s, elapsed: 485s, ETA: 994s
[>>>>>>>>> ] 85/256, 0.2 task/s, elapsed: 485s, ETA: 976s
[>>>>>>>>>> ] 86/256, 0.2 task/s, elapsed: 486s, ETA: 960s
[>>>>>>>>>> ] 87/256, 0.2 task/s, elapsed: 489s, ETA: 950s
[>>>>>>>>>> ] 88/256, 0.2 task/s, elapsed: 492s, ETA: 938s
[>>>>>>>>>> ] 89/256, 0.2 task/s, elapsed: 493s, ETA: 925s
[>>>>>>>>>> ] 90/256, 0.2 task/s, elapsed: 493s, ETA: 909s
[>>>>>>>>>> ] 91/256, 0.2 task/s, elapsed: 493s, ETA: 894s
[>>>>>>>>>> ] 92/256, 0.2 task/s, elapsed: 493s, ETA: 880s
[>>>>>>>>>> ] 93/256, 0.2 task/s, elapsed: 493s, ETA: 865s
[>>>>>>>>>>> ] 94/256, 0.2 task/s, elapsed: 494s, ETA: 852s
[>>>>>>>>>>> ] 95/256, 0.2 task/s, elapsed: 495s, ETA: 839s
[>>>>>>>>>>> ] 96/256, 0.2 task/s, elapsed: 496s, ETA: 827s
[>>>>>>>>>>> ] 97/256, 0.2 task/s, elapsed: 498s, ETA: 816s
[>>>>>>>>>>> ] 98/256, 0.2 task/s, elapsed: 498s, ETA: 802s
[>>>>>>>>>>> ] 99/256, 0.2 task/s, elapsed: 499s, ETA: 791s
[>>>>>>>>>>> ] 100/256, 0.2 task/s, elapsed: 500s, ETA: 780s
[>>>>>>>>>>> ] 101/256, 0.2 task/s, elapsed: 500s, ETA: 767s
[>>>>>>>>>>> ] 102/256, 0.2 task/s, elapsed: 503s, ETA: 760s
[>>>>>>>>>>> ] 103/256, 0.2 task/s, elapsed: 504s, ETA: 749s
[>>>>>>>>>>> ] 104/256, 0.2 task/s, elapsed: 505s, ETA: 738s
[>>>>>>>>>>> ] 105/256, 0.2 task/s, elapsed: 506s, ETA: 728s
[>>>>>>>>>>>> ] 106/256, 0.2 task/s, elapsed: 507s, ETA: 717s
[>>>>>>>>>>>> ] 107/256, 0.2 task/s, elapsed: 508s, ETA: 707s
[>>>>>>>>>>>> ] 108/256, 0.2 task/s, elapsed: 508s, ETA: 696s
[>>>>>>>>>>>> ] 109/256, 0.2 task/s, elapsed: 509s, ETA: 687s
[>>>>>>>>>>>> ] 110/256, 0.2 task/s, elapsed: 510s, ETA: 677s
[>>>>>>>>>>>> ] 111/256, 0.2 task/s, elapsed: 510s, ETA: 666s
[>>>>>>>>>>>> ] 112/256, 0.2 task/s, elapsed: 511s, ETA: 657s
[>>>>>>>>>>>> ] 113/256, 0.2 task/s, elapsed: 511s, ETA: 647s
[>>>>>>>>>>>> ] 114/256, 0.2 task/s, elapsed: 512s, ETA: 637s
[>>>>>>>>>>>>> ] 115/256, 0.2 task/s, elapsed: 513s, ETA: 628s
[>>>>>>>>>>>>> ] 116/256, 0.2 task/s, elapsed: 513s, ETA: 620s
[>>>>>>>>>>>>> ] 117/256, 0.2 task/s, elapsed: 514s, ETA: 610s
[>>>>>>>>>>>>> ] 118/256, 0.2 task/s, elapsed: 515s, ETA: 602s
[>>>>>>>>>>>>> ] 119/256, 0.2 task/s, elapsed: 515s, ETA: 593s
[>>>>>>>>>>>>> ] 120/256, 0.2 task/s, elapsed: 516s, ETA: 585s
[>>>>>>>>>>>>> ] 121/256, 0.2 task/s, elapsed: 517s, ETA: 577s
[>>>>>>>>>>>>> ] 122/256, 0.2 task/s, elapsed: 517s, ETA: 568s
[>>>>>>>>>>>>> ] 123/256, 0.2 task/s, elapsed: 518s, ETA: 560s
[>>>>>>>>>>>>>> ] 124/256, 0.2 task/s, elapsed: 521s, ETA: 555s
[>>>>>>>>>>>>>> ] 125/256, 0.2 task/s, elapsed: 526s, ETA: 551s
[>>>>>>>>>>>>>> ] 126/256, 0.2 task/s, elapsed: 528s, ETA: 545s
[>>>>>>>>>>>>>> ] 127/256, 0.2 task/s, elapsed: 530s, ETA: 538s
[>>>>>>>>>>>>>> ] 128/256, 0.2 task/s, elapsed: 530s, ETA: 530s
[>>>>>>>>>>>>>> ] 129/256, 0.2 task/s, elapsed: 531s, ETA: 523s
[>>>>>>>>>>>>>> ] 130/256, 0.2 task/s, elapsed: 532s, ETA: 516s
[>>>>>>>>>>>>>> ] 131/256, 0.2 task/s, elapsed: 536s, ETA: 511s
[>>>>>>>>>>>>>> ] 132/256, 0.2 task/s, elapsed: 537s, ETA: 505s
[>>>>>>>>>>>>>>> ] 133/256, 0.2 task/s, elapsed: 537s, ETA: 497s
[>>>>>>>>>>>>>>> ] 134/256, 0.2 task/s, elapsed: 540s, ETA: 492s
[>>>>>>>>>>>>>>> ] 135/256, 0.2 task/s, elapsed: 541s, ETA: 485s
[>>>>>>>>>>>>>>> ] 136/256, 0.3 task/s, elapsed: 543s, ETA: 479s
[>>>>>>>>>>>>>>> ] 137/256, 0.3 task/s, elapsed: 543s, ETA: 472s
[>>>>>>>>>>>>>>> ] 138/256, 0.3 task/s, elapsed: 545s, ETA: 466s
[>>>>>>>>>>>>>>> ] 139/256, 0.3 task/s, elapsed: 546s, ETA: 459s
[>>>>>>>>>>>>>>> ] 140/256, 0.3 task/s, elapsed: 547s, ETA: 453s
[>>>>>>>>>>>>>>> ] 141/256, 0.3 task/s, elapsed: 549s, ETA: 448s
[>>>>>>>>>>>>>>>> ] 142/256, 0.3 task/s, elapsed: 550s, ETA: 442s
[>>>>>>>>>>>>>>>> ] 143/256, 0.3 task/s, elapsed: 551s, ETA: 435s
[>>>>>>>>>>>>>>>> ] 144/256, 0.3 task/s, elapsed: 552s, ETA: 429s
[>>>>>>>>>>>>>>>> ] 145/256, 0.3 task/s, elapsed: 553s, ETA: 423s
[>>>>>>>>>>>>>>>> ] 146/256, 0.3 task/s, elapsed: 553s, ETA: 417s
[>>>>>>>>>>>>>>>> ] 147/256, 0.3 task/s, elapsed: 559s, ETA: 415s
[>>>>>>>>>>>>>>>> ] 148/256, 0.3 task/s, elapsed: 562s, ETA: 410s
[>>>>>>>>>>>>>>>> ] 149/256, 0.3 task/s, elapsed: 563s, ETA: 404s
[>>>>>>>>>>>>>>>> ] 150/256, 0.3 task/s, elapsed: 564s, ETA: 399s
[>>>>>>>>>>>>>>>>> ] 151/256, 0.3 task/s, elapsed: 568s, ETA: 395s
[>>>>>>>>>>>>>>>>> ] 152/256, 0.3 task/s, elapsed: 572s, ETA: 392s
[>>>>>>>>>>>>>>>>> ] 153/256, 0.3 task/s, elapsed: 574s, ETA: 386s
[>>>>>>>>>>>>>>>>> ] 154/256, 0.3 task/s, elapsed: 575s, ETA: 381s
[>>>>>>>>>>>>>>>>> ] 155/256, 0.3 task/s, elapsed: 576s, ETA: 375s
[>>>>>>>>>>>>>>>>> ] 156/256, 0.3 task/s, elapsed: 576s, ETA: 369s
[>>>>>>>>>>>>>>>>> ] 157/256, 0.3 task/s, elapsed: 577s, ETA: 364s
[>>>>>>>>>>>>>>>>> ] 158/256, 0.3 task/s, elapsed: 578s, ETA: 358s
[>>>>>>>>>>>>>>>>>> ] 159/256, 0.3 task/s, elapsed: 578s, ETA: 353s
[>>>>>>>>>>>>>>>>>> ] 160/256, 0.3 task/s, elapsed: 578s, ETA: 347s
[>>>>>>>>>>>>>>>>>> ] 161/256, 0.3 task/s, elapsed: 581s, ETA: 343s
[>>>>>>>>>>>>>>>>>> ] 162/256, 0.3 task/s, elapsed: 582s, ETA: 337s
[>>>>>>>>>>>>>>>>>> ] 163/256, 0.3 task/s, elapsed: 582s, ETA: 332s
[>>>>>>>>>>>>>>>>>> ] 164/256, 0.3 task/s, elapsed: 583s, ETA: 327s
[>>>>>>>>>>>>>>>>>> ] 165/256, 0.3 task/s, elapsed: 583s, ETA: 322s
[>>>>>>>>>>>>>>>>>> ] 166/256, 0.3 task/s, elapsed: 584s, ETA: 316s
[>>>>>>>>>>>>>>>>>> ] 167/256, 0.3 task/s, elapsed: 584s, ETA: 311s
[>>>>>>>>>>>>>>>>>>> ] 168/256, 0.3 task/s, elapsed: 585s, ETA: 306s
[>>>>>>>>>>>>>>>>>>> ] 169/256, 0.3 task/s, elapsed: 588s, ETA: 303s
[>>>>>>>>>>>>>>>>>>> ] 170/256, 0.3 task/s, elapsed: 591s, ETA: 299s
[>>>>>>>>>>>>>>>>>>> ] 171/256, 0.3 task/s, elapsed: 593s, ETA: 295s
[>>>>>>>>>>>>>>>>>>> ] 172/256, 0.3 task/s, elapsed: 594s, ETA: 290s
[>>>>>>>>>>>>>>>>>>> ] 173/256, 0.3 task/s, elapsed: 594s, ETA: 285s
[>>>>>>>>>>>>>>>>>>> ] 174/256, 0.3 task/s, elapsed: 595s, ETA: 280s
[>>>>>>>>>>>>>>>>>>> ] 175/256, 0.3 task/s, elapsed: 595s, ETA: 276s
[>>>>>>>>>>>>>>>>>>> ] 176/256, 0.3 task/s, elapsed: 597s, ETA: 271s
[>>>>>>>>>>>>>>>>>>>> ] 177/256, 0.3 task/s, elapsed: 597s, ETA: 267s
[>>>>>>>>>>>>>>>>>>>> ] 178/256, 0.3 task/s, elapsed: 598s, ETA: 262s
[>>>>>>>>>>>>>>>>>>>> ] 179/256, 0.3 task/s, elapsed: 601s, ETA: 259s
[>>>>>>>>>>>>>>>>>>>> ] 180/256, 0.3 task/s, elapsed: 601s, ETA: 254s
[>>>>>>>>>>>>>>>>>>>> ] 181/256, 0.3 task/s, elapsed: 602s, ETA: 250s
[>>>>>>>>>>>>>>>>>>>> ] 182/256, 0.3 task/s, elapsed: 604s, ETA: 245s
[>>>>>>>>>>>>>>>>>>>> ] 183/256, 0.3 task/s, elapsed: 604s, ETA: 241s
[>>>>>>>>>>>>>>>>>>>> ] 184/256, 0.3 task/s, elapsed: 608s, ETA: 238s
[>>>>>>>>>>>>>>>>>>>> ] 185/256, 0.3 task/s, elapsed: 608s, ETA: 233s
[>>>>>>>>>>>>>>>>>>>>> ] 186/256, 0.3 task/s, elapsed: 608s, ETA: 229s
[>>>>>>>>>>>>>>>>>>>>> ] 187/256, 0.3 task/s, elapsed: 609s, ETA: 225s
[>>>>>>>>>>>>>>>>>>>>> ] 188/256, 0.3 task/s, elapsed: 612s, ETA: 221s
[>>>>>>>>>>>>>>>>>>>>> ] 189/256, 0.3 task/s, elapsed: 613s, ETA: 217s
[>>>>>>>>>>>>>>>>>>>>> ] 190/256, 0.3 task/s, elapsed: 614s, ETA: 213s
[>>>>>>>>>>>>>>>>>>>>> ] 191/256, 0.3 task/s, elapsed: 614s, ETA: 209s
[>>>>>>>>>>>>>>>>>>>>> ] 192/256, 0.3 task/s, elapsed: 619s, ETA: 206s
[>>>>>>>>>>>>>>>>>>>>> ] 193/256, 0.3 task/s, elapsed: 621s, ETA: 203s
[>>>>>>>>>>>>>>>>>>>>> ] 194/256, 0.3 task/s, elapsed: 622s, ETA: 199s
[>>>>>>>>>>>>>>>>>>>>>> ] 195/256, 0.3 task/s, elapsed: 627s, ETA: 196s
[>>>>>>>>>>>>>>>>>>>>>> ] 196/256, 0.3 task/s, elapsed: 627s, ETA: 192s
[>>>>>>>>>>>>>>>>>>>>>> ] 197/256, 0.3 task/s, elapsed: 628s, ETA: 188s
[>>>>>>>>>>>>>>>>>>>>>> ] 198/256, 0.3 task/s, elapsed: 629s, ETA: 184s
[>>>>>>>>>>>>>>>>>>>>>> ] 199/256, 0.3 task/s, elapsed: 631s, ETA: 181s
[>>>>>>>>>>>>>>>>>>>>>> ] 200/256, 0.3 task/s, elapsed: 633s, ETA: 177s
[>>>>>>>>>>>>>>>>>>>>>> ] 201/256, 0.3 task/s, elapsed: 635s, ETA: 174s
[>>>>>>>>>>>>>>>>>>>>>> ] 202/256, 0.3 task/s, elapsed: 635s, ETA: 170s
[>>>>>>>>>>>>>>>>>>>>>> ] 203/256, 0.3 task/s, elapsed: 637s, ETA: 166s
[>>>>>>>>>>>>>>>>>>>>>>> ] 204/256, 0.3 task/s, elapsed: 640s, ETA: 163s
[>>>>>>>>>>>>>>>>>>>>>>> ] 205/256, 0.3 task/s, elapsed: 641s, ETA: 159s
[>>>>>>>>>>>>>>>>>>>>>>> ] 206/256, 0.3 task/s, elapsed: 641s, ETA: 156s
[>>>>>>>>>>>>>>>>>>>>>>> ] 207/256, 0.3 task/s, elapsed: 643s, ETA: 152s
[>>>>>>>>>>>>>>>>>>>>>>> ] 208/256, 0.3 task/s, elapsed: 644s, ETA: 149s
[>>>>>>>>>>>>>>>>>>>>>>> ] 209/256, 0.3 task/s, elapsed: 645s, ETA: 145s
[>>>>>>>>>>>>>>>>>>>>>>> ] 210/256, 0.3 task/s, elapsed: 647s, ETA: 142s
[>>>>>>>>>>>>>>>>>>>>>>> ] 211/256, 0.3 task/s, elapsed: 647s, ETA: 138s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 212/256, 0.3 task/s, elapsed: 649s, ETA: 135s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 213/256, 0.3 task/s, elapsed: 659s, ETA: 133s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 214/256, 0.3 task/s, elapsed: 660s, ETA: 130s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 215/256, 0.3 task/s, elapsed: 661s, ETA: 126s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 216/256, 0.3 task/s, elapsed: 671s, ETA: 124s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 217/256, 0.3 task/s, elapsed: 672s, ETA: 121s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 218/256, 0.3 task/s, elapsed: 672s, ETA: 117s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 219/256, 0.3 task/s, elapsed: 674s, ETA: 114s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 220/256, 0.3 task/s, elapsed: 677s, ETA: 111s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 221/256, 0.3 task/s, elapsed: 677s, ETA: 107s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 222/256, 0.3 task/s, elapsed: 678s, ETA: 104s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 223/256, 0.3 task/s, elapsed: 678s, ETA: 100s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 224/256, 0.3 task/s, elapsed: 678s, ETA: 97s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 225/256, 0.3 task/s, elapsed: 679s, ETA: 93s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 226/256, 0.3 task/s, elapsed: 680s, ETA: 90s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 227/256, 0.3 task/s, elapsed: 682s, ETA: 87s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 228/256, 0.3 task/s, elapsed: 684s, ETA: 84s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 229/256, 0.3 task/s, elapsed: 686s, ETA: 81s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 230/256, 0.3 task/s, elapsed: 687s, ETA: 78s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 231/256, 0.3 task/s, elapsed: 692s, ETA: 75s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 232/256, 0.3 task/s, elapsed: 693s, ETA: 72s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 233/256, 0.3 task/s, elapsed: 696s, ETA: 69s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 234/256, 0.3 task/s, elapsed: 697s, ETA: 65s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 235/256, 0.3 task/s, elapsed: 698s, ETA: 62s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 236/256, 0.3 task/s, elapsed: 700s, ETA: 59s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 237/256, 0.3 task/s, elapsed: 707s, ETA: 57s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 238/256, 0.3 task/s, elapsed: 709s, ETA: 54s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 239/256, 0.3 task/s, elapsed: 712s, ETA: 51s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 240/256, 0.3 task/s, elapsed: 714s, ETA: 48s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 241/256, 0.3 task/s, elapsed: 714s, ETA: 44s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 242/256, 0.3 task/s, elapsed: 715s, ETA: 41s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 243/256, 0.3 task/s, elapsed: 720s, ETA: 39s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 244/256, 0.3 task/s, elapsed: 724s, ETA: 36s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 245/256, 0.3 task/s, elapsed: 729s, ETA: 33s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 246/256, 0.3 task/s, elapsed: 729s, ETA: 30s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 247/256, 0.3 task/s, elapsed: 730s, ETA: 27s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 248/256, 0.3 task/s, elapsed: 732s, ETA: 24s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 249/256, 0.3 task/s, elapsed: 736s, ETA: 21s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 250/256, 0.3 task/s, elapsed: 738s, ETA: 18s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 251/256, 0.3 task/s, elapsed: 739s, ETA: 15s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 252/256, 0.3 task/s, elapsed: 747s, ETA: 12s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 253/256, 0.3 task/s, elapsed: 755s, ETA: 9s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 254/256, 0.3 task/s, elapsed: 758s, ETA: 6s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 255/256, 0.3 task/s, elapsed: 767s, ETA: 3s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 256/256, 0.3 task/s, elapsed: 774s, ETA: 0s
+11/19 11:10:45 - OpenCompass - INFO - Partitioned into 287 tasks.
+[ ] 0/287, elapsed: 0s, ETA:
[ ] 1/287, 0.0 task/s, elapsed: 55s, ETA: 15790s
[ ] 2/287, 0.0 task/s, elapsed: 55s, ETA: 7869s
[ ] 3/287, 0.1 task/s, elapsed: 55s, ETA: 5229s
[ ] 4/287, 0.1 task/s, elapsed: 55s, ETA: 3909s
[ ] 5/287, 0.1 task/s, elapsed: 55s, ETA: 3124s
[ ] 6/287, 0.1 task/s, elapsed: 55s, ETA: 2594s
[ ] 7/287, 0.1 task/s, elapsed: 55s, ETA: 2216s
[ ] 8/287, 0.1 task/s, elapsed: 55s, ETA: 1933s
[> ] 9/287, 0.2 task/s, elapsed: 55s, ETA: 1712s
[> ] 10/287, 0.2 task/s, elapsed: 55s, ETA: 1536s
[> ] 11/287, 0.2 task/s, elapsed: 55s, ETA: 1392s
[> ] 12/287, 0.2 task/s, elapsed: 55s, ETA: 1271s
[> ] 13/287, 0.2 task/s, elapsed: 55s, ETA: 1169s
[> ] 14/287, 0.3 task/s, elapsed: 56s, ETA: 1082s
[> ] 15/287, 0.3 task/s, elapsed: 56s, ETA: 1007s
[> ] 16/287, 0.3 task/s, elapsed: 56s, ETA: 941s
[> ] 17/287, 0.3 task/s, elapsed: 56s, ETA: 882s
[> ] 18/287, 0.3 task/s, elapsed: 56s, ETA: 830s
[>> ] 19/287, 0.3 task/s, elapsed: 56s, ETA: 784s
[>> ] 20/287, 0.4 task/s, elapsed: 56s, ETA: 742s
[>> ] 21/287, 0.4 task/s, elapsed: 56s, ETA: 704s
[>> ] 22/287, 0.4 task/s, elapsed: 56s, ETA: 670s
[>> ] 23/287, 0.4 task/s, elapsed: 56s, ETA: 638s
[>> ] 24/287, 0.4 task/s, elapsed: 56s, ETA: 610s
[>> ] 25/287, 0.4 task/s, elapsed: 56s, ETA: 583s
[>> ] 26/287, 0.5 task/s, elapsed: 56s, ETA: 559s
[>> ] 27/287, 0.5 task/s, elapsed: 56s, ETA: 536s
[>>> ] 28/287, 0.5 task/s, elapsed: 56s, ETA: 515s
[>>> ] 29/287, 0.5 task/s, elapsed: 56s, ETA: 495s
[>>> ] 30/287, 0.5 task/s, elapsed: 56s, ETA: 477s
[>>> ] 31/287, 0.6 task/s, elapsed: 56s, ETA: 460s
[>>> ] 32/287, 0.6 task/s, elapsed: 56s, ETA: 444s
[>>> ] 33/287, 0.6 task/s, elapsed: 56s, ETA: 429s
[>>> ] 34/287, 0.6 task/s, elapsed: 56s, ETA: 415s
[>>> ] 35/287, 0.6 task/s, elapsed: 56s, ETA: 402s
[>>> ] 36/287, 0.6 task/s, elapsed: 56s, ETA: 389s
[>>> ] 37/287, 0.7 task/s, elapsed: 56s, ETA: 378s
[>>>> ] 38/287, 0.7 task/s, elapsed: 56s, ETA: 366s
[>>>> ] 39/287, 0.7 task/s, elapsed: 56s, ETA: 357s
[>>>> ] 40/287, 0.7 task/s, elapsed: 56s, ETA: 347s
[>>>> ] 41/287, 0.7 task/s, elapsed: 58s, ETA: 350s
[>>>> ] 42/287, 0.7 task/s, elapsed: 58s, ETA: 341s
[>>>> ] 43/287, 0.7 task/s, elapsed: 58s, ETA: 331s
[>>>> ] 44/287, 0.8 task/s, elapsed: 58s, ETA: 323s
[>>>> ] 45/287, 0.8 task/s, elapsed: 59s, ETA: 315s
[>>>> ] 46/287, 0.8 task/s, elapsed: 59s, ETA: 307s
[>>>>> ] 47/287, 0.8 task/s, elapsed: 59s, ETA: 299s
[>>>>> ] 48/287, 0.8 task/s, elapsed: 59s, ETA: 293s
[>>>>> ] 49/287, 0.8 task/s, elapsed: 59s, ETA: 286s
[>>>>> ] 50/287, 0.9 task/s, elapsed: 59s, ETA: 279s
[>>>>> ] 51/287, 0.9 task/s, elapsed: 59s, ETA: 272s
[>>>>> ] 52/287, 0.9 task/s, elapsed: 59s, ETA: 266s
[>>>>> ] 53/287, 0.9 task/s, elapsed: 59s, ETA: 260s
[>>>>> ] 54/287, 0.9 task/s, elapsed: 59s, ETA: 254s
[>>>>> ] 55/287, 0.9 task/s, elapsed: 59s, ETA: 249s
[>>>>>> ] 56/287, 1.0 task/s, elapsed: 59s, ETA: 243s
[>>>>>> ] 57/287, 1.0 task/s, elapsed: 59s, ETA: 238s
[>>>>>> ] 58/287, 1.0 task/s, elapsed: 59s, ETA: 233s
[>>>>>> ] 59/287, 1.0 task/s, elapsed: 59s, ETA: 228s
[>>>>>> ] 60/287, 1.0 task/s, elapsed: 59s, ETA: 223s
[>>>>>> ] 61/287, 1.0 task/s, elapsed: 59s, ETA: 219s
[>>>>>> ] 62/287, 1.1 task/s, elapsed: 59s, ETA: 214s
[>>>>>> ] 63/287, 1.1 task/s, elapsed: 59s, ETA: 210s
[>>>>>> ] 64/287, 1.1 task/s, elapsed: 59s, ETA: 206s
[>>>>>>> ] 65/287, 1.1 task/s, elapsed: 59s, ETA: 202s
[>>>>>>> ] 66/287, 1.1 task/s, elapsed: 59s, ETA: 198s
[>>>>>>> ] 67/287, 1.1 task/s, elapsed: 59s, ETA: 194s
[>>>>>>> ] 68/287, 1.2 task/s, elapsed: 59s, ETA: 190s
[>>>>>>> ] 69/287, 1.2 task/s, elapsed: 59s, ETA: 187s
[>>>>>>> ] 70/287, 1.2 task/s, elapsed: 59s, ETA: 183s
[>>>>>>> ] 71/287, 1.2 task/s, elapsed: 59s, ETA: 180s
[>>>>>>> ] 72/287, 1.2 task/s, elapsed: 59s, ETA: 176s
[>>>>>>> ] 73/287, 1.2 task/s, elapsed: 59s, ETA: 173s
[>>>>>>> ] 74/287, 1.3 task/s, elapsed: 59s, ETA: 170s
[>>>>>>>> ] 75/287, 1.3 task/s, elapsed: 59s, ETA: 167s
[>>>>>>>> ] 76/287, 1.3 task/s, elapsed: 59s, ETA: 164s
[>>>>>>>> ] 77/287, 1.3 task/s, elapsed: 59s, ETA: 161s
[>>>>>>>> ] 78/287, 1.3 task/s, elapsed: 59s, ETA: 158s
[>>>>>>>> ] 79/287, 1.3 task/s, elapsed: 59s, ETA: 156s
[>>>>>>>> ] 80/287, 1.4 task/s, elapsed: 59s, ETA: 153s
[>>>>>>>> ] 81/287, 1.4 task/s, elapsed: 59s, ETA: 150s
[>>>>>>>> ] 82/287, 1.4 task/s, elapsed: 59s, ETA: 148s
[>>>>>>>> ] 83/287, 1.4 task/s, elapsed: 59s, ETA: 145s
[>>>>>>>>> ] 84/287, 1.4 task/s, elapsed: 59s, ETA: 143s
[>>>>>>>>> ] 85/287, 1.4 task/s, elapsed: 59s, ETA: 140s
[>>>>>>>>> ] 86/287, 1.5 task/s, elapsed: 59s, ETA: 138s
[>>>>>>>>> ] 87/287, 1.5 task/s, elapsed: 59s, ETA: 136s
[>>>>>>>>> ] 88/287, 1.5 task/s, elapsed: 59s, ETA: 134s
[>>>>>>>>> ] 89/287, 1.5 task/s, elapsed: 60s, ETA: 134s
[>>>>>>>>> ] 90/287, 1.5 task/s, elapsed: 60s, ETA: 132s
[>>>>>>>>> ] 91/287, 1.5 task/s, elapsed: 60s, ETA: 130s
[>>>>>>>>> ] 92/287, 1.5 task/s, elapsed: 60s, ETA: 128s
[>>>>>>>>>> ] 93/287, 1.5 task/s, elapsed: 60s, ETA: 126s
[>>>>>>>>>> ] 94/287, 1.6 task/s, elapsed: 60s, ETA: 124s
[>>>>>>>>>> ] 95/287, 1.6 task/s, elapsed: 60s, ETA: 122s
[>>>>>>>>>> ] 96/287, 1.6 task/s, elapsed: 60s, ETA: 120s
[>>>>>>>>>> ] 97/287, 1.6 task/s, elapsed: 60s, ETA: 118s
[>>>>>>>>>> ] 98/287, 1.6 task/s, elapsed: 60s, ETA: 116s
[>>>>>>>>>> ] 99/287, 1.6 task/s, elapsed: 60s, ETA: 115s
[>>>>>>>>>> ] 100/287, 1.7 task/s, elapsed: 60s, ETA: 113s
[>>>>>>>>>> ] 101/287, 1.7 task/s, elapsed: 60s, ETA: 111s
[>>>>>>>>>> ] 102/287, 1.7 task/s, elapsed: 60s, ETA: 110s
[>>>>>>>>>> ] 103/287, 1.7 task/s, elapsed: 60s, ETA: 108s
[>>>>>>>>>> ] 104/287, 1.7 task/s, elapsed: 60s, ETA: 106s
[>>>>>>>>>> ] 105/287, 1.7 task/s, elapsed: 60s, ETA: 105s
[>>>>>>>>>>> ] 106/287, 1.8 task/s, elapsed: 61s, ETA: 103s
[>>>>>>>>>>> ] 107/287, 1.8 task/s, elapsed: 61s, ETA: 102s
[>>>>>>>>>>> ] 108/287, 1.8 task/s, elapsed: 61s, ETA: 100s
[>>>>>>>>>>> ] 109/287, 1.8 task/s, elapsed: 61s, ETA: 99s
[>>>>>>>>>>> ] 110/287, 1.8 task/s, elapsed: 61s, ETA: 98s
[>>>>>>>>>>> ] 111/287, 1.8 task/s, elapsed: 61s, ETA: 96s
[>>>>>>>>>>> ] 112/287, 1.8 task/s, elapsed: 61s, ETA: 95s
[>>>>>>>>>>> ] 113/287, 1.9 task/s, elapsed: 61s, ETA: 93s
[>>>>>>>>>>> ] 114/287, 1.9 task/s, elapsed: 61s, ETA: 92s
[>>>>>>>>>>>> ] 115/287, 1.9 task/s, elapsed: 61s, ETA: 91s
[>>>>>>>>>>>> ] 116/287, 1.9 task/s, elapsed: 61s, ETA: 90s
[>>>>>>>>>>>> ] 117/287, 1.9 task/s, elapsed: 61s, ETA: 88s
[>>>>>>>>>>>> ] 118/287, 1.9 task/s, elapsed: 61s, ETA: 87s
[>>>>>>>>>>>> ] 119/287, 2.0 task/s, elapsed: 61s, ETA: 86s
[>>>>>>>>>>>> ] 120/287, 2.0 task/s, elapsed: 61s, ETA: 85s
[>>>>>>>>>>>> ] 121/287, 2.0 task/s, elapsed: 61s, ETA: 83s
[>>>>>>>>>>>> ] 122/287, 2.0 task/s, elapsed: 61s, ETA: 82s
[>>>>>>>>>>>> ] 123/287, 2.0 task/s, elapsed: 61s, ETA: 81s
[>>>>>>>>>>>> ] 124/287, 2.0 task/s, elapsed: 61s, ETA: 80s
[>>>>>>>>>>>>> ] 125/287, 2.1 task/s, elapsed: 61s, ETA: 79s
[>>>>>>>>>>>>> ] 126/287, 2.1 task/s, elapsed: 61s, ETA: 78s
[>>>>>>>>>>>>> ] 127/287, 2.1 task/s, elapsed: 61s, ETA: 77s
[>>>>>>>>>>>>> ] 128/287, 2.1 task/s, elapsed: 61s, ETA: 76s
[>>>>>>>>>>>>> ] 129/287, 2.1 task/s, elapsed: 61s, ETA: 75s
[>>>>>>>>>>>>> ] 130/287, 2.1 task/s, elapsed: 61s, ETA: 74s
[>>>>>>>>>>>>> ] 131/287, 2.2 task/s, elapsed: 61s, ETA: 73s
[>>>>>>>>>>>>> ] 132/287, 2.2 task/s, elapsed: 61s, ETA: 72s
[>>>>>>>>>>>>> ] 133/287, 2.2 task/s, elapsed: 61s, ETA: 71s
[>>>>>>>>>>>>>> ] 134/287, 2.2 task/s, elapsed: 61s, ETA: 70s
[>>>>>>>>>>>>>> ] 135/287, 2.2 task/s, elapsed: 61s, ETA: 69s
[>>>>>>>>>>>>>> ] 136/287, 2.2 task/s, elapsed: 61s, ETA: 68s
[>>>>>>>>>>>>>> ] 137/287, 2.1 task/s, elapsed: 67s, ETA: 73s
[>>>>>>>>>>>>>> ] 138/287, 2.1 task/s, elapsed: 67s, ETA: 72s
[>>>>>>>>>>>>>> ] 139/287, 2.1 task/s, elapsed: 67s, ETA: 71s
[>>>>>>>>>>>>>> ] 140/287, 2.1 task/s, elapsed: 67s, ETA: 70s
[>>>>>>>>>>>>>> ] 141/287, 2.1 task/s, elapsed: 67s, ETA: 69s
[>>>>>>>>>>>>>> ] 142/287, 2.1 task/s, elapsed: 67s, ETA: 69s
[>>>>>>>>>>>>>> ] 143/287, 2.1 task/s, elapsed: 67s, ETA: 68s
[>>>>>>>>>>>>>>> ] 144/287, 2.1 task/s, elapsed: 67s, ETA: 67s
[>>>>>>>>>>>>>>> ] 145/287, 2.2 task/s, elapsed: 67s, ETA: 66s
[>>>>>>>>>>>>>>> ] 146/287, 2.2 task/s, elapsed: 67s, ETA: 65s
[>>>>>>>>>>>>>>> ] 147/287, 2.2 task/s, elapsed: 67s, ETA: 64s
[>>>>>>>>>>>>>>> ] 148/287, 2.2 task/s, elapsed: 67s, ETA: 63s
[>>>>>>>>>>>>>>> ] 149/287, 2.2 task/s, elapsed: 67s, ETA: 62s
[>>>>>>>>>>>>>>> ] 150/287, 2.2 task/s, elapsed: 67s, ETA: 62s
[>>>>>>>>>>>>>>> ] 151/287, 2.2 task/s, elapsed: 67s, ETA: 61s
[>>>>>>>>>>>>>>> ] 152/287, 2.3 task/s, elapsed: 67s, ETA: 60s
[>>>>>>>>>>>>>>> ] 153/287, 2.3 task/s, elapsed: 67s, ETA: 59s
[>>>>>>>>>>>>>>>> ] 154/287, 2.3 task/s, elapsed: 67s, ETA: 58s
[>>>>>>>>>>>>>>>> ] 155/287, 2.3 task/s, elapsed: 67s, ETA: 57s
[>>>>>>>>>>>>>>>> ] 156/287, 2.3 task/s, elapsed: 67s, ETA: 57s
[>>>>>>>>>>>>>>>> ] 157/287, 2.3 task/s, elapsed: 67s, ETA: 56s
[>>>>>>>>>>>>>>>> ] 158/287, 2.3 task/s, elapsed: 67s, ETA: 55s
[>>>>>>>>>>>>>>>> ] 159/287, 2.4 task/s, elapsed: 67s, ETA: 54s
[>>>>>>>>>>>>>>>> ] 160/287, 2.4 task/s, elapsed: 67s, ETA: 54s
[>>>>>>>>>>>>>>>> ] 161/287, 2.4 task/s, elapsed: 67s, ETA: 53s
[>>>>>>>>>>>>>>>> ] 162/287, 2.4 task/s, elapsed: 67s, ETA: 52s
[>>>>>>>>>>>>>>>>> ] 163/287, 2.4 task/s, elapsed: 68s, ETA: 51s
[>>>>>>>>>>>>>>>>> ] 164/287, 2.4 task/s, elapsed: 68s, ETA: 51s
[>>>>>>>>>>>>>>>>> ] 165/287, 2.4 task/s, elapsed: 68s, ETA: 50s
[>>>>>>>>>>>>>>>>> ] 166/287, 2.5 task/s, elapsed: 68s, ETA: 49s
[>>>>>>>>>>>>>>>>> ] 167/287, 2.5 task/s, elapsed: 68s, ETA: 49s
[>>>>>>>>>>>>>>>>> ] 168/287, 2.5 task/s, elapsed: 68s, ETA: 48s
[>>>>>>>>>>>>>>>>> ] 169/287, 2.5 task/s, elapsed: 68s, ETA: 47s
[>>>>>>>>>>>>>>>>> ] 170/287, 2.5 task/s, elapsed: 68s, ETA: 47s
[>>>>>>>>>>>>>>>>> ] 171/287, 2.5 task/s, elapsed: 68s, ETA: 46s
[>>>>>>>>>>>>>>>>> ] 172/287, 2.5 task/s, elapsed: 68s, ETA: 45s
[>>>>>>>>>>>>>>>>>> ] 173/287, 2.6 task/s, elapsed: 68s, ETA: 45s
[>>>>>>>>>>>>>>>>>> ] 174/287, 2.6 task/s, elapsed: 68s, ETA: 44s
[>>>>>>>>>>>>>>>>>> ] 175/287, 2.6 task/s, elapsed: 68s, ETA: 43s
[>>>>>>>>>>>>>>>>>> ] 176/287, 2.6 task/s, elapsed: 68s, ETA: 43s
[>>>>>>>>>>>>>>>>>> ] 177/287, 2.6 task/s, elapsed: 68s, ETA: 42s
[>>>>>>>>>>>>>>>>>> ] 178/287, 2.6 task/s, elapsed: 68s, ETA: 42s
[>>>>>>>>>>>>>>>>>> ] 179/287, 2.6 task/s, elapsed: 68s, ETA: 41s
[>>>>>>>>>>>>>>>>>> ] 180/287, 2.7 task/s, elapsed: 68s, ETA: 40s
[>>>>>>>>>>>>>>>>>> ] 181/287, 2.7 task/s, elapsed: 68s, ETA: 40s
[>>>>>>>>>>>>>>>>>>> ] 182/287, 2.7 task/s, elapsed: 68s, ETA: 39s
[>>>>>>>>>>>>>>>>>>> ] 183/287, 2.7 task/s, elapsed: 68s, ETA: 39s
[>>>>>>>>>>>>>>>>>>> ] 184/287, 2.7 task/s, elapsed: 68s, ETA: 38s
[>>>>>>>>>>>>>>>>>>> ] 185/287, 2.7 task/s, elapsed: 68s, ETA: 37s
[>>>>>>>>>>>>>>>>>>> ] 186/287, 2.7 task/s, elapsed: 68s, ETA: 37s
[>>>>>>>>>>>>>>>>>>> ] 187/287, 2.8 task/s, elapsed: 68s, ETA: 36s
[>>>>>>>>>>>>>>>>>>> ] 188/287, 2.8 task/s, elapsed: 68s, ETA: 36s
[>>>>>>>>>>>>>>>>>>> ] 189/287, 2.8 task/s, elapsed: 68s, ETA: 35s
[>>>>>>>>>>>>>>>>>>> ] 190/287, 2.8 task/s, elapsed: 68s, ETA: 35s
[>>>>>>>>>>>>>>>>>>> ] 191/287, 2.8 task/s, elapsed: 68s, ETA: 34s
[>>>>>>>>>>>>>>>>>>>> ] 192/287, 2.8 task/s, elapsed: 68s, ETA: 34s
[>>>>>>>>>>>>>>>>>>>> ] 193/287, 2.8 task/s, elapsed: 68s, ETA: 33s
[>>>>>>>>>>>>>>>>>>>> ] 194/287, 2.9 task/s, elapsed: 68s, ETA: 33s
[>>>>>>>>>>>>>>>>>>>> ] 195/287, 2.7 task/s, elapsed: 73s, ETA: 34s
[>>>>>>>>>>>>>>>>>>>> ] 196/287, 2.7 task/s, elapsed: 73s, ETA: 34s
[>>>>>>>>>>>>>>>>>>>> ] 197/287, 2.7 task/s, elapsed: 73s, ETA: 33s
[>>>>>>>>>>>>>>>>>>>> ] 198/287, 2.7 task/s, elapsed: 73s, ETA: 33s
[>>>>>>>>>>>>>>>>>>>> ] 199/287, 2.7 task/s, elapsed: 73s, ETA: 32s
[>>>>>>>>>>>>>>>>>>>> ] 200/287, 2.7 task/s, elapsed: 73s, ETA: 32s
[>>>>>>>>>>>>>>>>>>>>> ] 201/287, 2.8 task/s, elapsed: 73s, ETA: 31s
[>>>>>>>>>>>>>>>>>>>>> ] 202/287, 2.8 task/s, elapsed: 73s, ETA: 31s
[>>>>>>>>>>>>>>>>>>>>> ] 203/287, 2.7 task/s, elapsed: 75s, ETA: 31s
[>>>>>>>>>>>>>>>>>>>>> ] 204/287, 2.7 task/s, elapsed: 75s, ETA: 31s
[>>>>>>>>>>>>>>>>>>>>> ] 205/287, 2.7 task/s, elapsed: 76s, ETA: 30s
[>>>>>>>>>>>>>>>>>>>>> ] 206/287, 2.7 task/s, elapsed: 76s, ETA: 30s
[>>>>>>>>>>>>>>>>>>>>> ] 207/287, 2.7 task/s, elapsed: 76s, ETA: 29s
[>>>>>>>>>>>>>>>>>>>>> ] 208/287, 2.7 task/s, elapsed: 76s, ETA: 29s
[>>>>>>>>>>>>>>>>>>>>> ] 209/287, 2.7 task/s, elapsed: 76s, ETA: 29s
[>>>>>>>>>>>>>>>>>>>>> ] 210/287, 2.7 task/s, elapsed: 77s, ETA: 28s
[>>>>>>>>>>>>>>>>>>>>>> ] 211/287, 2.7 task/s, elapsed: 78s, ETA: 28s
[>>>>>>>>>>>>>>>>>>>>>> ] 212/287, 2.7 task/s, elapsed: 78s, ETA: 28s
[>>>>>>>>>>>>>>>>>>>>>> ] 213/287, 2.7 task/s, elapsed: 78s, ETA: 27s
[>>>>>>>>>>>>>>>>>>>>>> ] 214/287, 2.7 task/s, elapsed: 78s, ETA: 27s
[>>>>>>>>>>>>>>>>>>>>>> ] 215/287, 2.7 task/s, elapsed: 79s, ETA: 27s
[>>>>>>>>>>>>>>>>>>>>>> ] 216/287, 2.7 task/s, elapsed: 79s, ETA: 26s
[>>>>>>>>>>>>>>>>>>>>>> ] 217/287, 2.7 task/s, elapsed: 80s, ETA: 26s
[>>>>>>>>>>>>>>>>>>>>>> ] 218/287, 2.7 task/s, elapsed: 82s, ETA: 26s
[>>>>>>>>>>>>>>>>>>>>>> ] 219/287, 2.7 task/s, elapsed: 82s, ETA: 25s
[>>>>>>>>>>>>>>>>>>>>>> ] 220/287, 2.7 task/s, elapsed: 82s, ETA: 25s
[>>>>>>>>>>>>>>>>>>>>>>> ] 221/287, 2.7 task/s, elapsed: 82s, ETA: 25s
[>>>>>>>>>>>>>>>>>>>>>>> ] 222/287, 2.7 task/s, elapsed: 82s, ETA: 24s
[>>>>>>>>>>>>>>>>>>>>>>> ] 223/287, 2.7 task/s, elapsed: 84s, ETA: 24s
[>>>>>>>>>>>>>>>>>>>>>> ] 224/287, 2.1 task/s, elapsed: 107s, ETA: 30s
[>>>>>>>>>>>>>>>>>>>>>> ] 225/287, 2.1 task/s, elapsed: 107s, ETA: 30s
[>>>>>>>>>>>>>>>>>>>>>> ] 226/287, 2.1 task/s, elapsed: 107s, ETA: 29s
[>>>>>>>>>>>>>>>>>>>>>> ] 227/287, 2.1 task/s, elapsed: 108s, ETA: 28s
[>>>>>>>>>>>>>>>>>>>>>>> ] 228/287, 2.1 task/s, elapsed: 108s, ETA: 28s
[>>>>>>>>>>>>>>>>>>>>>>> ] 229/287, 2.1 task/s, elapsed: 108s, ETA: 27s
[>>>>>>>>>>>>>>>>>>>>>>> ] 230/287, 2.1 task/s, elapsed: 108s, ETA: 27s
[>>>>>>>>>>>>>>>>>>>>>>> ] 231/287, 2.1 task/s, elapsed: 108s, ETA: 26s
[>>>>>>>>>>>>>>>>>>>>>>> ] 232/287, 2.2 task/s, elapsed: 108s, ETA: 26s
[>>>>>>>>>>>>>>>>>>>>>>> ] 233/287, 2.2 task/s, elapsed: 108s, ETA: 25s
[>>>>>>>>>>>>>>>>>>>>>>> ] 234/287, 2.2 task/s, elapsed: 108s, ETA: 24s
[>>>>>>>>>>>>>>>>>>>>>>> ] 235/287, 2.2 task/s, elapsed: 108s, ETA: 24s
[>>>>>>>>>>>>>>>>>>>>>>> ] 236/287, 2.2 task/s, elapsed: 108s, ETA: 23s
[>>>>>>>>>>>>>>>>>>>>>>> ] 237/287, 2.2 task/s, elapsed: 108s, ETA: 23s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 238/287, 2.2 task/s, elapsed: 108s, ETA: 22s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 239/287, 2.2 task/s, elapsed: 108s, ETA: 22s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 240/287, 2.2 task/s, elapsed: 108s, ETA: 21s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 241/287, 2.2 task/s, elapsed: 108s, ETA: 21s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 242/287, 2.2 task/s, elapsed: 108s, ETA: 20s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 243/287, 2.2 task/s, elapsed: 108s, ETA: 20s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 244/287, 2.3 task/s, elapsed: 108s, ETA: 19s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 245/287, 2.3 task/s, elapsed: 108s, ETA: 19s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 246/287, 2.3 task/s, elapsed: 108s, ETA: 18s
[>>>>>>>>>>>>>>>>>>>>>>>> ] 247/287, 2.3 task/s, elapsed: 108s, ETA: 18s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 248/287, 2.3 task/s, elapsed: 108s, ETA: 17s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 249/287, 2.3 task/s, elapsed: 108s, ETA: 17s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 250/287, 2.3 task/s, elapsed: 109s, ETA: 16s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 251/287, 2.3 task/s, elapsed: 109s, ETA: 16s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 252/287, 2.3 task/s, elapsed: 109s, ETA: 15s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 253/287, 2.3 task/s, elapsed: 109s, ETA: 15s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 254/287, 2.3 task/s, elapsed: 109s, ETA: 14s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 255/287, 2.3 task/s, elapsed: 109s, ETA: 14s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 256/287, 2.4 task/s, elapsed: 109s, ETA: 13s
[>>>>>>>>>>>>>>>>>>>>>>>>> ] 257/287, 2.4 task/s, elapsed: 109s, ETA: 13s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 258/287, 2.4 task/s, elapsed: 109s, ETA: 12s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 259/287, 2.4 task/s, elapsed: 109s, ETA: 12s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 260/287, 2.4 task/s, elapsed: 109s, ETA: 11s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 261/287, 2.4 task/s, elapsed: 109s, ETA: 11s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 262/287, 2.4 task/s, elapsed: 109s, ETA: 10s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 263/287, 2.4 task/s, elapsed: 109s, ETA: 10s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 264/287, 2.4 task/s, elapsed: 109s, ETA: 9s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 265/287, 2.4 task/s, elapsed: 109s, ETA: 9s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 266/287, 2.4 task/s, elapsed: 109s, ETA: 9s
[>>>>>>>>>>>>>>>>>>>>>>>>>> ] 267/287, 2.5 task/s, elapsed: 109s, ETA: 8s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 268/287, 2.5 task/s, elapsed: 109s, ETA: 8s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 269/287, 2.5 task/s, elapsed: 109s, ETA: 7s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 270/287, 2.5 task/s, elapsed: 109s, ETA: 7s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 271/287, 2.5 task/s, elapsed: 109s, ETA: 6s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 272/287, 2.5 task/s, elapsed: 109s, ETA: 6s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 273/287, 2.5 task/s, elapsed: 109s, ETA: 6s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 274/287, 2.5 task/s, elapsed: 109s, ETA: 5s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 275/287, 2.5 task/s, elapsed: 109s, ETA: 5s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 276/287, 2.5 task/s, elapsed: 109s, ETA: 4s
[>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 277/287, 2.5 task/s, elapsed: 109s, ETA: 4s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 278/287, 2.6 task/s, elapsed: 109s, ETA: 4s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 279/287, 2.6 task/s, elapsed: 109s, ETA: 3s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 280/287, 2.6 task/s, elapsed: 109s, ETA: 3s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 281/287, 2.6 task/s, elapsed: 109s, ETA: 2s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 282/287, 2.6 task/s, elapsed: 109s, ETA: 2s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 283/287, 2.6 task/s, elapsed: 109s, ETA: 2s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 284/287, 2.5 task/s, elapsed: 111s, ETA: 1s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 285/287, 2.5 task/s, elapsed: 112s, ETA: 1s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>> ] 286/287, 1.2 task/s, elapsed: 240s, ETA: 1s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 287/287, 1.1 task/s, elapsed: 253s, ETA: 0s
+dataset version metric mode internvl-chat-20b
+---------------------------- --------- ---------------------------- ------ -------------------
+mmlu - naive_average gen 74.61
+mmlu_pro - - - -
+cmmlu - naive_average gen 78.70
+ceval - naive_average gen 79.74
+agieval - - - -
+GaokaoBench - weighted_average gen 77.29
+GPQA_extended - - - -
+GPQA_main - - - -
+GPQA_diamond - - - -
+ARC-c - - - -
+truthfulqa - - - -
+triviaqa 2121ce score gen 63.36
+triviaqa_wiki_1shot - - - -
+nq 3dcea1 score gen 29.36
+C3 8c358f accuracy gen 94.68
+race-high 9a54b6 accuracy gen 90.79
+flores_100 - - - -
+winogrande b36770 accuracy gen 83.50
+hellaswag e42710 accuracy gen 94.13
+bbh - naive_average gen 73.43
+gsm8k 1d7fe4 accuracy gen 77.79
+math 393424 accuracy gen 49.88
+TheoremQA 6f0af8 score gen 23.75
+MathBench - - - -
+openai_humaneval 8e312c humaneval_pass@1 gen 75.00
+humaneval_plus - - - -
+humanevalx - - - -
+sanitized_mbpp a447ff score gen 68.48
+mbpp_plus - - - -
+mbpp_cn 6fb572 score gen 55.20
+leval - - - -
+leval_closed - - - -
+leval_open - - - -
+longbench - - - -
+longbench_single-document-qa - - - -
+longbench_multi-document-qa - - - -
+longbench_summarization - - - -
+longbench_few-shot-learning - - - -
+longbench_synthetic-tasks - - - -
+longbench_code-completion - - - -
+teval - - - -
+teval_zh - - - -
+IFEval 3321a3 Prompt-level-strict-accuracy gen 50.46
+IFEval 3321a3 Inst-level-strict-accuracy gen 60.79
+IFEval 3321a3 Prompt-level-loose-accuracy gen 53.42
+IFEval 3321a3 Inst-level-loose-accuracy gen 63.67
+11/19 11:15:09 - OpenCompass - INFO - write summary to /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/work_dirs/internvl_chat_v2_5/internvl_chat_v2_5_internlm2_5_7b_dynamic_res_finetune_datav162/20241119_105739/summary/summary_20241119_105739.txt
+11/19 11:15:09 - OpenCompass - INFO - write csv to /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/work_dirs/internvl_chat_v2_5/internvl_chat_v2_5_internlm2_5_7b_dynamic_res_finetune_datav162/20241119_105739/summary/summary_20241119_105739.csv
diff --git a/examples/image1.jpg b/examples/image1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fd9891ef7e00774157a9dcd726b2ea9fa0c5ecff
GIT binary patch
literal 78073
zcma&NcUTio)HX~n5^Crgsz#(HgpTwkRp}k6p@Vb?MSAZwp#+fLyA%=WHPlc91(jYD
zDFFddUw+T?Uhf~@U*EZ|OlEgy&YYb&bKhroXa0TscZ|oWVDD+`fM*|$hlhtxh(|+6
zKuB_z{eSZB6J9hPfQ*ccoD4ut4xpvDM?uR#4FFIxFav=MKp-#ls^Y
z#J`jOXDuNSJ^?Wv2`SlsXYb_yUAVhB0zx8U8ax650(_#ogz$f=-krrGq$Q%G;RF(M
zK)FaD^a{e<@(i$tq#}&8cl|*?L`ZxmMECHC|Iu7(twWy^$|NH$8M&=yAMRvZ=wI
ztwDVtu~7;0#)0?B$L}gFe|SIc?TzgRn++6eif-C1=`EsMdV@-x3rPyN*HtbG0fs*%
zP3-$_2I%xFFC4|D5ATbd8(U;C@Te!XwMD#Ni5a!GNR>?W3$#Tn9ymOFbGQG9X~$|XbE<~h|}u(;4|q_{<92g{StzhWUJ
zK6kVB&NnqH)CS7@v!y1E^A7oG6oMvcreI`~S9VvAYm7!dkws!TUU27T0KitRsO&rL
zQf!0LZ;8JmJ_dOl->^wEr$whmmtL#b#%m!EWxZ{jfzkk~0^6&qdwK?2+1|9cywI2>
zERC;7Z*a_SfAp+@cphnAeOjTQuczRYN=OiQ^sKp^wR+5qOJP}~#cr~ON`Y@9F&2L2
zT~=Pw_MSiDqCl-qhSPkKs`BjGS9CV@!58@liRulnWlGI<^#cY5?zwR8Y@>4DH{sU?
zJF$|#S4|bWtsNOV`#~7;UX6KjPS9-=Ap!_}DSb5hsT;9jE?D^ma{l(QF=J0x7}CjT
zqe`qX+VJs<>2lWF&0CL?y(uwN-%6YQ>`&{6eB9eZWmYAKI>Ykml8{LG+Xk1;d|#$G
z&UmWkTRvrsp}Ou8Ip6ZAV9IC-@X6s5a_$yd&U-3>ZZ%)H4g9EeN8-{?g87zZ>?zc$
zkEk+K`R%*s?WTA1-foG^U5#o*#oJkWoyS&
zf7x1NcqHGP+)z7Qn+xkxE(JxMcs!dhNm8lU&o``mD`Ydru{+ldr#doWak36EAsuPe
z`-A^PJe$y7*Yr_h?LOWbjufHG+N#o0y5M~@JTFMFKd~b7YF5Jv5MvpQ8klC6z4ZU;
z*HE)dv#t$|-!GZnK~MV*2kUJ<<`KD1BN^+RRvOq~_0-Q>k8fs{?TZxKyqqe@!{8#F
zmz)kcR12T_ynaeMLZ+roaDvNfhU)AeN7jD2Rpeaf2j8C)DA;1rlSPX%3XE(J`F|Qz
z{YdL6{{`*fzhWD5-JG|7Q#`S(d~SF+t#wh6+TH-3v)X#7W!0_roT6fhULhLjQPT*W#|D1KRX%p8nx_U~yxR(Jri?zJM8wQA-l36cB)-pK{2F(U@;^MHRK#
z&A6!TL%W^2q7p4f-rupKFxI)fVvb|h(?qf8xoDczj?&$W_xJ4`9`I+JUp>>Qb$_+-
zd+Bn|arm9VhZKpmoDGBJvALxMA1;I_ZIb5sk&_3aW!D^+aNXWeXn}o2_1jzau}{cA
zGxYl<3#CF+uDLc~o{qB)=lgzf*ORkVSVfZ$KFeV?{mrGtuwuH?#xP?(q%CJr=8eYQS?1QCh7;T}WLgwZ|&yHyh
z9M#+)YQ518^HMOZRV?$!t~K*Lfck!}d}F<$XS#Bpzc^=sz)1bz&JAg*Dir&{wzFMe
zTDYUPFZaomcrkTtV)#Nr*d|RUa0`0IE?IG4wf5Z|_wzY~(%LJrPp0&jyBw>Eul_*=
z2(!A!4a9!+Q@Bl?ZMy&S;Xgd{*eMEI=_0Gmqtmx;tAxL_J@mVJS@IlLzq|R6XJj3n
zCWrY2wOBot{w7RRzczl~j7p+`G{fXEp~?IlhqTYj8dnOdoDNS~z0_fx<7lbZ<=iXX
zNYfmcT~x)^HjyYyP|rc~2Uf?hO4jt(eVIQ}#0BSjHp0W%Pd^12vs+5f`#%swz*Rd6
zM&DuVGXGs_-DDGomb9E%nWkHd@#Q$ByQR4-Nw
zLKZY8AYa04%+sFMU58wY^Nd0!m-vUrV-Wsf`FW-r#gjL)f?^z_*TJ4b`V+sUf_~b4
zm67|9)-b+C`~k8gSSKY|`cg(%;Zf4S@(!z~;HSna4gZ-=HRB|g@{3^ja*|AC75d`)
z?d;^A4|{pC;QJ(@^!TLtT(RR}73W~v;Q(#m^7V6tuJJVK{^-xO+akFPl`l-%6ATN$
zsvY;@;=h{y+}X|3XV)JY{dn^?;7wkZ-@(8_rC@N=C##57J)b#L#YAfF+=_SjW|6pa
zOvw`6^?a$IbGKfult_=IMy2bMqRKQY*CLjKDZSP0w!n|7-JZ$?X4?a~`ctr;uvnd=3T&G@-bPZ<4om4iq)4jkWnDZXw^_H9P&mReAP@L;
znG=CAUT;rY!Ef2dvKYsg^@D*Ib1Mjg_Pw=dy?btop-*-=r{#uJMXtG+TGPc!6hyv8
zk?ADrlYMr4#;k`DrnYmkyWH+13hjD3W7OEZQc;PSKN8LLvX?4lO;X2>CsWA6z;y&R7H|@Jz_vNH7eifGq@8tJB
zcdUFq`?uoT?~@aaeMh?vzQDac9?yJRsvTQgONyJ%k!*C2sWK9bx%d!Q~UfkGq6
zQuw@?HtBtR=|UJW6OVKKdjT?vtu~u`-JgtAujlVid&@spVT_xIoy=J(^&XmRFL5zW
zPwd6h6~-^2)7^+PGM2cY!K_XH7!SjeF5|wHOy=z|N0>a4CpQJjH)Kg%_atvi!fWOo
zJ7K$RaVy1W!)Vd5d3M1+!fnhQJE4tvX`ZGF!`0`FsQ1gU9=ZPKy8zpu?`$z`3d!qX
zUFrg}(!L#(I>P?2i@@F0{J>|ciq}tP;ao|HH=zgg$rc6kLF-9r)@M4O8J#75qGyZ0
z9PC}srr(H17Qx;O?XB5z>4T)(r`34iMsxQ0$!@-b!N2@Rv_^7Rg0mjEU%wQ|Yo^^I
zJ!H&3lb;_PEi|N`Mh%JuyZdW~Alm#qXKkFkcze_#G4MA^Da|OiDS6dDJ5>IygXKL3
zi@7TT1rXv;qKa<}Ir0w7k5G=fMfFtydkexb_FtR%8FqMW9Ox{KZZA{)+IL+ust=`P
zx^*dIq)$&b0v4|G*k61>FC7(P{xk<2o2F^))aFj6Ki4Vf=`c2N$g$jFf7A{umUV}C
zIF{AN)OD_{ckHgy$-4EXaLg~9n}M5b{q2s@E&nV_G)}a`K6YPz4~p_VKXw#|Ea9N(&sZ*D!vbOu!|TM>u;E(E#zc*ifxPWF7hT}?99tac;yws3>z
z6qS-1EZ?5>C6UtG6StKafw94!Oa~V-Z5kjnQPWWQqSEN<_OP_FOA{0Ce3P=o>&)uS
zhiu5d#;e!U!iR_FjfJV75mJ}VXX$iNgEbL&+MoxLR>I;bk`=^cYq@yZB~AQ@H@TLP
z;dZs(gy+rnRl3(P8t+1t19|c~8Hjp9gTyDTjMrv;@+`I8EOpG*=f@Y1CVJmL{a~Co
zz3Wz$D%mC@d3a!R$_Kf8r*?>W!1xm_$G@T$h|a~{TMYLM|Mo*LUeagFbq(CBcnzb7bUAKhE3c@K}N7JChovThV3=5=tKH>RbX!nuI~7JcREKY
zV`m;Q^>L85Ls^cGc69~XZc&*MD;4$j&434xntORy6{-W$WW?;tns*YB(Eq>zgh%aS
zkZ;?F99~Nv31>}{1~iV;KI1U_W(2Mnn4(wynk`nb6});S1a}YL+-o;*XBU4(aM=FV
z+!;5x*mZN>9+{t(C%B5`D7#(-nE4#G#A@}NDU>h-K8ie$|2)~f0>?Xa8*%He@E;KH
zs`1inCsR`~ax-#Mssq4N-8C%24@{aV9rj{ViTwp00A(!-J@s=|STG9+o1%J8g<13b
zRdXseB^{XuPZ^ospNq%RBU$#2WJqH_?W8=F;^w;vbJcL1yVo&UPAKDzGU4f-sh6}E
zYxA^Q?dbZpp;*WJjJ|yUZj+@Z(i`V$_J&19c0r=xz$V=xo?D$qI;DCzxj{Me1&VHNzFZOa{*@T`OU%~Ql;6S8tAafmsxX}T1{xwxbu@wEQ7wFz#1j22@Bcx
zwHv_zwna5%lhtvDQyoG}B~nk*Y3v7Z5c45f2f^bJcG(z0mx06vx6l|CRzlap9h~&Z
zG+U4P=u4x%moIkbMLNJ4aD#^Y?#GQ=oO5-&!USS(hLew;h}Z3J%ls5?fgN3jth_1g
z5YR_R>;}CI%{#|#+_|LVNe7(J9dA!`(?(w5%T>PYhdtIoO+DP1B2Ld
zN336maKwE@&G_0x;$du)bj}(GtI$FCX|=Qz{U?Peym^dPZ{PWewNz3Qe?!J4p365H
z7d1w%MlkJk_N+du==rSaL0`a6#a$Q9-5=?ViBI1-)^AEDRg-t=?eTqyE38Zv&sy}t1G>-0aQq+vDqoK^tP}f&
z_C0ECZ}IU-ObUB$6Mdo2|Io}Og|p1N?5o(>qHbUVC*wL@xl4x`!uF!m|6AR`-_Uo3
ze<;65clK%5yLvLSJ>O^62?N~kKewZM=Qrv-*83JEFb7h8Kp84K7QePrtl*FqFFHx7
zIIp{pulFlNo@zPE1r@bO6~~hY>?o94{99aNn>aoWzeyG@oiDf287AtPcKX8k_2AuL
z>ZwcbBb)6=Q=0%fMT%|{T;FT!&-f!#l`c|2ePP%8UAgfoG16zdOWv$Pl4I3wpZ82d
z`UlubvVXDc@K0xO?Y=j}Ff=X*c=oqy!t3}@{DUBl%l)HG;;OTB{GYyxFVYQ1
zr^YkC@@Hm#7FNmjEqy^tAPRL?q(a*-JHiEO-0dW{>IB8ao6k_+W&Kn{-O{w2cu0c1
z$e*~TG#Q>RSp11eEBJjsC5Hd8NSiNqYl`2@`T7iELe}R$_^!*R^i!=Xp)v5;FdG+V
zg-VVuYZ=6|BS$KG-Y22RttEpc_0rRo;zaw?hAkk%gn~8Qi-nT0!qyS-AZ9F+|J^9Y
z9TfA!_WEX3U$;Ig$jRZLoab?j;_?J;a%<3s2?q0&p8Y~0Wd5kUR{nCp!5FKl4^vB1
zAnxDO5LF(zm7oNY9q9o?wn-_ogTd%FWLal{?e6)4rPtcG)HOUVO={mt|3NpD$`XQalE-WI|9d>YH!n_Wc9Ll65{
zKfAni-|QxRqb+gX-oT$Z>T%uZx0yp&`agZ)2)%C{XWB&USWlQ
zwTp1nk5}@+wae_0qkc{tR|t{!0TG{`!#yQy`lQ_795*kTtyy0Sz255%IH^6ftC!zu
z;xq5r-qCJQ1?RQiNIDL0xH6zEDDlcS*YYeC71k~?i4z1cZK*aQP1%pE#|-;E8`Uov
zbBxzp31FI0O^WLtC0Z{#n`5d-oWXt^tFt5>#^s*odXqUph|L~u=lUMgT|9A^DZ4wJ
zX6a%9<3*n**0js@r6^!pYX;=i{Ojposh#*JoR46vf2_#X_00L|=|-gzIOkV%vbz;>
zYU)BZ?4`8&eY+a9K81(=C(@sH$7(g_;k(BEr}Nvs{l2pghMlbn2H)>*z38n(*k8{o<)BeR96hJ+hs0Q&
z*V1(LKf=f@I??Bd+qAfdG-;rcj*9}HQP5-iuq{G<9{fr5Zm1_`Y6(@;TP@fGA{8DF
zBx+)6G^nCz>UbyQ3#rgZs@=(-$Z&828|5(6*S>9o(j59*FaaDN0Qi!MoIc9BcDyv*
z`;%JPRq6d4X2+doK`pZn0iSJl98Gr$Pp2loO=8~2()?~qVJa5v&5_FaTBa^WJjW`L
zpCtPv`eJfpzDd1LqcF?siIgx0vp-_xDgG~+>!-16N7L>}>b(qV@_gGNV}0jOZ>~iC
zJk_>kpmCLEMf-j{IP&iutrRNpky8rU!yvTYg@vWPIrnkZ;|@{x0_u%qxIntM)yi2U
zc5NJ%94QMe!FsO6d<_2Qdzs0o2_s=6?f%iShYdc)C1n+fi!F9*-zya{ZO(RnR7`HQ
zn8@GH6s#2WE=yTYb=AL?)X;NE==w*vNYX5*L~01?Yrvi;ImM!s<_@YpBrdn@C%Fs;
znW3I`rtO5vrS&}T;fKbq$hLH6eMoVWh`QMk72qeailtu*tWf-zF>{@cTbyb5p8KWc
z-D1I(3A&ZmXD>fnLD#v(kbZIf)A@ZV^v~A=!LkSs0py;b?{
z>kgF{zFA8<3nP*p69y=K<=fr5xCA>Tp(-$Mu~D%j8&WGNlP-zxv%bJ2xn;mB)6usq
z6Q1Y?9O@D|;&fhl=|Qo0Dl3xy@=F^k<#YQ$pab)WU?lYo%O8)<+3|_cHttr22r(Lx
z7o~~{Vp1JepsDD=S~k2?&XW9^=|m3xCAYsu^Q8y3D>ciOHYD`~s^^l+0eP>*T#C$w
zhztvTv7d)=9Z
z`W!2W@v2t3qFU3x2(
zI@~@21o>Q^7jFBX*1t)0vBMhZqODCttsfhK1?KhEO(PkC`2-y4Tz%9qowHW)&P+LI
zC)AD!$6k>ryJq11QrDnBVhwmbSnD9MZo%o*yD}=<;Nbeecl;nDzHB#QCxu+Y^21RI
ze0_J}iFerrE+ZtX<7>I``}6wmn*cX2MZ0VEiSuXP$ePA@)pxLK8z*dc4~O_IU$SYw
zW_q33F=FT!azsjj6_N_#K(PIonr&+3VJ8(d!F##w?}y
z=KF^}hgQL#kA&o#qK8Lx{ppk4DA~zH%aIJOrWnQ+EaD)K^(*=PjIGw#Q!L}M=c1_V
zW{$|(I7@s5!W4;&@1>O)DJgsFX%%Jiv`ISE!gKNcPWx5BYAe8}SBz{gk5xCMMz{3~
zFUN|KzI21LIwl|p*^>LEI~UPO;-C~2zMR14W$01l!fXks^wWH0#?Mqf`z|u-8EbCt
zS7rC>&I?hIr+-uIl8l8!>3ws1RPS?f{7#y7Kem>c3QP3)s0ja%l2Q@VK)DI(zuY3X
zy4fA;INQjRT8E^;uwt!zh&k_zObd3&qot?T?{ye6_8rwl-a?AoOuD|WmPszN&is^p
zyn`Idx2F4E4kt|ck_k7f{e2n4^*ieGRn>b-Zc@H=#JVh*T*>Wk(G^pz#yim0s11#eg*Tq9$l3;IX-#DN4G(;2gM5^JBuejrL${?l5rld9Bm!%Hqz!
zBX6b(T|i|BD6RE9I@D6s&!O~&-lCDsaQim1V_{K~=9*qZY}W<+`ROwEtd6^mw6ak%
zjn6!A4!Csv(x&8np2fJXP{lopWH=6G3_hFqR6d)PmJnjFl1GW)7<#>OBz>4b=^CD*FRre
zr`WE08B4JWhy+Fd@hxG;LBE^k?9E*ka?fi~-E*<@A{^Z~hn{
zWsz*upK0>AscDY1wH2^7G^-wkt#dYwskoSMWuxHgdZEQh7YwyjMLQ*QmmvXDVX4>e
zLpJUSgCzw-@5a>W(rU;iRd=yj&y(C-x#5^4=e-+P@&X62>E6Q~((0WYE>;C%cdmnj|rx9HSk9o-WQ5A!;S3ddZJ^~
zis#cD-<^L@Dngt&fY`%NRH6%)XsMv3*;%!4Jf}^fWu~4H_b^@^Vx^?8Nbqy}cvQj23{TtSZ5d8bC4^0^mA^_$fuHS--@}KA4sfOp)*{;Z
zr@j~_-m~Q>)8zAmj#c*)oUDVuP+cM0$Y|~M&1++rKG
zvb3lvPq}p#{uRvq_Gjr}Tsc2&knNMC&J2p3efw0pn4)6^h6K||*IcIps&-WFwTQAnMW0cs`ww
z7rPW-pIvIoW+pq}gQmHWdTM5yO9r-oV$MZ8qHF~{(1tpQ6;NMfpOEd-l#|3BuuGQ?
zkUqqjr0xrGr)OE2D{sUF)`Zm$)DyH%Rg2CxX7RLa<2nb9L!a&($*Jv%fr+qwN6jCd
zeNEt$3d6eEfDyiV@%*KHUDbhw6LMT)?amCsT_&%D4eOR++0HUEft@ZAp`dhpv)8_1
zOlVV>4jjj=4cZ52t`}ZA0}arr0!#bvc-^&0QH3tC?Kz(43A2`9c`j9riJLr;p0alj
zbJQ1-7vrxPz|<^l>RS!!by_DH7n!^sOtBpC?GY`!##I%G7{IkK95s}
z8=CT_=JKnI7`k)BvV5PZs}yvlb$pX6YdFPN!_*{I8ROP=ji7Cpj-*a&%7AM~OR^j_
zU-|^8z*o0imOY{TUoU$9CLO)uaCe~eRBV~kQloO;_Bh*rzm00M^O{aPOIL+kWFBQ&
zw7Sr_@~+`$y9@Dbhwli4(Rc6mTLm6JGMdNQ2l#5&h&>MwKs+HjH2Gkpky)ti{z!zD
zvGTV>>c#G#fl&JvY|YtA?i<+Na>(dCiQ|JhWs@T#ijQJoyNdU*io@(0Q%+EHexdA`
z-opjUkVX|Qvw{lEMbUn&wV%^r>k=R1hbaaVXJDBY?>9+A5DB+Z#WYS2YiS??J$oCP
zRj{4p+-b5k0HPVs-Ch
z*Uks~jV|)@Yd2O%FfSWD5fHxjmMiL^;naxWNIdy`J0Z;S;o0?XhU(9nswPu`EZzIA
zaBV2@Zh?)h4!@CF$X8HgLmM(gO-Wh#6kpM>^r9s7`2H_B=|a~dwG%6B
zoIk-B{RfQzt9uvSwnF5~O^l#zE9MfewCY>o5xyXkBjuci1GkHZ?PJOd%cLGrl}plZ
zp0RLE`kuNPIV74@Mg^wgM%U-$EJQ_r*uUnx`Cz=~aU(aA`5pwCAI7M=E=-AS3NhwQ
z%js9n!nC~?Oj-yZYxnNLQ*xaBQ5>ek;MEj)7>zoSFH4OB*gw?(mG|U{d;PSj*JPGy
zEyre?8#8I{f?#2)O4+tHVb(@7>>UN^Mnp
zuoES&Ssi()=dUOatGGk7AB1g$0wo$QcYhgoqR-H^{8wCGT&|Vj%BR=|X7i7l6h&lY}y)00}%@HJi11V2W>j}q5<`0TvCS5efSdnA7Br4%0k!@y=0&2i$ii$9Bu
zc;UPC$|DXjp6DTG5)X>OCU{d(#8=!W-V5+)yr!>XqFb~sLR5hFG&`w8RO4Te2kG&)(7#hLLtq&a9K!HK(qz)RK3Knp4h&Xep97wpZ_v(-*LJ?%U0$Ss{why%xc=)H5K(I*^nTe<5sLW(|dA(1U)ei
zA#JK{%ZXSOS7SSwx(C9$`m5fbp=G&CJh{gM#WOVGcG?G6N
zB`vLhPcTe>e>bVEBe_ttG!q+?+0md8J~ES6W+zOGi`)oVbb5p>60&NJbXO|tLLW<)
zCCtz4+`wnYWIWmWmW_pEW4ey>;f_9;by{<8TmVqhYn{gYjZSK0aLF-}yDCxT
zhELCclBIT7E9!%{@3PTQJHva9MOmsit6)NlKafWol!0lKmzamw_IlhCY~-{Tg;
zHSCb{b7ZaL3L);Cpv9{G*+G!UV`^TX=Ik27W_iBg7kUe9RmJA>WmvSOd65H;d#J*6
zNs^DPC0&4TKKkdb_-NmREYqs@e%StW`$}zaNqjr)`utKSErsu+*|n
zW5%6&yxD=2F3T&6BW~bfi^^$Vm{{r>6M4n5;eXun&Zk~PUzZ^uVVv9iI?I1w#CB};+iAs?QcXki83BNK?o}*D{~a&d-<8m&Jr}HrPYqAaY_}i%H-l`U*>YQGR+|t
zOi33_Hge#S;+a}9PhpsRx*w&SJx@6kn#zaAxQQuo
z6{>EZch`CGZQ}`aEqA}La(-zYC|K2Vod9cU_%s|WC_@brzQYL+-KV(4XXPO1#Ebk#
z_eMzkoOs3r^IU}%-nL>DND@tuhxN-mFXLWxfRwyHL!e9A6pJ2o-F8nZdfjAp#1TF#
zt?q_DO}kHJj&HuClPwkwEIU`AXnsg&8%kBYvDnWkLf$WY_t4l5y|U}cBls*=D)8W#
zYu>Ihu)+P2fRmJ~U6c94Y3q&}`SWMwv}4NSQuiy&s0m5};U>71HmTGG+YWwLzUn`&
zw4sZg$>S28`Iw8MJAm~H);l||@xr;k&OlfN|H7;2_D#sw93yv;uFVpyT`N+wR?cm4
zQRz3_-!|G0vdFes;5y-Xqn9Hz$eLeg$Wq&4M`&SZXq5f=&C*QuK~dMYQNd6d=`elO
zy=7?T7c2Jwzhd5om99E2&|IE_gmZh53s%J~ESX^EXs;Tbdo(8V!NF{6Z5^5t;5&j?
zt~IlfeivU8SU#MwoSEBh;EO9VpIj)9j82fUr5N_26$NKFfHY~|m@Nu3Et%~JvcJ@I
zVQWt`8`$s~t4uxV#kq(>z6^q7z4Skfo&bt7*9DjmxaMJLxqTFocC93g}6auswbB41vR^(lbNBNl&QA>
z+vh6}ztJ?fADX9V1)0vhgp(b@3m$x
z63@7UjCE~9S!c`&D1J(|+PFd#EFNlQFot`%tWG~!_`Uu*7aMg;=hvh&Li=7ric^_yD>zmO0@5_L9)MSwX3MAezSJk2H#@=;5oDs<
z`czhhKEje?7kr0{u*2F8@WpP%M1>kgnA?Jnb}_ZiC_)kp&itij3S+zj-es4LCM$x*
z@b8o6Huqsh*q6HZ7)7%g=JW%NlPq;O#skdo2Y9lR256n`U&!+J-UR2@Nv!U-lkOa;
zF%u>0dVTC4lscRo%zP4;e&7G(CrRpnm=juzdFtZ%B^XJl$oW}x
z-j6(MW_~sl5UueWxTuBsPi|pEJCZEp|C3KB9E8K+HjwAtpK2f%x|wy6>tC5sk+yDY
z2HyN@+fS&-xWT(ytS6)eJ;E7gf|h|YiEnvcP8fV8qt_CW-(vIKTY<@tUprQYv7o{+
zn0Nq@;+;Nt*h4LaPg_~!!=HcB^$o;}N?2FDoge2>XJ8&zsBS0B{P4+0+1Q4+Gkfda
zu9xtzTj0xF1bf0kaV&xmHg_>=@t5oRbcR&0M`PAHzUuIL4NzSnp2+`&o{Jx(x|s
z-o1+Cy(?2YawS+X3ow2bb4yRBV(SG20+Y8OoV3L~nVSAA5y7-j38XSn!kMxZ4}bI5>4Ee~y5X=yM%K3X7?@B8D5esV?vurufi*?qxxgxwlE4
zS!mskr>$mnb8}E_9aLJ|kTWxf*FPjPk+%+-qzd1)l{n6!lP7>XmW5_O6D(25CDU!u
z^6HhXCeCHWe-H#yEE*U(31k_qIxNj=E~{{t7D{#$|HknO(DtX6w&c6!U~uja)Fj-y
zu{dKO%l^aDJKR(SMjUecRu`I(JlFE2#J_Im%Zk^+$Ux6P(;+
zfrSkbNpRx0F4(cP0(8CrkPNUvl8|tMl);h*NUd#0Zb@e5{yC&g)S+GJ0lIkaR?Byo
zgaa9^7HbqNgbmGsW+!A#$#m@!4|~5NFH}}JRxiTPl@kSyan6mgAiq;7&nik0lBY;2
zB5ZB#YGis|N5O940SRzCYa8g8GH7fO^VY5eMWaf{L-d`KBmR+?t>dOLJ9U?umKqK2
zWiKJwFqd5^a^LYYVI_i@RQF7Y2ohgx%Yyu~_dv^cOcDG_bFYyDCeuf_8Ac_|-j%Kc
zspr6Zu8ReNsQbysDiND@6`tY2v57ExMJbbh+RwB^CfHkABp-|>;jb`Ujy?REtzOy$
zN~FX{h9QE7Z?c6C(c8CXP!G}N{}v7hjLCD*q<+M+>j|LZQbA()KrnhQBuQY29{%qi
zq#HiaFUi9az+1LJ5Y}m`CCK7z`!!gqn7N+EtmroPg_7p^FZGE_dh(U_lv$XzHT$Bz&S-}Re48R
zJR~wk$KZ1IWUF*vN*o)1w|nEA?=S^wtc6D2(IXljI864K7P{19>lGb9LWtaIMM_bn
zkQH0PD750SGGoh9n_hqKpidQqR0e&ER=6YrJ3`3g0R;F^yh0nA3x~+iv8eCNUo+noGjtKcj&E9ye&hjL>6vWrnG@41*F$fV%mo0y^zXS!VEklQ#SmW><78v
zL#j%_fmj#~t|nGk@-=5vrBwS^pfyAhK?b&l!63}R5i7t$5xF1t
zD92y39CF0fq!2ktGuX&}J3>eL?`x5k#FRRt<0@@0*oWc|$q=Ui40Y6w#Fb;o6-#P&
zT*>|vz#|c0j6xnO?1SMpk_Ux~qmFX@RecEikhT+OI*Wrmos|8+TnH)V7C({}!u@bl
z`DQ<=85tdNdV<;J^rApz(SNP8Cj)>!q@*)n;FBS*qi(}5hPfg2$vlLq@{5-RZKozILmG>lBzEpzcBFqTWsdd)X
z`f*%Rm*2ir%Mi(N&qYMd;YfEJHxNKc#r_%9P>hzA)@wGd7FzRW$ZMbu=pSA>Zq^RV
zB7q7YIBD@A=h&X@LxyX;=D3QqZ701tJpri=hlEFKDXbBmB8F7;J`5D^VbsD0uq<(#
zuaQG@=sBoM|BJv&iHM5Ctl4^M7ojOKw#
z5bDlk1XU!QIzoneUdD$!`W6Obz!0%#;EguX+(Q51MN2D(U~oVX;2$3MCbdPvxQ?w)
z%pJpEA(%l&&bxjgq-8kAzZQ;7mMH!ksdu4NjME*R3N87^T-Z6Vm#xdwF!OC}pC+W*Y0NuW#j+oVt{|392*?ax%ClO23?yaNqrz+<)
z|46o?ZQg8gs)ad?cZ*x{BH=
zzWq=089QaT!ZzG?^R5?2Y(Ug@=>97FUtf=R%fCWGQCPQm@bTX*weF_UvA
z;#7uOo)C~@y-IUSt#Atnu3Dw*Ia7IM1mOdYJ_gJX5JiB&95nSd07$&rt2Q7_ys{0m
zB1iZjRCq45XWMv+Jzer33Pyft{ab$=VxM4ba{|F(Fta0h;Bt)FKw>)o78D|7Yxk9t
zcGzaF|86um?^HOCz2(4w2F4FADHu31z2=Z%B;roVQnC!$+kRZMP^^sdFc<#%3%ptm
zc7Dt)sUnPDA>lu{YaHN29=FfTxeo~YrhJ2n`-+hGo+XnrAo?JlTA%!mFXKtI2%$8W
zCQ`uLXnO|aAqO&*llChF9X@~yJ10kl_Y0F9rN**-D*$%mY5c~s-nZE&+LD-oCG$b&
zb?z1K&Z^4W33Afl2y7)%*2LvGX)ePD5nCk>wf-B~(gWLHFRP>4NgPYVRsM5Hs6@OG
zZ$HP)H9Hz^!;&gJz*Ld(4T<`$1(QHU^>UY9g%*k-x+b`3Nf68&`_@TN=-dF7hD?@l
zH;jvUibY2r;`_R)sFZ;1ea~LYRi?rj09`4Z8y}$zV`qT)%9-AUsCSI`_`|RkPL3Z3
z8JI5{0!&PRfiM_-6P>pg^mMF0b=>+dF%-_NBq_g1GI6!*xPw9?ui~d{qx$L(B9id{MGUKsA3DUHHT{XkkN{zG%m61XFtbfG
z26G{Ye{n8DG3xp$?Gln$FlX&801?T1&q@i4jS!b
zjoc5@u!C@?oyS+TJC%LVn$b^3C96Xo$j>SED>75)K&NN!WpOKhDOGkR&P`ARl118KRY=
z$=0d$U~H{mu#F7<74!RVy3kv@n(z@wr`yQB4mVp8IVu*WV|m9Ni*#JwaWZtYWJzn{
z^Kvvc18K%^=IbncE^pM|2Lq=xEa(LC;fLr%?gv@}4>oB}#uOHr%ZDye4>QPFh}2#q
zVbD4fnTI5bV2*t_nB#Zc-QE){6kzvp0563I1G#&i9Ukk~LXsysUP(j`-LgcK45XMq
z_fw1>16j#YX?t??H!}3{{i>33FVM)poJ6{SyWMzfwtZX>*eC!&6l!Z%1&oxkW#8d0
zLGA;FhYd0Pv~eL9GXME784?+;Ktuv(Nk~ZB#HbFNTe}T*=e_{EjU=zfFu#ruQPWaj
z+K+E*_mh;fWl2a&{4cc%jK&uXVIGyDP6dNu
z5(#O@Xsy@G;lpjT{EsWa((z$x)mLdq3!IcMlI&Pzh9je8Fp
zZJUHIgKiLxAhWq*55EGgN=e>9J&Q0a03!!{eF)z<C>O7Vn^n4V
zZW>>o&wp?vB;k8s8$7{i4ACLc
zz*}F8oF#St0MRf_yO@1W1oMC{dZK(U_Ls@bEo2SjrBXX}8TB
ze_!tEWAYo?DC*Cqe>r4e)C!mMTGX*pn?$haJ1Pug-dE+1E)lQMWM)(bj2~
zk&f*ktx?-XsZHX-$1KD0cS^7BT>v!|4Hr`-}Qa
z|7!ViF+|&KQgQXNU$LN2F)d8pzhjiStM^G2r39kozy8DfwrR@}FCQI}bIK6$_vu|^
zeP<=sRE41GxDP{fch^R=N&^uhbRnCqq~pph8k5Id*`5cu?xojG?FT=^eTBo
z;8uxd>xx<_-sZZ-&G)DD%z3Zm(HcN!{XVQyXAZ~i^FP4~{(r%W_3j#Bg5d~(X9m;9
zwbl>r&6Q0b%^8B%{RuxgA>Sb|>^`Qd$6w)Vucqal6m&yBSFW+$mohh!k?pj3#JbGy
z@_mePc4s7a+^C<{JxJSV>r9W3Ify(9Ci$S-j$kl)_J*G^OOlyo$R@rmU2u;+*Huu*
zaHGk58>s+UoI5A)wt?wCoF8w*)5>+D6=;w4maiyW%QYePe-K1Q?d33$^Ms>kh3b;*
zlIl^g|7WFFOdis_?<=2#G;N34@ED%ncl!E0i#|*6wzMXChzv7DGH&D^&Uhr5^p;<#
zrn41mu%@`$-|%(p(Mkep>p|v9%a9zy2QTG?FMB+%Cd(jCMsmCh@>PZmLm6G@j6v}e
zqRg&bZmzMG>jBlv+fC`@=5&7aQu>Xx^hAEWNb|N*;r@@!?e9>onq(xbZ|-$8a+`ds
z+u(hDNkQ6d^6p5kNBG^g!uM4b^f1)2+~-e>$x0?=xq=JQy^IH!i6;9?qHid_W%UU0
zgQTxIvO$+$>WE_5yy#U29A7uNJ-h;zI831%L-&6uIul1G+&_-bnIm$aC6se+jZ{jm
z%9U$ln=xa^MvkoTRR}qgVmT`}o6UyJ6}jas#}0FptAnYpioTKh`TY^k^SnQw_xt^N
zOmwmw-iW-%+=iVx%ouEfE8<$N9mMc{^=Z9+;Cx4gkSEW4dunehioPcnFZ4sU;X1Sj
zQMaK~b5v^l4pzU}?xhav7RCLoSD?(CAng%m8@@D{YF38tOwJ4|@Mh$4>z{d^PEp-0
zFgoJ!MGLmab*vNfKC8W$@%4m!&WobZpKB1)$0E^jq3=PjrsmtIDGj4dCnt^s
zKl2Dscjv%i&IBi|&L!n2^1$mxm$HvM`Chflj(9SjW_0B_!(E>l;owWT7ibq&C)T!Y
zYSr-MNyZ0%rD3f}TXW;IO&<93h&ilXb
zNHEGH8b9fC&hr=CnbM9X_QT%AcP5{-}i}mOb39
zsem3@4l}6Mzf?`o+KE|aR@u1bT*JSe=6GMZ?M|n=&zkf@t*UaBC$Xy0l0%s06k=S@
z-c#adnUPaG>;|}?QlCFkqu>bM)j0Iu0*b!t8sGBa-cc9D>un?+^mP?+*Ajd)+TPN}h~Tpi(iCF$BZd-q9K0@DB
zJgE=+vle<&=j6Ozm&WQy$3~@=IYSf=Dk}E_-<(5oUmOX%&Q&loJ{g!!Zw3rW9bKu~
z_Kr|Uv^sks+ozC6T2l(C|LmUt&edRlxfLywskaJ3-U?2AFg^>#$*A$OMt~PfmqjMe
z)@52K@A%|?UT7pEJOh&>FDzIu)dZ`_tQmpN8~GK|)5E=g5_)OSXMx5q_4O)h3~L&0
zGGol3C=#4MzugH$%*tnK-^E3!hBk`oaCCIpSG+nX(6^Isg>J0Ll`4Y@m$}IO+(1C2Y@s0R9MA)d=3MRD@s|3oi
zpn~vyY+Ex0JqtM@xK>_jXIkb=sTq&lU}-@m>WsEgyq)FQ^s5r>um5@Yue8GZ##pc{
zH57RI29x(rBD+c|6QgY-?XECeMW69RJfM;xG32LS$K7ddg-j_?N7x7$pT28)rO`k2
zS`_{GL$i-^0E6E}`Q$;XvBC5ov?4~hN2DaCVi~-%gq1b6Zk_ETwhgyQel*=bEyJOGdG1NdFGsawzrjJR-RL_ggO!%K;87tw@8?#4qYpmw1tJB1eWyzWtG~Opf
zX&c3`?FLR;v*j+Ww$t#8mT#Cu`#D9#Q=P7-#DLfMkWF^4Hu8vu_Al2NA@q-OWv3Qc
zr&T|t%6&OU$f>QNQ-i4J=7RJwwGgXp`ZhD#g}-@v@`$NL5MMgnNO3f*g(*W9#fH{PLoa=IsVBXTn$1DU&~1g6vPa8G2P~kDS~0G>UZ>c1Aw)w%`wP$BFB+`)_bggnNP(
z8{sF;co#6;7h`$_oLem;y9$r~ned5s6r&pY%S{K7U(0#Kq9crv3gq_<`W}Xt9$6#=
zsrU5~II7-C(uG&0OOIKMf8en|MoE_jTj&KFsf8$v7b?Xb{jQ*=DVVZrmgsojLikOm
zc@$6jRi2cOhhqWMqGzh%HTKDppoN&kke(`KGWL^#bghQCm{)!?C;!sHDNFKKu0j
zNS+cvhtk(OtqNus-maWzl|K8KG+&YFgMS?B&`Cxyx0wZu-O-tS-4U04eA7GiE!A4o
zOYc@s2!xwm@-y|%YTm);O?qnU-ZRerQQ6yMxq0LW>H4{QGnkIlfxm#`*%)!X&h*Zk
zQe}?@&o-R}cu%he>+6^3;=>z{9n=L+FN$fmw3y>McKG~LvtBx5pAt&|=#FGzgUai9
z369B?@KL`YFVs&j)`DiNSaw$lLP=HSQqWl+Y|r;XtY++s*w=QJ{d%We)DxVB1Z78b
z0XC0(;u-
z8Us-b$}fkOfCOzC@3?s!Qd>;fp|qOXGZR_AD+eRu{4!6m0?exIZ-zkpPxaLnK{%7j$4=HPE2Np3<7RE7xC
z6dV)K>t1xIph}fE=0RhrjTs1kwCh4$a?iId`v8~fjlEkP`72N08w~*qWWBqqM=beJ
zf?xEXLD!66Gowxr2}(+m{7Vsjqrzs10|n_<0;Mu8mVHIp$G(*%SpvoCpu^2_L7Ae0)Zl=gae7Dd?9nO@V?eQrd)oGx$-z=D9}+
zcUjppTp9=7OhSFR(|)loV(-5kUyYa#8fI1BxmArp!^)sFGMEt>6X|J{^r0ZMw<|RR
z@w4^Z$$S!zpNd9k#Hw+Iz;ix}67iX+g$F34r?exwIcy=Bbsv!=VyqDojOg;R+4M7x
zY1eOziPVb@s`FBLb4|Z7sHS2A8xu!=r4l-mIfN=Fqd;5zn~>~R)1<+3y&AeqhbxL!
zuYcAHr>0CIOQX*!xK-3AS|z?wNI~6B4ANH6qN_Y5r7oZw0*71=@NT8C3sVeG=KHX)4n1{Y=|KI$Kakxq>dzmbDZtZDgEH
z7-tyDG)TXfWTlK_A|=)Yea|0BUvm<>--tn%nox6o9aA*))&YMVAlW513s&2f_FTF%fPh6I(e=V*wzCC9xmiS1X&wF_)T8~Tj;k)78pmV%(maa`44$9d?
z$AgJli4LAjpI=8_?RjNJs-tAt3!PE_k-ip7?*>T)}h>?VicPc;?b#%?bZLGbUQUE76jn(-V
zl=3;=z~12)se29vs|HJ<;~s9DDII3$2wb_ux~pPR1%chk-d+Jg?|Toi)J2@O-y08L
zgfa&P*{ab&qW+RT3STlWP<@1cN+_fBGyR1Ed@$+I{xywA`bUaX$sL*^ca+*6^jeT)uIoW{$TtKnH
zkHe|2_e%WEH5Qz_tqMH&$)K741yE(vVx|gq;lO1D23Qn6=p!K4eZuVrn76D)Y*N9`P_!Nqc3ad?}TpatdwXE
zbEfHxtsMY|@k5Pqa6K~w^Yf(lC@IfBQUNV^##HN`w>5oV%1CIZYq$&m<
z?MZRa*9HG_v3UM5kW$KB%f5sEqM+bW3M;rit6Df~YR6K|T+%mF(C!dvF!KBt(+(uC
z|CLbc9;(b0y)Lemt+*n=5J5#)TtPpJ(uj|FbhAFAlxp$#ve89Vs*_v;CWS9wDMX`V
zL)b}nK#01j$W=S5g0;s5Q0WZHViRPtZ{R3H`(q6cq_H_sX$sJauP!Rd{s#;M^)*3A
zAnH?Az0Rgp!Di&KhO6hMyUYNarC6pLNenMm7Nj~ADUG*j40~jY^EWe76sA2B7$;>Hcp)D=
zHpvw5XU;JQ@mM23GkGnt>IZGdPfFA+a|Q~*k?`UYZ07GjgG1(HMWgT
zvQ4R0cM6LKmzlli{;jB&t_P9V2`6uMjhY#Gd<&JZAHW?o}9v;P8M{5>*3UJaA>
z=^3>}Xn^Jp(sZUUG$g+bbV~TOkP&(|OK{Q7Wzg<5OO>QhtW-sR){1&z*RUO7tAl4s
z2e?5E?``_|vQhN3;#G}2Y~wvQu@3KRm=AQ1*JQK|fAOatq{6uHi`%ZIFkXE-tvN&G
zqyB8+p-4^HUQ@{^)s}&XAbW5zkW%_x>LoXM|q)tRkS`kTH&Y7f+SDkqz6?z
zu}?_nM-MVdZ{H`XmH)*mWDH+Wr%#r(-abtVj6Z)CTF0HTh4tGrUByI4Np_UEP1)3L
z`fMsxiLS4deASwoIN_j>dHE8{coS%N{Y2<*=6V2d)~qY;Vt0uPUZv~$aIl-_HdTLi
zDg4a5$6&cHv;q>Q3cYF8?50pHtfO#&UA%K5IYeT1LqC+UMuQ@X?}?RZeglbAF^$1i
zz0g5|j8{adsafdJ`7<&%B}$!kI)-yVkqz0jbAJKfml|duWy_pIEmZe+FA#(4a1zm`
zpC6nWdI#%Xoh7om)qL_IIy^hFC}?Z;kBc@AtjV=OE*!`bYsK?O*(xNJ|$B&k7%
zcm&a5i3(Q1dUn2H8!fz~0Rkf+UReW&|s^6SfJ+PT+w*H=pTPb8M=o*QpkwDyVn
zIq0d>Q{7vUJUiF;Ys;PPeBCFO{+RhQ^nTDAvg6Kk$AK@2Vx@Xmz{9*BbF2559WzXLhO?4kRf-Q#uH
z;xB^ly!i}|YIbQ3*Bf>J%nbb;?+9#VV6zCW4*SRj+?j^f0LJD!kI?q*oMgel8rbAz
zR6|&R4h@CXQ5LVpSd00ANB1WYFLu1*1r~n9!&rVb?v_qkLX`sDMO4`&0I$do(V_)A
zBRVg8e5uin8zI%wG1xD!p(pZBVox+cQ}%w#{sYQe`0Kmv}5rDU?Ss^r=-0FX0Sw8SvPzhL6eqiKwohsoy
zNfm$>@S9I~v0e8pd13PgMVJdBay(y5;gW$?niK0lIpb5`@Kfx=Ad%fmKJ;2adsZl2
zKzopAE&Mw9ZZ&z;gMFBX=Olcs3=5vCN`7rCDy~7CQ&@)un@Yw@FNqKQv2iJ1XLu5x
z1{>7}B;AMVFC&4Z=H7@^i4WRFNjaIOz{=F(FV;FEkqWqX6x}_>Z|hzEr0$;PdT-h`
z+QS^Eu32Y7I^A$%Q^qzH_004UN=j
z!TE=Vk%UoV|4wXQSgsT|%$ub#CpbRXQjTbX%sX0=G9c4M*%LYLGpr>HA1?F@@VWo1
zq31VhF=x36L93X#&z3pL`8h78OJVvdA02Odg2A6=;zKLMr=^J@Qu_s|!>uM-k3nlR
zjrI+;QG`+>-3>5s;K+`MrPaGI?ls?S+k-4#wswa;f}d6yjOO8*ASJBZ(`yTy2Y(of
zi=+dkhZo&!R{^Qn3dRIUMv9fcYj9Fjy@bHSgTl3Vo21PWp5sNagYn#dW`R)GI(#jd
zBa##fa=kG}xa?G&b5$#|IabzsF72*H0aY*d1J7Vm&cg=Q0-r`zK^t|h-RT3kcv9kB
zV(S_r_yE`{lJc!Z(#!{lxcQ+|5xn4Tm#sv6EgVVhx>yHnkSqTn2ukVs$oslcAkk_Y
z7UW(2rpe}Mb)%LmgYV(hSmm1zeH|3`Uw}i_FOVTb!2iV5tMyF^soD7{^i}5P1u#6u
zQTZPH`iXgE`43n%G|986TC6q?k3cLmiFVzz;q1_??sy>_blLN??z6@0
zZ`<+u@AcYD@yG`leyl|JOOvH#U7{Y-moMBGc94+BJoW0Qgkb%(wUEsi~t+kd7e`0j`l&Y@Y8H$XLODDnyZVjjDl=Dh1ol
zYg_SWb_k6NTbYmG=%<()?T#&+J9p&WH+{*z&wYPqtST$aP)spkM9^PA$PeuP$*8AU
zX3mYIVC6G6-9VOUWjp(?(B#huRR0!+OlX{o$q9Iz?RF5~sHE@uKs>xkR#vxIX1F!$
zml0P8ZZ{F$ZtC_Q_QoYkJN~oTCYE0}36}YAD#4Rom~u-HsBqNY0x6Q0M#?PH
zugiJOm#D(Whl{u3WoSVCB@R9qviBrYI(f`XrK`uzt>q;I=88i@^xb&!cOUlW;F9zM
z`r#NkS{oi-^g+Df-DIcYB~7({PXKB>%L>xFy}$rdi?0ZESbxkBWEKQujNyZTn}0H
zF_EmbGYnxFs0{V1kBEVVbiV70nEL{@mmdI?tGz+Zg
z4-}jpT@VKMjB
z71^>$w6~^kzJRu1^T8ac(7PTx6Hithc|_(m<1;sYUTj6yd@~AomSPntJLwt3qmu*#Rw)$k!3oAq~8Wp2o;NtvTb|um@@wYiu2%Qu57s
zwfmqb`heWtG%@-FlM&P_Ff37Wh~ETpF|?M?7IsZ}H!wA%uIJw#jssb4NHn?;o7^Fg
z3M<>m*Qa5dXxRc__NyO_ogO!p%{_X^yiI%%3J|rGyh$a)-d!Om9R$L9)KFB_qxfd2
zv19^AIO>J>&I|YoT<(LG@3=@fH`jnZ&QdwEjjG|L$f@OV{-0p
zPlf%IGnsEjR~7EoBYgG7niEXtIbpf3C~Y-NIY+6?p8S?Px>G!F*(U7rTD@a~N2yi%
zx;o}GN4GLSB>}xI)qTomTbW;G&;4Vu#}nx91i3N2`o+OufJ({F2b@k=r}jbZbPk-<
zAQ*kW&JQbXuUw?2E&(D4^dD3}Q8
zj|{_COh`is)uG&lsVY!hxjvg{tgG+xtp3WYg1v@PT_*T4$xCSms{*Lj*C_OG#?QRB
zE5@p9y@aDX7U<^6v=aqBg)N{;#>{FcBKo!QQG&%?GoR!rjp9kIk)oJ^QA9IhT*C!@
zJtYyb_2NbJKeE+4SS9MCrETU2_e0o8(iDdQ`;(MtaG`vB^;6
z_ybc&Dw5M%GgMa4-Su-btFddIeI9h~HS_vsAI9vu2D}}SQ!&IEdE3qtR?M#HyPvAt
za~bx{;kyhnE@-fWTrIQfaiWh*X6<&(V6~sOyxM&pmv)7E_0tunfdC6DG?FqqL+`EL
zvTalOBaqha8@{zZYU4Qm
zJH5GcVVH6HPq&~kg_6@Jt_0|`tYir*4Vx6W*Mu3ae%Yb9&iA|`acCm*VWhUC43-a+
zs8dI*@m%5&6X7$g&;Iv9Eo55TS~JhpdRAkna+u{_L($x5Jn0U|NN}{I@s>S)&-+2H
zV7~WLH`S~wvs2SiPGE*>A$j*8?4{vMJYR+qadFP)!MQ^$1^OSLu)cIP6(cmz7G9dqLpA55k`39O?%b00|BG`lMrdP)=ya^IAk~X
z^OtGcK+@LOuMpe8{$ZeJAd90}+b$nOoXB7JHN$9@!GZ!gj+jba(JK!ZlZtyHL(|N>
ztw>vvg;^xQ)%cVKeA9fPPx$r!&Jtel`cF|C5%{o+{4)o1w
zOrB`^S3OPBw$l7bE9=k6qy98}Y)WHbfq9~IfW(r!Sf1ik&BQ;oP35r8dKvSds_m%*
zx#DA4%<7SP!aS2UYl#VZ)(gwh)xfgJ6ODvdD-SK)tcg64)LVLnHIZ-F4_q0QfCjyI
zKtwb+-&CxGp+DWGS-U3hW1h~eD`A}%A>eKu7LGRs{2o_$U#<-K)&NRgv{xTxKdsMK
zj}9=Br;=X3)*MO4UEkZIWvH_e+4}m>6?|Z?LNpZ2pdK6V&>3XM)O8i8HcQq%7ikL0;H@t2wcgfu4{j
zq5DdXlZ$=QAWSdqmqzo)hQ~Y4ap=-^v?J{1*L;5g@0g*Sf2>(y)l}NxL;0J2LiM+G
znqlv3+IbJ&5R*3&;)xL`e%v@m0Z~>8YW-#=
z3%)seL)$EF(UTs&7q@8W`4Q}Y{rKZHSD}I@Wr%9>Ugo?X*I%Ig-QKejSI_5^a1EvT
ziyX?Et#9uZPGg#VJ|%hGyM{&N;Htm#4aQnmnpf4aLBI~`SaS;{o9fiwD878(>=aBj
z<|xbP8GOv^p1K}TU?ujuSaxs`wK>ETH@)o@tj29oVs2nYKwL@Zcv+bJV8<@a(Us9U
zp5N?cenXAc0AZR*jG#~-40O9Nuo1%ZBN-U_8mHyWHg%F(koNh)ec8I|E{
z4AKyH3idr;r1o7eeDJVJp~z~alhD@W%Wi2G0nP{F*kY(7sBwj59&OveYM@omx-V%F
zAITGbeI2EzI&CKW6Grk~>ym2rI`^1ShamSg<(sF5k@V`n%(cw|ju*y@AV3XMw1*RV
zyc^Ib&X}`2d=fEmEiZ*~k8?XZ1?uVjq8*!~i_Oo!Lay(AKF#%zMpf+n6YU(!2QXHJ
zN8DhK=JJw{#t(_6-51qHYLpEot^<9lFV{@?xURd51UWDM^F}_VN&+}wCm5GA9~0KN
zu=!tD9W@51eHP!XD1Q00!QOd#{hSgJaU-Z~#Gd{a&~2wOc5ig}Tezvs^{&9WV~33l
zwq|d0EoUH5wRh>pd+29usG`1j;~{u%>=-6gNX$Lc?7avo7tnHmGn`fxCF)+FLF1
zY?OuZJN0?67+t5xMUInAn?vzMajG{#A}mxzP3=@WFuqQLsX*dB>?4XXd~13-g*R`6
zHinz+>t1N#oQ1p8F30S0-9^)2;twH`T%x}P-FvMBL55(>cLTJ
z!*1sW@U2^j1vr4de%zeuDNk(ZKrI$XsMzY1^_WMBQQlV%=rrduBA;)^_*8g$YAwFY
zqd#(HBzAJ<{6153pze~WX$^-_4`gzQ)f}}ag;E!9W7Z}Jdm*Yj{(JCtb=~cR<`V%;
z?=4^E=w4$o^=?>Y4hZ2qPwIs9Z?|rSm-ba(HEC6Qg?Og{a>gDY#L$V3rtsjCLwK1_g0wg@@fIA
zw`GB+;=MDjZ`P4lS!FU;Bff)UN7tDqo1SL=ZkQ>Lbaq@_PomO4K~{!+=^Flyb!}xjQIu8uh{8vV1c8b-k;B)j9anPh-&EE*G3AIKVCeA?dzX`dV756
zBbsPB3{6VMDpKl83}pOns`sDPHuCaWzM5W~Ry0v9w@~3W0ON;HSzYyO-|a
zBoO^tH-oTu2&dRYJ93zH1!@jdbaV_3FYLAMMP41A_sIH0iBRBOQx>jusP4QmlJ8IU
zX*NAkmlpzk2P+tX0-lSVsShuGU;lXcct9)V-D-Qr|u|88)qJkpo_z`
z==BdjAo#5Iz5HtI2OeIb>VsVk@&vpi6lMeM+ido+!wh!-0~UZObDpmi<(?36gL=uYwC6Ms4>s7g%}A=6i`n{o9La>I>l~#TW>v8-dMm
z_{VnTZ;3$Xukxb39(R{{zUa;4BZ~=EC+2|A$5-z9tx)yOLSn8$oy3yXj5hHqvJzw=
zpCNY*+BZ3jv@$e>UMo49d_2Rs?O7}O@mBFc`f6p3**bcxeM+fBqw6D+=fieB11!XY_&f0S;^3~J>2Eu5C^#5M
zIgkAS{~0pg-MvMg>v&w9FEq-(`oy=}rTYbS>N0F9nrg!Vbzw>b%XHx9*ce|?5A7`X
z2drvnDyoXi6$KTA|7mL9F2Qih@j0$^&}V-tFMB|9u%^G>JKR8p@N>ru*un@<=ek~V
zjt=__BP_IC0lLL2mI$u2Y{!qVSkaKCK(-Cg&+$Z4r~W|-Za$dhcX4vJuwJR)DWXMU
z3-YFEmTQChek)qcsc{p=RC$RCZAx;yt7}X3o+u{Z-t9Deyv;Cf-$6i0v;bq2W~AJo
z5qj^?X5>36>715)+$yI;MlI$k+tExIKIortzoiu$fSGGMh0bt(Yu9;S-q$;}OsrF|
z_aLJ8$G@W~y+Hy{^@=;^@*8faSAtf&6U$!!k6YQY7i{l`In}I(2nGoC@Y(tIABX(6
z9y9mgs2K=6xR*^L)Y0P-x}0xS-Y{?+vLF4~hJp`^3*$NbP2*$71LtI2VrCvOiHUNZ
zDlsz#IDg*>o-~eywxV0-jqTzM$WQ+g&g0c^
z8C7$ot$Eqz^I@`5FP3Gq5TsD9W>%DkpTqH$LuXj>AFfZC{)&y-x?JLYH*QKJ5Nfovg@K)Z-s5$=V|gNW^OxVVkYj9#B%h
zOG_h#!xATI6y+o?tUKS1#wYl21!RBO!P*?_c%lBQ-chfG5K+cdBxv?`CQ9`=YB_z2
zv#Fpz%l=U2XlhUKhI)SYP{^Kj(51$P`g8vR)=;)$Pr@9&O@%??a<}}#a~#2t<+Q%+
z1!=idypdaoUN{^QDHy=p@r@BBSmWds7N|d~-Hi4{&SpB}K)st$Ox;TbmnbWs7Xqs>
zW*<6s$THV`i$6?VJu*BLADS<^cpU3>`^SzM^h@Kx*J#KGAbo(w3Yy4*5+~p)G=VJF
zm8T00=B1g>k#elN(ZeI*M{5Io-?Fd~_K+LomBc-AaXu{s(3FRJ*6+M^-bgv<3FIer
zJ&4BAXbES5zCu*4c(unLv1?{-Z3=JR){S;dOG;NfYQR+MYTW$6rL!95>=CAVQ<7(7
zq-(1DYBW07JPkEa%D5?ruBhzt<`gEqpH_BX{LLNmi9z)-(Q`F+^&GmcO(5vLoub3UAUm3cHa?*%O5w{2Lm0CNFWL#qZo4v(lIMufMS
zUfQQ@jSGs4G^L%RiNINK=a`y7T-K~~R8^ayGBv9~{gUQI;}avrFVAY=AcB-V{0^R?#hIR`4-(M9z6>Bpc?%C<*A6Ci6
zh%CQTv3!9a^)CJ;y9=tr=l^+Auj&R)$GoH7iO)p|$jl9BHNl?v%@YR}YnHiTzWk?>PBZ38tg7Bt4Rh-&HXQuEnT_29`USwkj=JHqu&)Ab+r!No&)sVp*E{W;ko<(X
zU;OFZCCcaK4X(zhUwSzI7k6}er_i2|%0ZNZy9q7Hupwx}mQx9+QEeEl*GotU8}p^}
z49n6cecI*gwKDkiE-PG7wG5{^d_>IJ4;gf7Rt?R4(WyT;;$
zzPd>t@*+dY>;~0SCl{uOK=SvEL1W<*KCe5NQtIvcHZJ6H9nwOVQ=lA_Mv5WD%V_`8
z!sfQp1=e53Epmnk72_Ygp!xq^bAI9Q4_)djRKrYsDQA!S;}ziLX2lqgB1azGB5Ec=
zS`;c>-^g{)`bw*2yG^u6anPf4DgmsTq9ehX6omyu(~f#y}@P;S1!jt
z!D#8@!TQvUgNM!m_YGXX@0CrG1wawcx*LikH0J^OQGTO+u)$SaX$42la^wN>YO|za
zuXBO=A`>i+zm%kg;Q)=xbupiK5>GzV04
zRf(DD{df`74j`4%95lR6Zz?bUvKbjP
zU9iwzyok9|vA|%1AzOjpL?4|J4+Hx+*`P!X&dlFyZ{4GF=qzfq>9pk5U%=bAGYh-L
zN3Mj|+$!ExmN{SlV<)Wz@uAU+^`Jz3%nR=Anp218B*bI94(~8T$0BB{nZ>bW(iqrU
z@`TqcdWm-_TrynXid^cV!tq?`jE#gG&h?FiF1X_wd~oPI)0sT3N5gl*4?)aJcjrbN
z*lBg|A8J{@RjT0+TO%bVqf=6G+=*nSl<{0|p{d|0qla@M-0E1O&hJnP>ea4Wk#`RO
zPvqrM!ZI}&_;Wdo`|-Y-1Hbp6bsAd4Wv`_7!O44u+*h7=e|8mBW@cpui-48Ir*inrD|fah}_)Ooe27j3X8JupTJaK92%1Ian778D7B?zX~S(eY5l$Rj-Y=*i$yrH
z+D%*>;=k%zgSv`Gh4lqEdTY!koHi;D|MX(@*nFjPjQ-PSXFPdu;pZ54OEktRzE0{FSh4O(R~$Q`znEE5!(M1O%zuZ3HOe~|05Mr;8gePs
zq15&tnU`;jSEHYR$hw@{PYl|Mf`o7;W2Pq_DjA}*j=z!fXDQs$z0)ERiwnMV^}OeK
zx5XqkQ{etlqoJ^~vOl$O&wr(;g`U)J%+wiQYm({GW5CO
z5785^;Bk(>Q@2EF{bB&;&nt^vONiHFueH>p>GUIvckdHU?AgUEkfi<3R%cJ0R8Ofh
z80uIkxkyiKoWpFe#4-?1o7k>Q>Vv4N!4FN9$qU{Oks#YqgwDD7l8Z20)2<(6fFc|E
z=Zjw7Z)8Hj)|(7cyHrt@z1V3&)}ey2Z+jYXP4&H|4Fr&JALu5A%(}612PN~HsDL`p
zEb>%?q-2^NLp_jyCVyP5%KO_lrB%8h
zZCqBdP@`0OU;GfjlXKtfWZwA4OY9l9Ja)Od3G_F2^jq%`J`5X$bT{M*m5a$S0?
zzW}nw5yHoyR}C_~?k{HfE10q+2q&dg-bYlTV7#HvEoZ`Y-SKqwoUq
zb=iwvCS7UJ`cCA<&ft7T(-7=>X!th==EL~R0=})|$8Lff5C3u{#?-azNctS5^Cbzq
zx9xOai~Cn#SEth7ZG+<}dv2SSY21d6nj*foOt&b=9ZkyfLVt_QjmyBK>6N}9cxDQ`
zkPTQY1;o=_Td_TT=cABS&Gmw*<9`9CN7G+ZbYpXivn11O7w1-r$KLrWxI(|RiciwFgU(|{Me>#s+`94&q+dTEF&9#wm9Z=|UKwj6#N
zw@hjTL*2*#@a6M%ZXgCgHkntb>P_2-$Om{Ocr4ox=XXWvvA);3
zis-=NPKT}AM}nxtTU}$Dv~DxCncuUKCPJ!G5^F%`fK)j{<;H>d?4#ohW@I9uQ)(;&
zev+RWQX$}~GVU<1S~o1Xc1gN9SgU5qI1KXX7{*KyGJ)sw3hx&ybj}i0daHk%VzMGz
z6tC8wGX4slLb7queh^n3M+L=0n?$`Fjp7=9gXgN#!#k%v(`mXw{qDm$#$Gkp?$FBG06ZTI)nhh%7dW#8K`YDV%asGOg@P5+y-wY-9K7)wuvBGf=IFS&X6;DXZM6<)Cxi6aSBfpGh#QNtMP0$a~DVc
zE{&305_)yn_R8sYBOSP26kW7gXA&Me?LkZZ)*uf`;W*TPNqmecz@@g4$il&r2(lWWxfl?j(Lw+h-l
znH`WZKFS$R7XneCwKmYOxb;QQbTw-xl!Uk`tua)daBI$R*K79A&_TR&(FbT@!mznc
z1D3+rDN)v%zT0YF;pNV(MoJ|6lfZ|4&FY1pU^%~=sM+Tftj69w3OV5(t%d>N=P@>(
z-kuN5H{}#Vn)fCi&L0bIP%eST<1??yOsCgVcFhSjn?x4Q>;~<#0O2jz@8k`S*@Qe?
z7$-&hij`IR*lRCnu1ygci(Ys3!cGGtfwWCMo{YG9`Q}WKjGv^}bS%-~X+}*&y?XRxEnF45&iA|9G*v8(1ZJqYHFRAbh>WuRF7-IOoplJZI#f**g
zArSbS&zzarVa!>cB-Fa;3JI!7OFYt|?aG8qaW4fV)Hv@Rwahwmv8Mz?Kk<%{
zSH3{FeB$U_a7q5$KXZ+KEU(T3=dO~^KYYQp@Nn#(0&dK7?hKApytng-f49?%Tr*cg
zebs~_uQwy%;Fx`2ow)~YBmBQI-?w1f(6!{ReLq+z=oQgde
z0*ueRP!MJm1h6xX??K`GmrPcqc?O!^qAi)bU*11rJky4{O5vaHO+z1X=hy3-#KIn3
zi3PVkBUjBq?_0WuBj%6!2BoS=qf2e&TS4^Jfvp
zlBy};ld@%Bct9=3hgsy_T)S!jVxtBhMK
zS3aoY*3U)rgHCw;)|!}J<{fEYx$9~3;#+oW
z{B8f{cW2YtXWlJs%qdN+WjFM^m$hs*K^lkMa2c^r>Mx-e$RdoEK&KJ
z!I<)fx0l6D0JogK>1$ctn+3)Dt^g(6GLR|#FcxB1B&EH}T1#qI
z?mc;-V)A%uRx;zcO4)yiOg_;l*?NJ{%Ox^GeVA%k9y}m?fZe?JC)a7JwmG3dU1Ee{
zJ130roxfRd`<3SFq5<+x38}4e2kqINLqxQ1>k#;ay!q+oun~+NP(ch!O4|k!i%4J
zEtq)inN##@e+myoRd@2C{aI|+Wy{IrcElAgKZjjJ@W#?s@}F*yqMdTa#~%!K0!<^P
zE2sC%uA$AByMteRzHWuhMTMX9{;sfD$ExIjLWbW22
z$=8`{N>d80!Hpdy#uDEPJZ)>b?cZOXqtvm+JNkO%yM}m?CJ)_zKOkxxPmKuzol71s
zvSm>4<*WZPVgEdZdc#6kK)WBDOC+nNF`V8ueGK`hpIbWD1{;h4w2Wju_DogM+gKtH
zH(+Uf+RY;tn+@&wpoy$eRhM*Z(~n@hS`HT~7Gy+YSxQ=>tLudprb?^Cnu
zR>}YjwcUo;?PDCD^0>=6Cq1AUpHMN1PJDz)Eh{S6cWO46%J$m%MLuQzy<#84HcYf}
z2DfKgaW$|YwQ;d;BB!`$*Y{iUb?*G_AuOqh9wVAljHqPR{wJ~7+VE^Bg%!?993`6r
zkYK`<3_^J2=aqviSYX#wM+;9Dp%%imfEuu7)~T_nMWtz|+q0=-S!vsMZ6{>ETWIV*
zu=a~o+`h&;3|XHUGZLjdm2%#?gGu#kq>8oHMVJF)nKp1m-r9B>OtSKm$iRnolyO$|
zx=(ZI`h{aokBw^;sKsV}Fo~Q`cdXlb8(QyGu8wC0F+b6M%9mB64XdrEZ2d3QE{2h5
zm|<4K3jnk~A7`txu)9C3`%4DT>fWY?e_Q&sGNbw}X06ZF8m$KOHCeT-==E58B%2=d
z*lUgd0N8!OH*IX(LW2N-zY+*O#E)z0=-b-Uxog$;BI?~)+!zcQGX@^2a9Ci7Anz2A
zsKgY%0;`(3_4aAvFBl4>nv}~m!L{{M0-0TG2?q%BYeh8Lo^wxl7`0R5UzyjwEb6IfByhg>S;QD=i6)*_CDRJ(qR*ci`@7akx&5E
zBdG;|pR)eCHX7|q>MEHb2r8$LFH39dTYcZG{=G-8YBm7@^8;oU?aEk8w%`ZsI~!Hh
z-t-f*`vAVW{{ZzHVSQbtY1K+iD_E}QPZC;|oy}dXG{4$i!JL>5Ap}G@h{6@MYsL?C
zH*Rd~+5U0R35kw^el19xpfEHnL{@B6>|TpNYBp;~n}Evp3k7h*V1r6aP#OJvHC1HT
zfG*S40mdl7YFe_+xl-I(q%2ak!Xko}@HSY^>f9s#PT0o9%qR9
z29@jb29^x^O5<9~HJIDXDJC?jFps&Lg}w>>jtlQ>Ydvf08iD+l@Si2H#P>Xdp6r^wyn;ZUX*Ot{{Xi1-nd!4>~{~V
z^!x8`uh82?s+c=NZveLzoS4E4uzown#+F?-Ri>BEEzVEEns51O^&78b_2V;CF&K(EnEj={
zm6vOdcU4w5HT0gQO*-$^ze`@aZ`UnNwADPzVpr@`pxwfa#b9ohZD4yJZuZ&k1r|$v
zX~h?9>RU_d+co=Bv$J9?Zq_cpt-j{3^qq&YztnwQYptVBs;z2_E#Wy4H810_rEOaY
z`;Y$soHY#S=+I+5k51#DFeqTUrwp7A!!wsoL`tr~=lLTQ6m8g&t(Gw_{Hn-;=#Cz7
z$iGaneSAyxU8NePsJB=e+NGtlM&+8XeW_)1SS3!*c8Y*mjI=)uH6pvIV-0MstUzOe
z_JFHnJ9h@QVEmeEh7p$_s^h`j4%hxD&*X4>qm`Pjd1(F9$jCygh5)7h0Qe`n40VYpiW-Y_Q#XuP%g|y>{KrU#`$sI(5}s`#)c*
zp0nQS2h#Rb+XI3G%*chn8yg{O{>@6eJEuOwZr8J7_u<5usD{C-MayQfxk{ayENqpb
z??(R1iLt%6QT87DYNpxRt8|^+jc6{TVXU~*DbstBk59dE{{YkLnMRhaQHfy8V0AFJ
zW;u$WL!4D#CiOO2P+5ulSLhmk5EGmlc=T
z&Jch$Z5(xoWpz_6sB^RMWCqEzTdL69Un7zL4Ne)<#08M1XN&*<2ixC-L?KlPgmDRY
zL7}p&c0sXr>)>>#Yf|mQS~Uh(GFM&QfNI{9u)>tkbp4IZRo7l@7C0xs>wQU<_57u^
z>gzt%{RwWzwcW7V_FUC~vs&we`pY&dPhanK%Tur{Un1bxCg$iucCNbr02`tmLQR@0
z&v#bIT`UtqrW+J#krP}A;Ov#wCkz|FQL|$3_DC+4QM%Jt(|cQ`9*0)7{-(c;E#jfn
zA0|TDR7NxWFL=v|k$`e&$!!Hb6Ok&g&)Gj#^`Efo{>A#v=b^BP#Nt(8u4F;ug+?{`
z7*;RUVp&LeOq5<${W&1!AlO#H@CH;2h8MQxtz+eRk_|4I7$TkD2W~N3Qo8zqWpfz35niq>h=y
z%*@H3Vzs2sHS=#DD(?hXS0rW-L<6=aAX5Qh1%OIebJx8Dt6^CFAo&=?HK-eTTCT{g
z$SbjzYDX7L3KWxwrvc7lb~c5wQo$mX<-;-QIA>%xT3BNxP=UbbMKHC~`EkGt({jKO
zYf~LD4pcAX5}s1Pt45Wy*1IQ}MiR@i`uS;<1aNz{wi?!AaLlX{#HF`4EN$&wyF*#J
zj;8fyEa9Knpt7@LrSx_hCB?dku6(Uw_ASp_nksuPcIoUh%`Wf(6)^yWcJbuwQ_Mc9
z1${)XW~N>?+<|w!#Z*ntb+c@sUc!~*7l|gd1X>hZYZ;QnGZ0$9%0mS}(^|7(d{j;+
zjF9&5Ad>Yea%WuY+gF=a_@#G<){LnH3n5CbtM?!O05)oaCJ7LZpw7ksNd)8(k%13*
z7c9tBs^SjNf+kGvP6(B-SzVc$?!r*dnyg41@$%P-ov}^b(4JJ;#BI_8Ru3bXxg@C&
zL0jd!OD$y@HwRssj~hlMouUECjc+2Y1#GVstppieLDWzsl>{EwXrGn)V*YUZh~
z7F9+j`|(X+R%}=ZQaG+z(YT=efvn=?B0exbCbHby?TnkxS
zO100j<<%E*`YNXVhG*9yYrAByr+ij4y_NPqtM($rqV;&B7_p83IgC^bPi>whTYOTx
ze&W2g!`#&2VH`oYz;XZ}XE0VEK^Kucb1}psq^zn#aIpUXn=K&)d4tKo(X!{j=zpw%
zw0BK0EDkkxkChy83&<-KmA`M?Km7Y&+YQ|foh^bP?K@0}s<l}K!6w03mW3*@y>uu>SSa;m(&)hgFz
zKpmDO=v{-aO53Nq_*r2DRZ|z!R|3lAbTchglr;7ofY`GY?w=;ko~r@IRJe`V)k3D*
z^SKY>vuc4^%2Qb`qb!PZVIduVD=~uIs~{bni);biN!)u4TRfXdZMENam4R%v+S=($
zHxW|Zk2Q?RTeYRha>W&x>e#7Xw(YNWpSr1T*Y-8IqiZM12=Xkya3fpn(;FV%kazck
ztwGq+QzHcAaCn7r7bXAE;S(_<4}uuArC#442vo!+kc
zdxo;UH#Ob_s?meqcM)wl{gd@yUGMe(0Ac-a*!nHD>`o>oNF=~!MCCa^(2!toMno?G
z?r=wW^#PQ$uEK#fRJD5}DSep$%9H~|3t}h$DuFnI6o5tv|0dqW#GlY(rfO%1`l
zX7c!b$SZXPIxf_=tM*rUAKsc*U*9i@{{UgAO|Np+KE>M>*~QqJgx0SSYhx0ePYyz^
z7@$z9s}mCo3aOT4MP(6FXEo~<16H7TAcV0hj0QQv3?PVDnS(hP_41VNTS21gE;ZSR
zmi7&pz{;$^_I|TR+iHH-`X=w8v6BRjhvAblxCR40<_yCaG9WO)6_rPnnNp0a)~Tw>
zV5Y42iytFaXmG(!B^Kt*y2k`x!%~BD?Ey&_2W^W3z37CsC1#DGEcQC9v#q09Sy}=t
z{8pY^p7%OQ`sA9>;ii}E4RNY=!3pkqG
z)ar7uqJoM;S6B)b0KkC800d>`kCi;jDpRsry&^5AQ(Z}^KpmnkCJ8)q3mWH;5M5XX
z3<>}mvL;3_4ElvTD-=50VTE=XUAc(agkY;MEGfFb-2VXl;i?T=D;2_+CNMe)nU28s
zRs_27CC?Zq(k_Yr0{I%`vJn*)lRbl7L#E9n5@*mO9*4Gj%`o?_BZfa&nMR%@G)BcdkT0lB~%W6
z#IK)`HfxT~zl}AOH4ga1x3sxKJ@&gRE=__ZPh(k)SWirznVEpf^@}P{ss&A&^}&Tz
zWmT&(X##vqrn=A{0;}K33R-yMC<3~37K{O2BO$_;C&t*XO6C-4t*~m>TPp(`Fe(fO
z5RhV+rEtKp84@!LE2y#awfLqOL8i>kgwbc^%CF9_WuCTH2u>1
z4IRA=f+cCm+zgy&u5t!5{#}%yGd=kUlQ{?&M6d}?!2ns4O<9#HDcE27YYbU!MsWsE
z{#Cisu~m$a0gfa^!Fvl9td(8MJeGOhx2DFsS6a+UHb`Z;S*%RD)y8sQ+}j@CYZG5{
z4X=1yMy3kE*;7
zc*kakkR*dS6994%SkD-oJ7I(oe!GHHFa!ur4ki_+)FTjxL7$n7jWzFjr5g&us|FR0
z0|N<_+Eo6-`q!)di(mF1)%}m5*~BpX6c_;w4seKEkVFs&6dA@Sz)T3XsFkyTj>WhA
zXia(t{R-x?2P#{IyNFuUrRB$nu#k*6%7->(z%AGl9XRo{7mF;wM$60{5z?X6j!K1T47_E3ifmc47NTJ#M60M8E1^-CJG=Wm6jmnAzbwCBK81-4{7+3g0tx4IOp4rAyI&%Dyte`
z7`9nMQM$H@uLOq%EBAqjJGwd^%UAag(6;?0hcNXJJ50{;a8(Zf|}{<@HPGh&8QhwK;gtXcx2b8Vi>ZG^QdLpcn!Q-^E`s^9X!GR*Oz9qct3
zU0t~1Zk$(t29=_s&~kMctf7l0`TTudnWx%XR?C$W)$4Y8bIX)PG6CE%h`wfyngt|`V1I+Wp1gyw2Gc!Hs
z5rhCS9UF1Zy7y?!T2p@cv)AtYm066j+X|kusKwy}kQ;;AwAY%g_wOQkhbJ-QJ
zQJz#xsfKRi`iu2h7FOp1^etAI%^W}!cYV{_fh-6BTM%VSepT^ATX>#qWh{X?W-rr!
zuduxMscT)>Wi@LlH4rhXu$AsUT8*=$mcTfjrxj`R22>H4
ze<-66eHH>{7VOEHM&YfIjw!ube}H-=tS9Y%qG)^6_Fl7h)$JG{Ga??jfu>^pXSfh{
zBGZH-S7+tBa_iErK&ksN%T&u3g57HzYbjK?^K3NYO8Cf?a^ksb21MXUsZ35wDLEiU
z2OB-kLOu8K8fVE~io)tDy1>U}@U~pYr3p=XyXn>#c!wnbMmUanNAD_9OVr_Mqm7#v
zDpQwXY7j^{VF-kLJumD}>&C05&bl)o&tRqz+?CJzg<$G}#dcm`SBTZNRZ6h$Gl`B5IYa324`b44rTy@6SqQu
zimtm*)3H_SP3$6&U~V(+0#KagKn@<{6HF3EE}>Gl`h0GLn(`
zY_qlCel?s6or9qy0PATPYySY}P*T#m#CpIba|JENmHz;vuj5eU60ZI()gcZ9ZsPiR
z4Y9&pZn3>$vZk^OY`+S>AvQS`Vo6o}5MpS5sr$wyou!5;%T&NHu}rS9WxE>nUt{Yc
zE7tSIab`gF)u}!<
z5H2QMO2{Axz>;j@F7n_5jyACu8!EwN#A`4RVgLxPtTkzw##Didpu^S#xN%=$T9ww*
zR~6KWC||>Qz(=nOdH|I!Y~pYi3!rc;ax4rbuEbe6VhaeBUQ8AeZy6Tr4jUD!t7Xl|
zc@YSNd{v6Krv19wn^$EnuZS?JfGZ*out9>bClxaoouM-T%whx69hqSaAjp;24Eu#a
zm?CE*JYfNWQXxQQSWYGbF);`N42Tr4Wb7Ciayk*2gO~))BV>$X5j8hM*vi#4I|ZNs
zjF?R13jX8z1#E1s>wk!tiw7KqQ3M7nxNEEBHMRh2d)Jr_N*v8*0j^SEMzT{&AShm2
zOiEp>6e$To%GHTj#ASe$mDtgaM%{&D)NFc_#1xHh#>_1@491QZYh
zfyhA*PV+Ey0uQkWjIfD`lLLV=W=WZtn1oKo55#3!41<$7LT8i%H>-e!pWM0nivC#qR)>kIPxH
zW6{RSn+a_PCcers4&%xK2*%4iyZixOQxVT|Ss!7~Tq3?_D;M7YWC7)tN5m!okliD6imC4lysgin9^
z_Mf@e_kM?a(`;j)f_A|Bi5H29D9LKi7qg31U5kXYWUf&d@!}1FvEp}n+KW7@Nm;K)
znW`&X@fQh&mhmX5fsW;3j+^V1*A3413c8>4x7a@oeMZf%eh%T}v?t;h^KfE<01?!v
z7e=$ShXj!nnMpkAy-=q5BW-&Iupy$je#+k*1E#k7C7RUp@WVb
zcFKqeAU!F6HxzkR%DHsNR*V6j)79&5YyQRhw$G}w
zsDQy148#UfSTS-LShY*{z(p*Jt3_^h5G9oy)D;{Gl_3PpN{S^^<*gnh@)JKMB!yat
zU>0x@lM?j|gIEc$g{QAjv)ycQt4(k%W68fGOiGJEQ#G$rg)Bob4vaXlSHYsoBIs5S
zYSfc}i&r&cS{LA7XGqOWl-Hwu)Y`|o{pY+L%MSkl9K?Goe@Xk5h4nY7zMg78%7D;5
zO0byB$3h?`cAel8h!Yr@nCwIb35oSH2on#Gs3U0_rUvVm%V$e?Bt%Q@l)
zQ#k?|u^5@2gCcP-f*@=PcUriXrHfX58@Twjn6(yBGnShQxh-&-z
z@-}D^h2XyK+{!|PcI4nqaW%*jUY{kFqM}@?ZbcQ#1y+!(4O|tnc_n26x-jk?-E3`&
zt-4iEuDi8S#rk=c>$*>?Z1Oc{b_
z8Hh)CMEX53FhmSydS(+dKISL+E1Aa=1{Dx5e0OAqvI|(?*BB50Ak4tWEOH4T{{WhH
z9;TyeyOz5d3cA>@Uv?n2&bW2`i}f1zrp~SUw?Jhzh60K)#{~$@eVX*iAgR}=TW0y@
z%YxLClyIa1&;y<@9@?xiL-A#_!^DuNKvg3h!j3tK+&|Dbm!Tols(WtM(tiSsSt+rA
z#WmNqLGsuIfLUc=TVl0U)@TUU-PzTdxL2EO1%Mgy;JH>6h2m@PiQ`tA8cIgifd%Sh
zn1JB!Fl7D|cCGy%?qEH8HQb?FDhPngz-D@6!}JO1pMZuQfajs>nfE@Y+#rZ&)FK>-
z{?UM7CJZ<|3rygXh+8u!E#7iuZEe+PtkSE)$E{eLtg@AL_rI=d`yG3ICgP|OUjc0}
zzD;OlA%k3Ua;$Y6QWm1XsW+5xw0Q}o%F8z%t0>;gTs?Zf?M;eUm;so1R!BGyJ7mho
zkZU@w-w7wqZ^_ghlSNu@=WyGD0ejH#`-l{Jb&*jX9Zlvghjy46E*RydDl0ODgP
zcGstgdIPg1sRsv*RL>anhMO8n^8Gff@HykyshBYJ110O@@Wk}K`)}0Q>OW=D_G*r<
zq;&cq%=9qK^)tK%7BCh7X9PrJg8{_tGCH2n9P~d~?oMIEEeRqFiI7hqNi&uYd@~p<
z%*Zi-ou;nu2&0f&Szu7X={3&GEYnf@C+)wZ?dWXh5e`W06AWu-&EM?V<>g{&ZzXN2
zt8uI-ZtVX6{5~%rO3;dv1xZO=uD0uKc?$&)5f`+w#aJ$@7W+yg`->|d`%T7et*x!M
z7r0gdoUO6a!w$P}JcJgZU~CGt7A&C&v7QQ)xpY~`G7;F?ywkgx(R@qbfPu)#(Uv9h9
zj)SIp
zxMed9HMb2Wi|V;iiV*1H+OWJ!dkwW)Z2h0~TK0vsU9E6o5De9T_STp+&J8KKt6VFF
zT!MmDIYh66@KtoBm~7X^twl$$)+(0!8ftd-cbT)hy>l9ZhgN!4j6p*yJ0MWtOaP!b
z4xFds8M%UApA{~FKx;B7CkF{*R23mFlJt!3KF9)e>~
z$O?y#tAZJpGKLO}2@5UP?QJZw(_kfYG2a=OIaRjLRyq%caXnwX-u0GxkJz=nqOJ@F
zp>aVPkDsWE15XE6bg-}HY
zz5zBN%`3#?^J-fZ-qo)#^=n}+vej*J?ABHth^ZZXJeg47xIoO$7$}N{csQOai;!Vh
z0~X_27QEooai)z8wl#Xz0dy);T@P5QDr7Pt;Ku_zLXt^unh(sR{C3jg(o`?5LH5b=?aFv~mz?q$8{(~!(0LJTAl3}G=3OfZ-XIOZ~u4{?J5
zF`ckl5TA)!!J}+z!5|GZBLcDHjbleQu|)qxL`2w*6BhjO?7Upd!}5S0qxnSGL;9
z-^ejOB*TMXO>)C6U^@Eg{e(kuQBJi2TDDl#)QY5lZZ-uCACnsEz&2A*vfx!;0<^Z+
zH7)>p+wi`omXzsh)CFidVqyU}&NBo;1_UEBxQ8Y(&M=PULkFaY$gK@*k+mNd$5e
z6}r7RjaXrJu1Howr3`eeuF9wcd_?-`iRu3UV&C;m>i+;>(e`Q`Ou?8WcO6e|os9aK
z1{H%KpUO3CMn)Oa7E+8r5F;zsa4B7=A#9V_izj0UB48QxIZj=P&rU-mh)fPd{g!SBk^|Z?Lbye^2{`h}zq9?5E@>ex$&|9J4ZV$GXU`Be6o3<{TJXu{ed{vooGXNQLtmjyaJzm~8BBg3>{}s2dHRC~5e!QWH~O
zSK3=!>h4!+rGy&qMuY-Y@K^1JcUa0w)lVkiLD#bfYOf)r*ppje0p$FK%VSc;vhE0w
z`2c!vu%?JG=<*eM`l};am4Os=#2rU@m?4A_GY=sZx(E!;CT2wJV4Sft4q+1*hbB1g
z%ZbOel_RU`u#B~|YD$K{jEd0mSW2{dL6>XGwTi}42@@R$#0iem)%$(FS8t~Mhfmn2
zHRR2J_7e@yTPtP@m&kDpz%x8xaA5-saSj-i%sEolR3YOTMZ2>OBn2$(Yl35kdVRm3aX7?^-{
z;|D>|Mh6H^L7cmTGl+0zCLsb85UxQ^z%Z6;qsu6CjJ3G7mRXVy6=2K89xN=^25f5y
zWel^4giJ&5Q#0G3c1HC70J&RC*WRmUzQmq5uGbL=NE{evd6;2fBal~}rUzjSggVto
zV`8kkXo!{o4WF5Ej2pK&&LMLNCIcC#IPO96kXpiFwi^`3e9td$DG{tV7a8mItlhuc
zZ&$9Ay_Xy4r8RZGHEp)ful6m-{9-t8z-GmsCT6!4(&dm_V?59$O>V7i-P2y>
zv09`dP#3DYgScNJHHfO<#YwWzT1tCN!s_g*VHwsURWo=zQEN&NMBa@U}hvzi)T{{XXRji~{Qw%0*N
zu(SI#YztP(ou;L)#aVtpcE?^=frKC!atQA@oZ}D-!mZQ}#k4ZQAeiV31|PnL7?xRGp0oDbYfXFg
z_9;?Bc^JXmc8Us&!t+)ZV-X7x>fN2dDz)7jmx`lXCCeMPhsqh70Cm3sNuCG{I$)LpPN
z-u5l$3d{Ks>(RRdM5f~TRB
zj^u1IN_NGL8>pEAW^xlMXJ}3ce9yR61_0wQf+s9ML4bAb2nJ)P5N8r0=)jm%vTToXmG$-qo?%Ue>a{ghoS4<6Pfh^oUsEbgloXtZj*8OC*h?w|TTUSu=%)~3Vqy^+>K=;Bjq5*X
zU^VyZx}#_@Z4sSBGHiwiU}5hWYert)AnbN+j03Pt
zV(k`(Y5gWJX(U}*L0W)|Rz*6+&2`o5YUO42S5X8mBef_PW&@c`qO^bhF{buW>u;i)
z@7fDInP51+)uH1UaW-lywI){UTD@v&F4lz&c50{8mlkZHH`PmV55O1X$x1zTdhuC7
ziv4cqk*o4mX0AgzBTFgbNrycNImwxsnd!!4!9JjTL*8NrA_o%147Sx^u{SNQQ@KUG
zGcz*@5E=CWJp{xe7(j(gbXJyX=3T3Z_3~EJ?De%|oSQ2cuwq!V6A@U!(EUdEW_tes
zYqOyG9_EeIHW+SUKeVk4;6t$4^X5I>+Tjh*-wozH%4N0`BW^a(hhUkDTqdoIM!E=9
zK~*MJAgq|Q_F7cLs7mR`*RG#mtkzw(ZE6uh_a*TDP!KcD439}bs>D}DzQdna`q#45
zp2fIcmZ&?}#dwDh`R~i4>00;pB0tP<-tg?e(%N6d9x>HD9*%S!g6jD)Z
z11VDRPW0nKaCdXH8l^nf-kwN#Cb%9n+EGQREhrRzYueaf&jv94wQ2`Yg@p%X85Tm%
zUNNOY37(}wp6DIaAeg#~J<;PrC|vG|P4>H#>=)$T>Au(MzGW6baqxmISwWtXza~MM
z8SOO91MQtCQo6EHWF0*PKNsqO3Nx}O)ZrK&=dx+?QG~t^#Ttbyzb5!0wDuGmX_O-B
z6egbguTevI`@i1lzA$v0F70NaK^8*<=P#gCzFE3ZuUJ0TZCSQ1H8-*ySkx{PxOXl_mQ?2ju$0{Pd@o{%?
z;1BN@b<*miL~cro`E4`k#`)4XM$GWus&@|uD7Rm~fIWQF?03LT^2HYGU}^Q5QM=dC
z4R$q-*XZK%Q;g@0hD}d=0?1Kb^-sl4vGT_gydR|+kb`9#^Q0R*ZwFDHG^KYq=TE&y
zr~ldj2mu2E20s9#F6hRc<`|~p-{45)jwaqhK
zE7Ce5qNfzHp0zr!RE=RstFxhn-_c7W_RD!_x|P=KF`!=*aSAYmbH^Pk?st=*GyXC+&YPDAT;&(#rwmYR^|v=grs?CON^hPbFhZQt
zp-(00V8ZG+_*0%}8M*1FN!^s7%6Yk~#@ujXolp-7%8wHhqTG4X9BA5uY*sm%yJ#9nrzemod=XIy!y=ZmSwJ|8N0F0sa(G-mXryG$WKCpQNNH;_RQ36Nai<+$