/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl_eval2/lib/python3.10/site-packages/bitsandbytes/cextension.py:34: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
warn("The installed version of bitsandbytes was compiled without GPU support. "
/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl_eval2/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so: undefined symbol: cadam32bit_grad_fp32
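The two messages above indicate a CPU-only bitsandbytes build: libbitsandbytes_cpu.so was loaded, and it does not export the 8-bit optimizer kernels such as cadam32bit_grad_fp32. This is harmless for plain inference but rules out 8-bit optimizers. A minimal, heuristic sketch for detecting this condition up front, assuming only the standard warnings module and that the warning text stays as logged:

    import warnings

    def bitsandbytes_has_gpu_support() -> bool:
        # Heuristic: importing a CPU-only bitsandbytes build emits the UserWarning
        # seen above. Capture warnings during import and look for that message.
        # Caveat: only meaningful on first import (an already-imported module is cached).
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            import bitsandbytes  # noqa: F401  - the import itself triggers the check
        return not any("compiled without GPU support" in str(w.message) for w in caught)

    if __name__ == "__main__":
        print("bitsandbytes GPU support:", bitsandbytes_has_gpu_support())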
model path is /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B
12/05 03:39:44 - OpenCompass - WARNING - No previous results to reuse!
12/05 03:39:44 - OpenCompass - INFO - Reusing experiments from 20241205_033944
12/05 03:39:44 - OpenCompass - INFO - Current exp folder: /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B/20241205_033944
12/05 03:39:48 - OpenCompass - INFO - Partitioned into 256 tasks.
[ ] 0/256, elapsed: 0s, ETA:
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
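Each worker first echoes the model settings it was given (the dict above). Reconstructed from that dict, the corresponding model entry in an OpenCompass config would look roughly like the sketch below; this is inferred from the log, not the actual config file used, and the list name models is only the usual OpenCompass convention:

    # Sketch of the model entry implied by the logged settings (reconstructed, not the real config).
    models = [
        dict(
            type='opencompass.models.InternVLChat',
            abbr='internvl-chat-20b',
            path='/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B',
            model_args=dict(device='cuda'),
            max_out_len=1024,
            batch_size=4,
            run_cfg=dict(num_gpus=1),
        ),
    ]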
command torchrun --master_port=26604 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99913_params.py
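From this point the log is the same pair of lines repeated once per partitioned task: the echoed model settings, then the torchrun command for that task's generated params file under tmp/. A hypothetical sketch of the launch pattern those command lines imply (the sequential subprocess.run loop and the port range are assumptions; the real runner schedules tasks through its own partitioner and workers):

    import random
    import subprocess

    OPENICL_INFER = "/mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py"

    def launch_task(params_file: str) -> None:
        # Mirror the logged command: single-process torchrun on one GPU,
        # with a per-task master port to avoid rendezvous collisions.
        port = random.randint(12000, 32000)  # assumed range, matching the ports seen in this log
        cmd = [
            "torchrun", f"--master_port={port}", "--nproc_per_node", "1",
            OPENICL_INFER, params_file,
        ]
        print("command", " ".join(cmd))
        subprocess.run(cmd, check=True)

    # e.g. launch_task("tmp/99913_params.py")  # params files are generated per task by OpenCompass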
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13280 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100181_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24526 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100186_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14432 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100185_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30532 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100173_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24834 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99968_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25967 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100187_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24407 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100182_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26493 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99942_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21062 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99920_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31494 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100177_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27535 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99966_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16310 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100171_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21774 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99898_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12469 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100166_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14638 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99929_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16693 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99888_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27499 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99952_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16708 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100060_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27588 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99988_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25288 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99889_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31890 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100174_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26297 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100180_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16397 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100157_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12164 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100168_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26540 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100176_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29112 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100179_params.py
|
|
command torchrun --master_port=23949 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99887_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18811 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99994_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13418 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100169_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29222 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100032_params.py
|
|
command torchrun --master_port=18950 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99947_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21842 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100162_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30688 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99870_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17817 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100149_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16554 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100139_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18486 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99855_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18748 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100178_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28115 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100184_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15313 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100158_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28444 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100151_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29823 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99914_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28431 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99932_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30982 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99863_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22312 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99901_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26987 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100175_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19899 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99983_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19490 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100167_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=23066 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100133_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30647 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100135_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13344 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99979_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20192 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99893_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19336 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100160_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22730 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100132_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14349 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100122_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22606 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100141_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31476 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99916_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12584 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99921_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17514 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100142_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22902 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100170_params.py
|
|
command torchrun --master_port=17978 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100172_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17883 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99948_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25101 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99909_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12829 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100119_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14215 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100153_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24184 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99939_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24341 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100147_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12512 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99970_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26047 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100134_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12077 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99874_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14692 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100188_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27755 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100116_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29389 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99877_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18251 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100127_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30001 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99895_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29486 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99886_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24196 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100138_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17277 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99982_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30313 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100155_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25681 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100164_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20526 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99977_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14619 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100111_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26666 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100144_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20493 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99866_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28853 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99962_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22252 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100101_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24525 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100108_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=23600 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100114_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15879 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100159_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12811 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100107_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17538 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100125_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22218 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100183_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26764 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100150_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28394 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100120_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15424 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100078_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19811 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100148_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30170 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99987_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19203 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99900_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12951 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99928_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29541 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99910_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21271 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99926_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=23626 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99919_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17356 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99931_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16269 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100140_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24375 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100115_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20257 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99915_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15094 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99985_params.py
|
|
command torchrun --master_port=15353 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100082_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17610 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99878_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13371 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99986_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29090 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100123_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21099 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99892_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29294 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100145_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21659 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100165_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16912 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100152_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13163 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99865_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16231 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100124_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12932 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100083_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14734 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99958_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31341 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100073_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14328 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99876_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20416 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100113_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17135 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99879_params.py
|
|
command torchrun --master_port=18677 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99924_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12094 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99912_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19355 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100112_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15928 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100104_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12862 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100106_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27787 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99864_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13000 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100090_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28745 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100063_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25264 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100126_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16067 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100146_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27299 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100096_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16906 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100156_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19369 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100110_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26611 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99961_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16376 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99963_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27537 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100088_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21108 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99941_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17468 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100102_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20674 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99957_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29071 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100163_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15347 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100077_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24037 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100131_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12977 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100072_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13057 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100109_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13267 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99959_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15163 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99972_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25976 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100079_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27184 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99944_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31203 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100089_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27583 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99976_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30842 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100118_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25586 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100121_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19830 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100161_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31081 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100095_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29153 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100075_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=23362 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100081_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29435 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99953_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12824 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100093_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18395 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99884_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24334 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99881_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20392 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99896_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19381 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99984_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16250 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100080_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20045 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100098_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12230 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99908_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=21004 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100117_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30647 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99872_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19046 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99989_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19156 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100062_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28238 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99956_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16891 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100094_params.py
|
|
command torchrun --master_port=15867 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99930_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31662 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100128_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28884 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100071_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30497 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100092_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17261 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99954_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25699 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100129_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30047 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99981_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13601 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100066_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26219 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100130_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19258 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100154_params.py
|
|
command torchrun --master_port=12233 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99894_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22309 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100084_params.py
|
|
command torchrun --master_port=23255 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100074_params.py
|
|
command torchrun --master_port=12847 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100086_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22258 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99868_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18497 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99967_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29005 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100076_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=23792 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100091_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16578 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99885_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17748 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99991_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30847 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100068_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24524 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99990_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22131 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100085_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15395 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99995_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12427 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100100_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15801 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100099_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31270 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99940_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29268 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99978_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=16793 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100065_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17810 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99993_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15272 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99992_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13425 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99936_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=19139 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100069_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26643 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99973_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27017 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100097_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=20868 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100103_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25388 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99883_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=29743 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99975_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31817 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99911_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13689 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99933_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=26617 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100070_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22327 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99918_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=12446 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99938_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28013 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99969_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=13858 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99974_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25635 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99923_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=17105 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99880_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31600 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99927_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27439 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100105_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=24107 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99859_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28968 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99903_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=15510 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100064_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=18400 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99945_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=27945 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99875_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=31191 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99899_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=28700 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99971_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22613 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99907_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=30183 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100087_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=14671 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99935_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=25091 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99922_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22640 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99917_params.py
|
|
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
|
|
command torchrun --master_port=22182 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99960_params.py
use_backend False {'abbr': 'internvl-chat-20b', 'batch_size': 4, 'max_out_len': 1024, 'model_args': {'device': 'cuda'}, 'path': '/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B', 'run_cfg': {'num_gpus': 1}, 'type': 'opencompass.models.InternVLChat'}
command torchrun --master_port=17209 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99867_params.py
command torchrun --master_port=13371 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99858_params.py
command torchrun --master_port=27849 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99856_params.py
command torchrun --master_port=23151 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99897_params.py
command torchrun --master_port=22044 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99965_params.py
command torchrun --master_port=28553 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99950_params.py
command torchrun --master_port=12281 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99980_params.py
command torchrun --master_port=16734 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99860_params.py
command torchrun --master_port=31318 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99861_params.py
command torchrun --master_port=16303 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99882_params.py
command torchrun --master_port=13460 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99873_params.py
command torchrun --master_port=26241 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99869_params.py
command torchrun --master_port=23737 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99857_params.py
command torchrun --master_port=28106 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99906_params.py
command torchrun --master_port=29863 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99854_params.py
command torchrun --master_port=29951 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99853_params.py
command torchrun --master_port=16872 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99902_params.py
command torchrun --master_port=30472 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99964_params.py
command torchrun --master_port=25834 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99871_params.py
command torchrun --master_port=17323 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99949_params.py
command torchrun --master_port=30716 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/100187_params.py
command torchrun --master_port=16512 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99888_params.py
command torchrun --master_port=22260 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99966_params.py
command torchrun --master_port=26044 --nproc_per_node 1 /mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py tmp/99898_params.py
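Each "command torchrun ..." line above launches one partitioned inference task as an independent single-GPU process: a unique master port, --nproc_per_node 1, the OpenCompass openicl_infer.py entry point, and a per-task params file under tmp/. The following is a minimal sketch of how such commands could be assembled and dispatched; the params-file list, the port range, and the serial subprocess loop are illustrative assumptions, not the actual OpenCompass runner code.

import random
import subprocess

# Illustrative sketch only: in the real run the params files under tmp/ are
# generated by the OpenCompass partitioner. The two paths below are copied from
# the log above, and the port range is an assumption.
INFER_SCRIPT = "/mnt/hwfile/wangweiyun/workspace_tcx/opencompass/opencompass/tasks/openicl_infer.py"
PARAMS_FILES = ["tmp/99960_params.py", "tmp/99867_params.py"]

for params in PARAMS_FILES:
    port = random.randint(12000, 32000)  # each task gets its own rendezvous port
    cmd = [
        "torchrun",
        f"--master_port={port}",
        "--nproc_per_node", "1",  # one GPU per task, matching run_cfg {'num_gpus': 1}
        INFER_SCRIPT,
        params,
    ]
    print("command", " ".join(cmd))
    subprocess.run(cmd, check=True)  # the real runner schedules these on the cluster, not serially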
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 256/256, 0.5 task/s, elapsed: 519s, ETA: 0s
12/05 03:48:35 - OpenCompass - INFO - Partitioned into 287 tasks.
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 287/287, 1.3 task/s, elapsed: 216s, ETA: 0s
dataset                       version    metric                        mode    internvl-chat-20b
----------------------------  ---------  ----------------------------  ------  -------------------
mmlu                          -          naive_average                 gen     46.35
mmlu_pro                      -          -                             -       -
cmmlu                         -          naive_average                 gen     47.13
ceval                         -          naive_average                 gen     48.56
agieval                       -          -                             -       -
GaokaoBench                   -          weighted_average              gen     32.28
GPQA_extended                 -          -                             -       -
GPQA_main                     -          -                             -       -
GPQA_diamond                  -          -                             -       -
ARC-c                         -          -                             -       -
truthfulqa                    -          -                             -       -
triviaqa                      2121ce     score                         gen     31.47
triviaqa_wiki_1shot           -          -                             -       -
nq                            3dcea1     score                         gen     13.21
C3                            8c358f     accuracy                      gen     76.88
race-high                     9a54b6     accuracy                      gen     72.56
flores_100                    -          -                             -       -
winogrande                    b36770     accuracy                      gen     58.72
hellaswag                     e42710     accuracy                      gen     53.69
bbh                           -          naive_average                 gen     36.32
gsm8k                         1d7fe4     accuracy                      gen     40.71
math                          393424     accuracy                      gen     6.96
TheoremQA                     6f0af8     score                         gen     12.25
MathBench                     -          -                             -       -
openai_humaneval              8e312c     humaneval_pass@1              gen     32.32
humaneval_plus                -          -                             -       -
humanevalx                    -          -                             -       -
sanitized_mbpp                a447ff     score                         gen     33.07
mbpp_plus                     -          -                             -       -
mbpp_cn                       6fb572     score                         gen     23.40
leval                         -          -                             -       -
leval_closed                  -          -                             -       -
leval_open                    -          -                             -       -
longbench                     -          -                             -       -
longbench_single-document-qa  -          -                             -       -
longbench_multi-document-qa   -          -                             -       -
longbench_summarization       -          -                             -       -
longbench_few-shot-learning   -          -                             -       -
longbench_synthetic-tasks     -          -                             -       -
longbench_code-completion     -          -                             -       -
teval                         -          -                             -       -
teval_zh                      -          -                             -       -
IFEval                        3321a3     Prompt-level-strict-accuracy  gen     19.78
IFEval                        3321a3     Inst-level-strict-accuracy    gen     31.89
IFEval                        3321a3     Prompt-level-loose-accuracy   gen     22.92
IFEval                        3321a3     Inst-level-loose-accuracy     gen     35.13
12/05 03:52:22 - OpenCompass - INFO - write summary to /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B/20241205_033944/summary/summary_20241205_033944.txt
12/05 03:52:22 - OpenCompass - INFO - write csv to /mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B/20241205_033944/summary/summary_20241205_033944.csv
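The final scores are persisted twice, as a human-readable .txt and as a .csv under the run's summary/ folder. Below is a minimal sketch for loading the per-dataset numbers back into Python, assuming the CSV keeps the dataset / version / metric / mode / internvl-chat-20b columns shown in the table above and that '-' marks benchmarks that were not run in this sweep.

import csv

# Path copied from the log above; adjust to the actual run folder if it differs.
SUMMARY_CSV = "/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/share_internvl/InternVL2-2B/20241205_033944/summary/summary_20241205_033944.csv"

scores = {}
with open(SUMMARY_CSV, newline="") as f:
    for row in csv.DictReader(f):
        value = row.get("internvl-chat-20b", "-")
        if value not in ("-", ""):  # '-' rows are benchmarks that were skipped in this run
            # Key by (dataset, metric) because IFEval reports several metrics.
            scores[(row["dataset"], row["metric"])] = float(value)

print(len(scores), "scored entries")
print("gsm8k accuracy:", scores.get(("gsm8k", "accuracy")))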