License: Apache License 2.0
# mPLUG-DocOwl2

## Introduction

mPLUG-DocOwl2 is a state-of-the-art Multimodal LLM for OCR-free Multi-page Document Understanding. Through a compression module named High-resolution DocCompressor, each page is encoded with just 324 visual tokens.

GitHub: [mPLUG-DocOwl](https://github.com/X-PLUG/mPLUG-DocOwl)
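To put that figure in perspective, here is a rough token-budget calculation. It is pure arithmetic; the only number taken from this model card is the 324-tokens-per-page count quoted above.

```python
# Visual-token budget per document, assuming the 324-tokens-per-page
# figure stated above (illustrative arithmetic only).
TOKENS_PER_PAGE = 324

for num_pages in (1, 6, 20):
    print(f'{num_pages:>2} pages -> {num_pages * TOKENS_PER_PAGE} visual tokens')
# Even a 20-page document costs only 6480 visual tokens, leaving ample
# room in a typical LLM context window for the text prompt and answer.
```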
## SDK Download

```bash
# Install ModelScope
pip install modelscope
```

```python
# Download the model via the ModelScope SDK
from modelscope import snapshot_download

model_dir = snapshot_download('iic/DocOwl2')
```
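By default the snapshot lands in the ModelScope cache. A sketch of pinning the download location and sanity-checking the result; `cache_dir` is a standard `snapshot_download` argument, and the file names checked are taken from this repository's file list:

```python
import os
from modelscope import snapshot_download

# Download into an explicit directory instead of the default cache.
model_dir = snapshot_download('iic/DocOwl2', cache_dir='./models')

# Sanity check: the key weight and tokenizer files should be present.
for name in ('model.safetensors', 'tokenizer.model', 'config.json'):
    assert os.path.exists(os.path.join(model_dir, name)), f'missing {name}'
print('model ready at', model_dir)
```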
## Git Download

```bash
# Download the model via Git
git clone https://www.modelscope.cn/iic/DocOwl2.git
```
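Note that large weight files such as `model.safetensors` are stored with Git LFS on ModelScope, so run `git lfs install` before cloning; otherwise the clone may contain only small pointer files instead of the actual weights.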
## Quickstart

```python
import torch
from modelscope import AutoTokenizer, AutoModel


class DocOwlInfer():
    def __init__(self, ckpt_path):
        self.tokenizer = AutoTokenizer.from_pretrained(ckpt_path, use_fast=False)
        # Load in float16 and let device_map='auto' place the weights.
        self.model = AutoModel.from_pretrained(
            ckpt_path,
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            torch_dtype=torch.float16,
            device_map='auto',
        )
        self.model.init_processor(tokenizer=self.tokenizer, basic_image_size=504, crop_anchors='grid_12')

    def inference(self, images, query):
        # One <|image|> placeholder per input page, followed by the text query.
        messages = [{'role': 'USER', 'content': '<|image|>' * len(images) + query}]
        answer = self.model.chat(messages=messages, images=images, tokenizer=self.tokenizer)
        return answer


model_dir = '$your_model_local_dir'  # replace with your local model directory
docowl = DocOwlInfer(ckpt_path=model_dir)

images = [
    model_dir + '/examples/docowl2_page0.png',
    model_dir + '/examples/docowl2_page1.png',
    model_dir + '/examples/docowl2_page2.png',
    model_dir + '/examples/docowl2_page3.png',
    model_dir + '/examples/docowl2_page4.png',
    model_dir + '/examples/docowl2_page5.png',
]

answer = docowl.inference(images, query='what is this paper about? provide detailed information.')
print(answer)

answer = docowl.inference(images, query='what is the third page about? provide detailed information.')
print(answer)
```
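Since `inference` accepts any list of pages, a simple loop also gives page-level answers. This is an illustrative follow-up, not part of the original snippet:

```python
# Illustrative follow-up: query each page individually. Each call passes a
# single image, so the prompt contains exactly one <|image|> placeholder.
for i, page in enumerate(images):
    summary = docowl.inference([page], query='summarize this page.')
    print(f'page {i}: {summary}')
```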