first commit

parent b5a00436f1
commit 3fddc4a305

README.md
@ -1,3 +1,78 @@
---
pipeline_tag: text-generation
language:
- multilingual
inference: false
license: cc-by-nc-4.0
library_name: transformers
---

<br><br>

<p align="center">
<img src="https://aeiljuispo.cloudimg.io/v7/https://cdn-uploads.huggingface.co/production/uploads/603763514de52ff951d89793/AFoybzd5lpBQXEBrQHuTt.png?w=200&h=200&f=face" alt="Finetuner logo: Finetuner helps you to create experiments in order to improve embeddings on search tasks. It accompanies you to deliver the last mile of performance-tuning for neural search applications." width="150px">
</p>

<p align="center">
<b>Trained by <a href="https://jina.ai/">Jina AI</a>.</b>
</p>

[Blog](https://jina.ai/news/reader-lm-small-language-models-for-cleaning-and-converting-html-to-markdown) | [Colab](https://colab.research.google.com/drive/1wXWyj5hOxEHY6WeHbOwEzYAC0WB1I5uA)

# Intro

Jina Reader-LM is a series of models that convert HTML content to Markdown content, which is useful for content conversion tasks. The model is trained on a curated collection of HTML content and its corresponding Markdown content.

# Models

| Name           | Context Length | Download                                                        |
|----------------|----------------|-----------------------------------------------------------------|
| reader-lm-0.5b | 256K           | [🤗 Hugging Face](https://huggingface.co/jinaai/reader-lm-0.5b) |
| reader-lm-1.5b | 256K           | [🤗 Hugging Face](https://huggingface.co/jinaai/reader-lm-1.5b) |

# Get Started

## On Google Colab

The easiest way to experience reader-lm is by running [our Colab notebook](https://colab.research.google.com/drive/1wXWyj5hOxEHY6WeHbOwEzYAC0WB1I5uA), where we demonstrate how to use reader-lm-1.5b to convert the HackerNews website into markdown. The notebook is optimized to run smoothly on Google Colab’s free T4 GPU tier. You can also load reader-lm-0.5b or change the URL to any website and explore the output. Note that the input (i.e., the prompt) to the model is the raw HTML; no prefix instruction is required.

## Local

To use this model, you need to install `transformers`:

```bash
pip install "transformers<=4.43.4"
```

Then, you can use the model as follows:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "jinaai/reader-lm-1.5b"

device = "cuda"  # for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

# example html content
html_content = "<html><body><h1>Hello, world!</h1></body></html>"

# the raw HTML goes in as the user message; no instruction prefix is needed
messages = [{"role": "user", "content": html_content}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

print(input_text)

inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=1024, do_sample=False, repetition_penalty=1.08)

print(tokenizer.decode(outputs[0]))
```
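
The decoded output above still contains the prompt and the ChatML control tokens. A minimal follow-up sketch, reusing the variables from the example above with the standard `skip_special_tokens` flag, that keeps only the newly generated Markdown:

```python
# decode only the tokens generated after the prompt, dropping special tokens
markdown = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
print(markdown)
```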

## AWS SageMaker & Azure Marketplace

[AWS 0.5b](https://aws.amazon.com/marketplace/pp/prodview-nli7b6dueo424?sr=0-1&ref_=beagle&applicationId=AWSMPContessa)

[AWS 1.5b](https://aws.amazon.com/marketplace/pp/prodview-ms27ixcwq3wjk?sr=0-2&ref_=beagle&applicationId=AWSMPContessa)

[Azure 0.5b](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/jinaai.reader-lm-500m)

[Azure 1.5b](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/jinaai.reader-lm-1500m)

added_tokens.json
@ -0,0 +1,5 @@
{
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644
}
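
The three entries above are Qwen2’s ChatML control tokens. A quick sanity check, assuming `transformers` is installed and the Hugging Face Hub is reachable:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("jinaai/reader-lm-1.5b")
# ids should match added_tokens.json above
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tokenizer.convert_tokens_to_ids("<|im_start|>") == 151644
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
```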

config.json
@ -0,0 +1,28 @@
{
  "_name_or_path": "jinaai/qwen2-1.5b-reader",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "max_position_embeddings": 256000,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_theta": 2000000,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.43.3",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
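
The config describes a Qwen2 decoder with a 256K-token context window (`max_position_embeddings: 256000`) and grouped-query attention (`num_key_value_heads: 2`). A minimal sketch for inspecting these values without downloading the weights, using the standard `AutoConfig` API:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("jinaai/reader-lm-1.5b")
print(config.model_type)               # "qwen2"
print(config.max_position_embeddings)  # 256000, i.e. the 256K context length
print(config.num_key_value_heads)      # 2 key/value heads (grouped-query attention)
```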

@ -0,0 +1 @@
{}

generation_config.json
@ -0,0 +1,17 @@
{
  "attn_implementation": "flash_attention_2",
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "rope_theta": 2000000,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.43.3",
  "use_cache": false
}
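
These are the decoding defaults that `model.generate` applies when no overriding arguments are passed; the Get Started example above overrides them with greedy decoding (`do_sample=False`). A minimal sketch for inspecting the defaults via the standard `GenerationConfig` API:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("jinaai/reader-lm-1.5b")
# sampling defaults shipped with the model
print(gen_config.temperature, gen_config.top_p, gen_config.top_k)  # 0.7 0.8 20
# explicit kwargs to generate() take precedence over these defaults
```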

File diff suppressed because it is too large
Binary file not shown.

special_tokens_map.json
@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

File diff suppressed because it is too large

tokenizer_config.json
@ -0,0 +1,43 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": null,
  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "model_max_length": 256000,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
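
The `chat_template` above is plain ChatML, which is what `apply_chat_template` renders in the Get Started example. A minimal sketch showing the exact prompt the model sees for the example HTML (the commented output is derived from the template string above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("jinaai/reader-lm-1.5b")
messages = [{"role": "user", "content": "<html><body><h1>Hello, world!</h1></body></html>"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <html><body><h1>Hello, world!</h1></body></html><|im_end|>
# <|im_start|>assistant
```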

File diff suppressed because one or more lines are too long