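# Install with: pip install -r requirements.txt
# Optional extras (vllm, flash-attn, peft) are commented out below; uncomment as needed.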
torch>=2.5.0
torchvision>=0.20.0
transformers>=4.46.0
huggingface-hub>=0.25.1
sentencepiece>=0.2.0
jinja2>=3.1.4
pydantic>=2.9.2
timm>=1.0.9
tiktoken>=0.7.0
numpy==1.26.4  # must stay below 2.0.0
accelerate>=1.0.1
sentence_transformers>=3.1.1
gradio==4.44.1  # web demo
openai>=1.51.0  # OpenAI API demo
einops>=0.8.0
pillow>=10.4.0
sse-starlette>=2.1.3
bitsandbytes>=0.43.3  # INT4 loading

# vllm>=0.6.3  # for serving with the vLLM framework
# flash-attn>=2.6.3  # for FlashAttention-2

# PEFT support; not needed unless you load a PEFT fine-tuned model.
# peft>=0.13.0  # for fine-tuned models