Skip to content

Commit

Permalink
Integrate mistral.rs LLM (#13105)
Browse files Browse the repository at this point in the history
* Integrate

* Changes based on comments

* Run pants tailor

* Properly extract and pass logprobs

* Add a simple test

* Add a usage section

* Fix silly mistake

* Add mistralrs as a dependency

* Fix extract logprobs and update api

* Update for new version

* Update version

* Prettier

* Fix typing

* Remove unnecessary in docs BUILD
  • Loading branch information
EricLBuehler committed May 2, 2024
1 parent 4f967b8 commit 772a575
Show file tree
Hide file tree
Showing 14 changed files with 865 additions and 0 deletions.
153 changes: 153 additions & 0 deletions llama-index-integrations/llms/llama-index-llms-mistral-rs/.gitignore
@@ -0,0 +1,153 @@
llama_index/_static
.DS_Store
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
bin/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
etc/
include/
lib/
lib64/
parts/
sdist/
share/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
.ruff_cache

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints
notebooks/

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
pyvenv.cfg

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# Jetbrains
.idea
modules/
*.swp

# VsCode
.vscode

# pipenv
Pipfile
Pipfile.lock

# pyright
pyrightconfig.json
@@ -0,0 +1,3 @@
# Pants build metadata: generates requirement targets from the Poetry
# dependency section of this package's pyproject.toml so Pants can
# resolve them (presumably consumed by sibling BUILD files — the
# commit message notes this was produced by `pants tailor`).
poetry_requirements(
    name="poetry",
)
17 changes: 17 additions & 0 deletions llama-index-integrations/llms/llama-index-llms-mistral-rs/Makefile
@@ -0,0 +1,17 @@
# Developer task runner for the llama-index-llms-mistral-rs integration.
# Run `make help` to list the available targets.
GIT_ROOT ?= $(shell git rev-parse --show-toplevel)

# Declare every target as phony so each one runs even when a file or
# directory with the same name (e.g. `tests`, `docs`) exists on disk.
.PHONY: help format lint test watch-docs

help: ## Show all Makefile targets.
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'

format: ## Run code autoformatters (black).
	pre-commit install
	git ls-files | xargs pre-commit run black --files

lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy
	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files

test: ## Run tests via pytest.
	pytest tests

watch-docs: ## Build and watch documentation.
	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
@@ -0,0 +1,68 @@
# LlamaIndex Llms Integration: `mistral.rs`

To use this integration, please install the Python `mistralrs` package:

## Installation of `mistralrs` from PyPI

0. Install Rust: https://rustup.rs/

```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
source $HOME/.cargo/env
```

1. `mistralrs` depends on the `openssl` library.

To install it on Ubuntu:

```bash
sudo apt install libssl-dev
sudo apt install pkg-config
```

2. Install it!

- CUDA

`pip install mistralrs-cuda`

- Metal

`pip install mistralrs-metal`

- Apple Accelerate

`pip install mistralrs-accelerate`

- Intel MKL

`pip install mistralrs-mkl`

- Without accelerators

`pip install mistralrs`

All installations will install the `mistralrs` package. The suffix on the package installed by `pip` only controls the feature activation.

## Installation from source

Please follow the instructions [here](https://github.com/EricLBuehler/mistral.rs/blob/master/mistralrs-pyo3/README.md).

## Usage

```python
from llama_index.llms.mistral_rs import MistralRS
from mistralrs import Which

llm = MistralRS(
which=Which.GGUF(
tok_model_id="mistralai/Mistral-7B-Instruct-v0.1",
quantized_model_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
quantized_filename="mistral-7b-instruct-v0.1.Q4_K_M.gguf",
tokenizer_json=None,
repeat_last_n=64,
),
max_new_tokens=4096,
context_window=1024 * 5,
)
```
@@ -0,0 +1,69 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings\n",
"from llama_index.core.embeddings import resolve_embed_model\n",
"from llama_index.llms.mistral_rs import MistralRS\n",
"from mistralrs import Which, Architecture\n",
"import sys\n",
"\n",
"documents = SimpleDirectoryReader(\"data\").load_data()\n",
"\n",
"# bge embedding model\n",
"Settings.embed_model = resolve_embed_model(\"local:BAAI/bge-small-en-v1.5\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Settings.llm = MistralRS(\n",
" which=Which.Plain(\n",
" model_id=\"mistralai/Mistral-7B-Instruct-v0.1\",\n",
" arch=Architecture.Mistral,\n",
" tokenizer_json=None,\n",
" repeat_last_n=64,\n",
" ),\n",
" max_new_tokens=4096,\n",
" context_window=1024 * 5,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index = VectorStoreIndex.from_documents(\n",
" documents,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"How do I pronounce graphene?\")\n",
"print(response)"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -0,0 +1,72 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings\n",
"from llama_index.core.embeddings import resolve_embed_model\n",
"from llama_index.llms.mistral_rs import MistralRS\n",
"from mistralrs import Which\n",
"import sys\n",
"\n",
"documents = SimpleDirectoryReader(\"data\").load_data()\n",
"\n",
"# bge embedding model\n",
"Settings.embed_model = resolve_embed_model(\"local:BAAI/bge-small-en-v1.5\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Settings.llm = MistralRS(\n",
" which=Which.GGUF(\n",
" tok_model_id=\"mistralai/Mistral-7B-Instruct-v0.1\",\n",
" quantized_model_id=\"TheBloke/Mistral-7B-Instruct-v0.1-GGUF\",\n",
" quantized_filename=\"mistral-7b-instruct-v0.1.Q4_K_M.gguf\",\n",
" tokenizer_json=None,\n",
" repeat_last_n=64,\n",
" ),\n",
" max_new_tokens=4096,\n",
" context_window=1024 * 5,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index = VectorStoreIndex.from_documents(\n",
" documents,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"query_engine = index.as_query_engine(streaming=True)\n",
"response = query_engine.query(\"What are non-granular scalings?\")\n",
"for text in response.response_gen:\n",
" print(text, end=\"\")\n",
" sys.stdout.flush()"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

0 comments on commit 772a575

Please sign in to comment.