llm-elasticsearch-cache
1.0.0
Important
A caching layer for LLMs that uses Elasticsearch, fully compatible with LangChain caching, for both chat and embeddings models.
```
pip install llm-elasticsearch-cache
```

The LangChain cache can be set up just like the other cache integrations:
```python
from langchain.globals import set_llm_cache
from llmescache.langchain import ElasticsearchCache
from elasticsearch import Elasticsearch

es_client = Elasticsearch(hosts="http://localhost:9200")
set_llm_cache(
    ElasticsearchCache(
        es_client=es_client,
        es_index="llm-chat-cache",
        metadata={"project": "my_chatgpt_project"},
    )
)
```

The `es_index` parameter can also take an alias. This allows using ILM (Index Lifecycle Management), which we suggest considering for managing retention and controlling cache growth.
See the class docstring for all parameters.
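For example, here is a minimal sketch of pointing the cache at an ILM-managed alias, assuming a hypothetical policy name `llm-cache-policy` and a standard rollover setup (the policy and index names are illustrative, not part of this library):

```python
from elasticsearch import Elasticsearch

es_client = Elasticsearch(hosts="http://localhost:9200")

# Hypothetical policy: roll the backing index over daily, delete data after 30 days.
es_client.ilm.put_lifecycle(
    name="llm-cache-policy",
    policy={
        "phases": {
            "hot": {"actions": {"rollover": {"max_age": "1d"}}},
            "delete": {"min_age": "30d", "actions": {"delete": {}}},
        }
    },
)

# Create the first backing index and point the write alias at it;
# ElasticsearchCache is then configured with the alias as es_index.
es_client.indices.create(
    index="llm-chat-cache-000001",
    aliases={"llm-chat-cache": {"is_write_index": True}},
    settings={
        "index.lifecycle.name": "llm-cache-policy",
        "index.lifecycle.rollover_alias": "llm-chat-cache",
    },
)
```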
By default, the cached data is not searchable. The developer can customize how the Elasticsearch document is built, adding indexed text fields in which to store, for example, the text generated by the LLM.

This can be done by subclassing and overriding methods. The new cache class can also be applied to a pre-existing cache index:
```python
import json
from typing import Any, Dict, List

from elasticsearch import Elasticsearch
from langchain.globals import set_llm_cache
from langchain_core.caches import RETURN_VAL_TYPE

from llmescache.langchain import ElasticsearchCache


class SearchableElasticsearchCache(ElasticsearchCache):
    @property
    def mapping(self) -> Dict[str, Any]:
        # Extend the default mapping with an analyzed text field.
        mapping = super().mapping
        mapping["mappings"]["properties"]["parsed_llm_output"] = {
            "type": "text",
            "analyzer": "english",
        }
        return mapping

    def build_document(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> Dict[str, Any]:
        body = super().build_document(prompt, llm_string, return_val)
        body["parsed_llm_output"] = self._parse_output(body["llm_output"])
        return body

    @staticmethod
    def _parse_output(data: List[str]) -> List[str]:
        # Extract the message content from each serialized generation.
        return [
            json.loads(output)["kwargs"]["message"]["kwargs"]["content"]
            for output in data
        ]


es_client = Elasticsearch(hosts="http://localhost:9200")
set_llm_cache(
    SearchableElasticsearchCache(es_client=es_client, es_index="llm-chat-cache")
)
```
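Once the subclass is in place, cached chat generations can be searched like any other indexed documents. A minimal sketch using the Elasticsearch client directly (the query text is illustrative):

```python
# Full-text search over the parsed LLM output stored in the cache.
hits = es_client.search(
    index="llm-chat-cache",
    query={"match": {"parsed_llm_output": "elasticsearch"}},
)
for hit in hits["hits"]["hits"]:
    print(hit["_source"]["parsed_llm_output"])
```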
Cached embeddings are obtained by using CacheBackedEmbeddings, in a slightly different way than the official documentation:

```python
from elasticsearch import Elasticsearch
from langchain.embeddings import CacheBackedEmbeddings
from langchain_openai import OpenAIEmbeddings

from llmescache.langchain import ElasticsearchStore

es_client = Elasticsearch(hosts="http://localhost:9200")
underlying_embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

store = ElasticsearchStore(
    es_client=es_client,
    es_index="llm-embeddings-cache",
    namespace=underlying_embeddings.model,
    metadata={"project": "my_llm_project"},
)

cached_embeddings = CacheBackedEmbeddings(
    underlying_embeddings,
    store,
)
```
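A quick usage sketch, assuming an OpenAI API key is configured in the environment: the first call computes and stores the vectors, while repeated calls for the same texts are served from the Elasticsearch index.

```python
# First call hits the OpenAI API and writes the vectors to Elasticsearch;
# a repeated call for the same texts is answered from the cache.
vectors = cached_embeddings.embed_documents(["hello world", "goodbye world"])
print(len(vectors), len(vectors[0]))
```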
Similarly to the chat cache, ElasticsearchStore can be subclassed in order to index the vectors for search:

```python
from typing import Any, Dict, List

from llmescache.langchain import ElasticsearchStore


class SearchableElasticsearchStore(ElasticsearchStore):
    @property
    def mapping(self) -> Dict[str, Any]:
        # Index the vector as a dense_vector field to enable kNN search.
        mapping = super().mapping
        mapping["mappings"]["properties"]["vector"] = {
            "type": "dense_vector",
            "dims": 1536,
            "index": True,
            "similarity": "dot_product",
        }
        return mapping

    def build_document(self, llm_input: str, vector: List[float]) -> Dict[str, Any]:
        body = super().build_document(llm_input, vector)
        body["vector"] = vector
        return body
```

Note that CacheBackedEmbeddings does not currently support caching queries, which means that text queries used for vector searches won't be cached. However, by overriding the `embed_query` method, it should be easy to implement.
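For example, a minimal sketch of such an override, with the hypothetical class name QueryCachingCacheBackedEmbeddings (not part of this library), routing queries through the already-cached document path:

```python
from typing import List

from langchain.embeddings import CacheBackedEmbeddings


class QueryCachingCacheBackedEmbeddings(CacheBackedEmbeddings):
    """Hypothetical variant that also caches query embeddings."""

    def embed_query(self, text: str) -> List[float]:
        # embed_documents already checks the store first and writes back
        # any missing vectors, so reusing it caches queries as well.
        return self.embed_documents([text])[0]
```

This subclass can then be used in place of CacheBackedEmbeddings in the setup above.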