imodelsX
Now includes KAN & QA-Emb.
Scikit-learn friendly library to explain, predict, and steer text models/data.
Also a bunch of utilities for getting started with text data.
Demo notebooks
Interpretable modeling/steering
| Model | Output | Description |
|---|---|---|
| Tree-Prompt | Explanation + steering | Generates a tree of prompts to steer an LLM (official) |
| iPrompt | Explanation + steering | Generates a prompt that explains patterns in data (official) |
| AutoPrompt | Explanation + steering | Finds a natural-language prompt using input gradients |
| D3 | Explanation | Explains the difference between two distributions |
| SASC | Explanation | Explains a black-box text module using an LLM (official) |
| Aug-Linear | Linear model | Fits a better linear model by using an LLM to extract embeddings (official) |
| Aug-Tree | Decision tree | Fits a better decision tree by using an LLM to expand features (official) |
| QA-Emb | Interpretable embeddings | Generates interpretable embeddings by asking LLMs questions (official) |
| KAN | Small network | Fits a 2-layer Kolmogorov-Arnold network |
We plan to support other interpretable algorithms such as RLPrompt, CBM, and NBDT. If you want to contribute an algorithm, feel free to open a PR!
General utilities
| Utility | Description |
|---|---|
| LLM wrapper | Easily call different LLMs |
| Dataset wrapper | Download minimally processed Hugging Face datasets |
| Bag of ngrams | Learn a linear model of ngrams |
| Linear finetune | Finetune a single linear layer on top of LLM embeddings |
Installation: `pip install imodelsx` (or, for more control, clone the repo and install from source)
Demos: see the demo notebooks
from imodelsx import TreePromptClassifier
import datasets
import numpy as np
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt

# set up data
rng = np.random.default_rng(seed=42)
dset_train = datasets.load_dataset('rotten_tomatoes')['train']
dset_train = dset_train.select(rng.choice(
    len(dset_train), size=100, replace=False))
dset_val = datasets.load_dataset('rotten_tomatoes')['validation']
dset_val = dset_val.select(rng.choice(
    len(dset_val), size=100, replace=False))

# set up arguments
prompts = [
    "This movie is",
    " Positive or Negative? The movie was",
    " The sentiment of the movie was",
    " The plot of the movie was really",
    " The acting in the movie was",
]
verbalizer = {0: " Negative.", 1: " Positive."}
checkpoint = "gpt2"

# fit model
m = TreePromptClassifier(
    checkpoint=checkpoint,
    prompts=prompts,
    verbalizer=verbalizer,
    cache_prompt_features_dir=None,  # 'cache_prompt_features_dir/gp2',
)
m.fit(dset_train["text"], dset_train["label"])

# compute accuracy
preds = m.predict(dset_val['text'])
print('\nTree-Prompt acc (val) ->',
      np.mean(preds == dset_val['label']))  # -> 0.7

# compare to accuracy for individual prompts
for i, prompt in enumerate(prompts):
    print(i, prompt, '->', m.prompt_accs_[i])  # -> 0.65, 0.5, 0.5, 0.56, 0.51

# visualize decision tree
plot_tree(
    m.clf_,
    fontsize=10,
    feature_names=m.feature_names_,
    class_names=list(verbalizer.values()),
    filled=True,
)
plt.show()

from imodelsx import explain_dataset_iprompt, get_add_two_numbers_dataset
# get a simple dataset of adding two numbers
input_strings, output_strings = get_add_two_numbers_dataset(num_examples=100)
for i in range(5):
    print(repr(input_strings[i]), repr(output_strings[i]))

# explain the relationship between the inputs and outputs
# with a natural-language prompt string
prompts, metadata = explain_dataset_iprompt(
    input_strings=input_strings,
    output_strings=output_strings,
    checkpoint='EleutherAI/gpt-j-6B',  # which language model to use
    num_learned_tokens=3,  # how long of a prompt to learn
    n_shots=3,  # shots per example
    n_epochs=15,  # how many epochs to search
    verbose=0,  # how much to print
    llm_float16=True,  # whether to load the model in float16
)
# prompts is a list of found natural-language prompt strings
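# a small illustrative follow-up (not from the docs): inspect the top prompts found
for p in prompts[:3]:
    print(repr(p))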

from imodelsx import explain_dataset_d3
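# illustrative toy inputs (hypothetical examples, not from the docs):
# pos/neg can be any lists of strings
positive_samples = ['the movie was fantastic', 'an instant classic', 'loved every minute of it']
negative_samples = ['what a waste of time', 'the plot made no sense', 'painfully boring']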
hypotheses, hypothesis_scores = explain_dataset_d3(
    pos=positive_samples,  # List[str] of positive examples
    neg=negative_samples,  # another List[str]
    num_steps=100,
    num_folds=2,
    batch_size=64,
)

Here we explain a module rather than a dataset.
from imodelsx import explain_module_sasc
# a toy module that responds to the length of a string
mod = lambda str_list: np.array([len(s) for s in str_list])

# a toy dataset where the longest strings are animals
text_str_list = ["red", "blue", "x", "1", "2", "hippopotamus", "elephant", "rhinoceros"]
explanation_dict = explain_module_sasc(
    text_str_list,
    mod,
    ngrams=1,
)

Use these just like scikit-learn models: during training they fit better features via an LLM, but at test time they are extremely fast and completely transparent.
from imodelsx import AugLinearClassifier, AugTreeClassifier, AugLinearRegressor, AugTreeRegressor
import datasets
import numpy as np

# set up data
dset = datasets.load_dataset('rotten_tomatoes')['train']
dset = dset.select(np.random.choice(len(dset), size=300, replace=False))
dset_val = datasets.load_dataset('rotten_tomatoes')['validation']
dset_val = dset_val.select(np.random.choice(len(dset_val), size=300, replace=False))

# fit model
m = AugLinearClassifier(
    checkpoint='textattack/distilbert-base-uncased-rotten-tomatoes',
    ngrams=2,  # use bigrams
)
m.fit(dset['text'], dset['label'])

# predict
preds = m.predict(dset_val['text'])
print('acc_val', np.mean(preds == dset_val['label']))
# interpret
print('Total ngram coefficients: ', len(m.coefs_dict_))
print('Most positive ngrams')
for k, v in sorted(m.coefs_dict_.items(), key=lambda item: item[1], reverse=True)[:8]:
    print('\t', k, round(v, 2))
print('Most negative ngrams')
for k, v in sorted(m.coefs_dict_.items(), key=lambda item: item[1])[:8]:
    print('\t', k, round(v, 2))

import imodelsx
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import accuracy_score
import numpy as np

X, y = make_classification(n_samples=5000, n_features=5, n_informative=3)
model = imodelsx.KANClassifier(hidden_layer_size=64, device='cpu',
                               regularize_activation=1.0, regularize_entropy=1.0)
model.fit(X, y)
y_pred = model.predict(X)
print('Test acc', accuracy_score(y, y_pred))

# now try regression
X, y = make_regression(n_samples=5000, n_features=5, n_informative=3)
model = imodelsx.kan.KANRegressor(hidden_layer_size=64, device='cpu',
                                  regularize_activation=1.0, regularize_entropy=1.0)
model.fit(X, y)
y_pred = model.predict(X)
print('Test correlation', np.corrcoef(y, y_pred.flatten())[0, 1])

Easy-to-fit baselines that follow the sklearn API.
from imodelsx import LinearFinetuneClassifier, LinearNgramClassifier

# fit a simple one-layer finetune on top of LLM embeddings
m = LinearFinetuneClassifier(
    checkpoint='distilbert-base-uncased',
)
m.fit(dset['text'], dset['label'])
preds = m.predict(dset_val['text'])
acc = (preds == dset_val['label']).mean()
print('validation acc', acc)
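LinearNgramClassifier (imported above) gives a bag-of-ngrams baseline with the same fit/predict interface. A minimal sketch, assuming the default constructor arguments are sufficient; check the docs for the exact parameters:

m_ngram = LinearNgramClassifier()  # assumption: defaults work; there may be an ngram-size argument
m_ngram.fit(dset['text'], dset['label'])
preds_ngram = m_ngram.predict(dset_val['text'])
print('validation acc (ngram baseline)', (preds_ngram == dset_val['label']).mean())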
Easily call different language models, with caching (lighter-weight than LangChain).

import imodelsx.llm
# supports any huggingface checkpoint or openai checkpoint (including chat models)
llm = imodelsx.llm.get_llm(
    checkpoint="gpt2-xl",  # text-davinci-003, gpt-3.5-turbo, ...
    CACHE_DIR=".cache",
)
out = llm("May the Force be")
llm("May the Force be")  # when computing the same string again, uses the cache

API for loading Hugging Face datasets with basic preprocessing.
import imodelsx.data
dset, dataset_key_text = imodelsx.data.load_huggingface_dataset('ag_news')
# ensures that dset has splits named 'train' and 'validation',
# and that for each split the input text is in the column given by dataset_key_text
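A minimal usage sketch of the returned objects (illustrative; exact contents depend on the dataset):

print(dset['train'].num_rows, dset['validation'].num_rows)
print(dset['train'][0][dataset_key_text])  # text of the first training example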