unify tiktoken model

这个提交包含在:
Your Name
2023-04-17 19:41:50 +08:00
父节点 40bc865d33
当前提交 2472185de9
共有 8 个文件被更改,包括 9 次插入和 9 次删除

查看文件

@@ -13,7 +13,7 @@ class PaperFileGroup():
# count_token
import tiktoken
from toolbox import get_conf
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num

查看文件

@@ -13,7 +13,7 @@ class PaperFileGroup():
# count_token
import tiktoken
from toolbox import get_conf
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num

查看文件

@@ -4,7 +4,7 @@ from toolbox import update_ui, get_conf
def input_clipping(inputs, history, max_token_limit):
import tiktoken
import numpy as np
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
mode = 'input-and-history'

查看文件

@@ -61,7 +61,7 @@ def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_
MAX_TOKEN = 3000
import tiktoken
from toolbox import get_conf
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))

查看文件

@@ -13,7 +13,7 @@ class PaperFileGroup():
# count_token
import tiktoken
from toolbox import get_conf
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num

查看文件

@@ -69,7 +69,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
# 递归地切割PDF文件
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from toolbox import get_conf
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)

查看文件

@@ -18,7 +18,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
from toolbox import get_conf
-enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
+enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)