Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
Boyin rag (#1983)

* first_version
* rag document support
* RAG interactive prompts added, issues resolved
* Resolve conflicts
* Resolve conflicts
* Resolve conflicts
* more file format support
* move import
* Resolve LlamaIndexRagWorker bug
* new resolve
* Address import LlamaIndexRagWorker problem
* change import order

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>
@@ -1,3 +1,9 @@
+import os,glob
+from typing import List
+
+from shared_utils.fastapi_server import validate_path_safety
+
+from toolbox import report_exception
 from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_lastest_msg
 from crazy_functions.crazy_utils import input_clipping
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -7,6 +13,37 @@ MAX_HISTORY_ROUND = 5
 MAX_CONTEXT_TOKEN_LIMIT = 4096
 REMEMBER_PREVIEW = 1000
 
+@CatchException
+def handle_document_upload(files: List[str], llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request, rag_worker):
+    """
+    Handles document uploads by extracting text and adding it to the vector store.
+    """
+    from llama_index.core import Document
+    from crazy_functions.rag_fns.rag_file_support import extract_text, supports_format
+    user_name = chatbot.get_user()
+    checkpoint_dir = get_log_folder(user_name, plugin_name='experimental_rag')
+
+    for file_path in files:
+        try:
+            validate_path_safety(file_path, user_name)
+            text = extract_text(file_path)
+            if text is None:
+                chatbot.append(
+                    [f"上传文件: {os.path.basename(file_path)}", f"文件解析失败,无法提取文本内容,请更换文件。失败原因可能为:1.文档格式过于复杂;2. 不支持的文件格式,支持的文件格式后缀有:" + ", ".join(supports_format)])
+            else:
+                chatbot.append(
+                    [f"上传文件: {os.path.basename(file_path)}", f"上传文件前50个字符为:{text[:50]}。"])
+                document = Document(text=text, metadata={"source": file_path})
+                rag_worker.add_documents_to_vector_store([document])
+                chatbot.append([f"上传文件: {os.path.basename(file_path)}", "文件已成功添加到知识库。"])
+        except Exception as e:
+            report_exception(chatbot, history, a=f"处理文件: {file_path}", b=str(e))
+
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+
+
+# Main Q&A function with document upload support
 @CatchException
 def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
 
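The hunk above adds the ingestion path: each uploaded file is parsed to text, wrapped in a llama_index Document, and inserted into the user's vector store. Below is a minimal sketch of that same loop outside the GUI, assuming only the Document class and the add_documents_to_vector_store method shown in this diff; the index_files helper and its print reporting are illustrative, not part of the commit.

from llama_index.core import Document

def index_files(file_paths, rag_worker, extract_text):
    # Mirror of handle_document_upload's core loop: extract text, wrap it in a
    # Document that records the source path, then push it into the vector store.
    for file_path in file_paths:
        text = extract_text(file_path)  # returns None when parsing fails
        if text is None:
            print(f"skipped (unsupported format or parse failure): {file_path}")
            continue
        document = Document(text=text, metadata={"source": file_path})
        rag_worker.add_documents_to_vector_store([document])
        print(f"indexed: {file_path}")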
@@ -30,21 +67,40 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
         user_name,
         llm_kwargs,
         checkpoint_dir=checkpoint_dir,
-        auto_load_checkpoint=True)
+        auto_load_checkpoint=True
+    )
 
     current_context = f"{VECTOR_STORE_TYPE} @ {checkpoint_dir}"
     tip = "提示:输入“清空向量数据库”可以清空RAG向量数据库"
-    if txt == "清空向量数据库":
+    # 2. Handle special commands
+    if os.path.exists(txt) and os.path.isdir(txt):
+        project_folder = txt
+        validate_path_safety(project_folder, chatbot.get_user())
+        # Extract file paths from the user input
+        # Assuming the user inputs file paths separated by commas after the command
+        file_paths = [f for f in glob.glob(f'{project_folder}/**/*', recursive=True)]
+        chatbot.append([txt, f'正在处理上传的文档 ({current_context}) ...'])
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+        yield from handle_document_upload(file_paths, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request, rag_worker)
+        return
+
+    elif txt == "清空向量数据库":
         chatbot.append([txt, f'正在清空 ({current_context}) ...'])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        rag_worker.purge()
+        rag_worker.purge_vector_store()
         yield from update_ui_lastest_msg('已清空', chatbot, history, delay=0) # 刷新界面
         return
 
+    else:
+        report_exception(chatbot, history, a=f"上传文件路径错误: {txt}", b="请检查并提供正确路径。")
+
+    # 3. Normal Q&A processing
     chatbot.append([txt, f'正在召回知识 ({current_context}) ...'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-    # 2. clip history to reduce token consumption
-    # 2-1. reduce chat round
+    # 4. Clip history to reduce token consumption
     txt_origin = txt
 
     if len(history) > MAX_HISTORY_ROUND * 2:
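When the input is an existing directory, the new branch above gathers every path under it with a recursive glob and hands the list to handle_document_upload. A standalone illustration of that collection step; the folder name is hypothetical, and the isfile filter is an optional refinement not present in the diff.

import glob, os

project_folder = "./docs"  # hypothetical upload folder
file_paths = [f for f in glob.glob(f'{project_folder}/**/*', recursive=True)
              if os.path.isfile(f)]  # the diff keeps directories too; extract_text simply returns None for them
print(file_paths)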
@@ -52,41 +108,47 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
     txt_clip, history, flags = input_clipping(txt, history, max_token_limit=MAX_CONTEXT_TOKEN_LIMIT, return_clip_flags=True)
     input_is_clipped_flag = (flags["original_input_len"] != flags["clipped_input_len"])
 
-    # 2-2. if input is clipped, add input to vector store before retrieve
+    # 5. If input is clipped, add input to vector store before retrieve
     if input_is_clipped_flag:
         yield from update_ui_lastest_msg('检测到长输入, 正在向量化 ...', chatbot, history, delay=0) # 刷新界面
-        # save input to vector store
+        # Save input to vector store
         rag_worker.add_text_to_vector_store(txt_origin)
         yield from update_ui_lastest_msg('向量化完成 ...', chatbot, history, delay=0) # 刷新界面
 
         if len(txt_origin) > REMEMBER_PREVIEW:
             HALF = REMEMBER_PREVIEW // 2
             i_say_to_remember = txt[:HALF] + f" ...\n...(省略{len(txt_origin)-REMEMBER_PREVIEW}字)...\n... " + txt[-HALF:]
             if (flags["original_input_len"] - flags["clipped_input_len"]) > HALF:
                 txt_clip = txt_clip + f" ...\n...(省略{len(txt_origin)-len(txt_clip)-HALF}字)...\n... " + txt[-HALF:]
-            else:
-                pass
-            i_say = txt_clip
         else:
             i_say_to_remember = i_say = txt_clip
     else:
         i_say_to_remember = i_say = txt_clip
 
-    # 3. we search vector store and build prompts
+    # 6. Search vector store and build prompts
     nodes = rag_worker.retrieve_from_store_with_query(i_say)
     prompt = rag_worker.build_prompt(query=i_say, nodes=nodes)
+
+    # 7. Query language model
+    if len(chatbot) != 0:
+        chatbot.pop(-1) # Pop temp chat, because we are going to add them again inside `request_gpt_model_in_new_thread_with_ui_alive`
 
-    # 4. it is time to query llms
-    if len(chatbot) != 0: chatbot.pop(-1) # pop temp chat, because we are going to add them again inside `request_gpt_model_in_new_thread_with_ui_alive`
     model_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-        inputs=prompt, inputs_show_user=i_say,
-        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+        inputs=prompt,
+        inputs_show_user=i_say,
+        llm_kwargs=llm_kwargs,
+        chatbot=chatbot,
+        history=history,
         sys_prompt=system_prompt,
         retry_times_at_unknown_error=0
     )
 
-    # 5. remember what has been asked / answered
-    yield from update_ui_lastest_msg(model_say + '</br></br>' + f'对话记忆中, 请稍等 ({current_context}) ...', chatbot, history, delay=0.5) # 刷新界面
+    # 8. Remember Q&A
+    yield from update_ui_lastest_msg(
+        model_say + '</br></br>' + f'对话记忆中, 请稍等 ({current_context}) ...',
+        chatbot, history, delay=0.5
+    )
     rag_worker.remember_qa(i_say_to_remember, model_say)
     history.extend([i_say, model_say])
 
-    yield from update_ui_lastest_msg(model_say, chatbot, history, delay=0, msg=tip) # 刷新界面
+    # 9. Final UI Update
+    yield from update_ui_lastest_msg(model_say, chatbot, history, delay=0, msg=tip)
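The clipping logic above memorizes only a head-and-tail preview of very long inputs. A standalone illustration of the REMEMBER_PREVIEW arithmetic, using the constants from the diff and a made-up input string:

REMEMBER_PREVIEW = 1000
HALF = REMEMBER_PREVIEW // 2  # 500 characters kept from each end

txt_origin = "x" * 3000  # stand-in for a long user input
if len(txt_origin) > REMEMBER_PREVIEW:
    # Keep the first and last HALF characters and record how much was elided,
    # exactly as i_say_to_remember is built in the hunk above.
    preview = (
        txt_origin[:HALF]
        + f" ...\n...(省略{len(txt_origin)-REMEMBER_PREVIEW}字)...\n... "
        + txt_origin[-HALF:]
    )
    print(len(preview))  # about REMEMBER_PREVIEW plus the length of the ellipsis marker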
@@ -1,17 +1,13 @@
-import llama_index
-import os
 import atexit
 from loguru import logger
 from typing import List
 
 from llama_index.core import Document
-from llama_index.core.schema import TextNode
-from request_llms.embed_models.openai_embed import OpenAiEmbeddingModel
-from shared_utils.connect_void_terminal import get_chat_default_kwargs
-from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
-from crazy_functions.rag_fns.vector_store_index import GptacVectorStoreIndex
 from llama_index.core.ingestion import run_transformations
-from llama_index.core import PromptTemplate
-from llama_index.core.response_synthesizers import TreeSummarize
+from llama_index.core.schema import TextNode
+
+from crazy_functions.rag_fns.vector_store_index import GptacVectorStoreIndex
+from request_llms.embed_models.openai_embed import OpenAiEmbeddingModel
 
 DEFAULT_QUERY_GENERATION_PROMPT = """\
 Now, you have context information as below:
@@ -63,7 +59,7 @@ class SaveLoad():
     def purge(self):
         import shutil
         shutil.rmtree(self.checkpoint_dir, ignore_errors=True)
-        self.vs_index = self.create_new_vs()
+        self.vs_index = self.create_new_vs(self.checkpoint_dir)
 
 
 class LlamaIndexRagWorker(SaveLoad):
@@ -75,7 +71,7 @@ class LlamaIndexRagWorker(SaveLoad):
         if auto_load_checkpoint:
             self.vs_index = self.load_from_checkpoint(checkpoint_dir)
         else:
-            self.vs_index = self.create_new_vs(checkpoint_dir)
+            self.vs_index = self.create_new_vs()
         atexit.register(lambda: self.save_to_checkpoint(checkpoint_dir))
 
     def assign_embedding_model(self):
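The constructor above either reloads a saved index or creates a fresh one, then registers an atexit hook so the index is checkpointed when the process exits. A minimal standalone sketch of that save-on-exit pattern; the SimpleWorker class and its print-based save are illustrative only.

import atexit

class SimpleWorker:
    def __init__(self, checkpoint_dir):
        self.checkpoint_dir = checkpoint_dir
        # Persist automatically at interpreter shutdown, as the diff does with
        # save_to_checkpoint(checkpoint_dir).
        atexit.register(lambda: self.save_to_checkpoint(checkpoint_dir))

    def save_to_checkpoint(self, checkpoint_dir):
        print(f"saving index state to {checkpoint_dir}")

worker = SimpleWorker("gpt_log/demo_rag")  # hypothetical folder; the message prints on exit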
@@ -91,17 +87,21 @@ class LlamaIndexRagWorker(SaveLoad):
         logger.info('oo --------inspect_vector_store end--------')
         return vector_store_preview
 
-    def add_documents_to_vector_store(self, document_list):
-        documents = [Document(text=t) for t in document_list]
+    def add_documents_to_vector_store(self, document_list: List[Document]):
+        """
+        Adds a list of Document objects to the vector store after processing.
+        """
+        documents = document_list
         documents_nodes = run_transformations(
             documents,  # type: ignore
             self.vs_index._transformations,
             show_progress=True
         )
         self.vs_index.insert_nodes(documents_nodes)
-        if self.debug_mode: self.inspect_vector_store()
+        if self.debug_mode:
+            self.inspect_vector_store()
 
-    def add_text_to_vector_store(self, text):
+    def add_text_to_vector_store(self, text: str):
         node = TextNode(text=text)
         documents_nodes = run_transformations(
             [node],
@@ -109,14 +109,16 @@ class LlamaIndexRagWorker(SaveLoad):
             show_progress=True
         )
         self.vs_index.insert_nodes(documents_nodes)
-        if self.debug_mode: self.inspect_vector_store()
+        if self.debug_mode:
+            self.inspect_vector_store()
 
     def remember_qa(self, question, answer):
         formatted_str = QUESTION_ANSWER_RECORD.format(question=question, answer=answer)
         self.add_text_to_vector_store(formatted_str)
 
     def retrieve_from_store_with_query(self, query):
-        if self.debug_mode: self.inspect_vector_store()
+        if self.debug_mode:
+            self.inspect_vector_store()
         retriever = self.vs_index.as_retriever()
         return retriever.retrieve(query)
 
@@ -128,3 +130,9 @@ class LlamaIndexRagWorker(SaveLoad):
         buf = "\n".join(([f"(No.{i+1} | score {n.score:.3f}): {n.text}" for i, n in enumerate(nodes)]))
         if self.debug_mode: logger.info(buf)
         return buf
+
+    def purge_vector_store(self):
+        """
+        Purges the current vector store and creates a new one.
+        """
+        self.purge()
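Taken together, these hunks give the worker a small public surface: add text or documents, retrieve, build a prompt, remember a Q&A round, purge. A hedged usage sketch based only on the calls visible in this diff; the module path is assumed, and the constructor call is reconstructed from the argument list shown in the Rag问答 hunk.

from crazy_functions.rag_fns.llama_index_worker import LlamaIndexRagWorker  # module path assumed

worker = LlamaIndexRagWorker(
    "demo_user",                        # user name, passed positionally as in Rag问答
    {},                                 # llm_kwargs, normally supplied by the plugin runtime
    checkpoint_dir="gpt_log/demo_rag",  # hypothetical folder
    auto_load_checkpoint=True,
)
worker.add_text_to_vector_store("GPT Academic now supports RAG-based Q&A.")
nodes = worker.retrieve_from_store_with_query("What does GPT Academic support?")
prompt = worker.build_prompt(query="What does GPT Academic support?", nodes=nodes)
worker.purge_vector_store()  # wipes the store and recreates it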
@@ -0,0 +1,22 @@
+import os
+from llama_index.core import SimpleDirectoryReader
+
+supports_format = ['.csv', '.docx', '.epub', '.ipynb', '.mbox', '.md', '.pdf', '.txt', '.ppt',
+                   '.pptm', '.pptx']
+
+
+# 修改后的 extract_text 函数,结合 SimpleDirectoryReader 和自定义解析逻辑
+def extract_text(file_path):
+    _, ext = os.path.splitext(file_path.lower())
+
+    # 使用 SimpleDirectoryReader 处理它支持的文件格式
+    if ext in supports_format:
+        try:
+            reader = SimpleDirectoryReader(input_files=[file_path])
+            documents = reader.load_data()
+            if len(documents) > 0:
+                return documents[0].text
+        except Exception as e:
+            pass
+
+    return None
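A quick way to exercise the new helper; the import path matches the one used in the plugin hunk above, while the sample file path is hypothetical and llama-index must be installed.

from crazy_functions.rag_fns.rag_file_support import extract_text, supports_format

print(", ".join(supports_format))  # extensions delegated to SimpleDirectoryReader
text = extract_text("./docs/sample.pdf")  # hypothetical file
if text is not None:
    print(text[:50])  # the same 50-character preview the plugin shows
else:
    print("unsupported format or parse failure")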