Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
upgrade searxng
@@ -4,27 +4,43 @@ import requests
 from bs4 import BeautifulSoup
 from request_llms.bridge_all import model_info
-import urllib.request
+import random
+from functools import lru_cache
+from check_proxy import check_proxy

+@lru_cache
 def get_auth_ip():
-    try:
-        external_ip = urllib.request.urlopen('https://v4.ident.me/').read().decode('utf8')
-        return external_ip
-    except:
-        return '114.114.114.114'
+    ip = check_proxy(None, return_ip=True)
+    if ip is None:
+        return '114.114.114.' + str(random.randint(1, 10))
+    return ip

-def searxng_request(query, proxies, categories='general', searxng_url=None):
+def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
     if searxng_url is None:
         url = get_conf("SEARXNG_URL")
     else:
         url = searxng_url
-    params = {
-        'q': query,        # search query
-        'format': 'json',  # output format: JSON
-        'language': 'zh',  # search language
-        'categories': categories
-    }
+
+    if engines is None:
+        engines = 'bing'
+
+    if categories == 'general':
+        params = {
+            'q': query,        # search query
+            'format': 'json',  # output format: JSON
+            'language': 'zh',  # search language
+            'engines': engines,
+        }
+    elif categories == 'science':
+        params = {
+            'q': query,        # search query
+            'format': 'json',  # output format: JSON
+            'language': 'zh',  # search language
+            'categories': 'science'
+        }
+    else:
+        raise ValueError('不支持的检索类型')
+
     headers = {
         'Accept-Language': 'zh-CN,zh;q=0.9',
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
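Two notes on this hunk. First, since get_auth_ip takes no arguments, @lru_cache makes the IP lookup run once per process; every later call reuses the cached value. Second, a minimal usage sketch of the new engines parameter; the instance URL, proxy address, and engine name below are invented for illustration, and proxies follows the requests library's dict format:

# Illustration only: all concrete values here are hypothetical.
proxies = {"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}
results = searxng_request(
    "量子计算 最新进展",
    proxies,
    categories="general",
    searxng_url="https://searxng.example.com/search",  # assumed self-hosted instance
    engines="duckduckgo",  # overrides the new 'bing' default
)

Omitting engines (or passing None) now selects 'bing', whereas the old code always searched with whatever engines the instance had configured for the given category.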
@@ -32,12 +48,13 @@ def searxng_request(query, proxies, categories='general', searxng_url=None):
         'X-Real-IP': get_auth_ip()
     }
     results = []
-    response = requests.post(url, params=params, headers=headers, proxies=proxies)
+    response = requests.post(url, params=params, headers=headers, proxies=proxies, timeout=30)
     if response.status_code == 200:
         json_result = response.json()
         for result in json_result['results']:
             item = {
                 "title": result.get("title", ""),
+                "source": result.get("engines", "unknown"),
                 "content": result.get("content", ""),
                 "link": result["url"],
             }
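The timeout=30 addition keeps a hung SearXNG instance from blocking the plugin indefinitely. Below is a hedged sketch of the JSON shape the parsing loop assumes: field names are taken from the code above, the values are invented, and "engines" is a list, which is why a later hunk indexes url['source'][0]:

# Illustration of the assumed response shape, not real data.
json_result = {
    "results": [
        {
            "title": "Example result",
            "engines": ["bing"],                # engines that produced this hit
            "content": "snippet text ...",
            "url": "https://example.com/page",
        },
    ],
}
items = [
    {
        "title": r.get("title", ""),
        "source": r.get("engines", "unknown"),  # .get tolerates a missing field
        "content": r.get("content", ""),
        "link": r["url"],                       # raises KeyError if absent, as above
    }
    for r in json_result["results"]
]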
@@ -80,7 +97,7 @@ def scrape_text(url, proxies) -> str:
 def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):

     history = []    # clear history to avoid input overflow
-    chatbot.append((f"请结合互联网信息回答以下问题:{txt}", None))
+    chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "检索中..."))
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

     # ------------- < Step 1: scrape the search engine's results > -------------
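Why this one-string change works: the chatbot object appears to be a list of (user, assistant) message pairs rendered directly by the UI, so replacing None with a placeholder gives immediate feedback. A minimal sketch under that assumption:

# Assumed structure: a list of (user_message, assistant_message) pairs.
chatbot = []
txt = "什么是SearXNG?"  # hypothetical question
chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "检索中..."))
# The assistant bubble now reads "检索中..." instead of rendering empty
# until the next update_ui call replaces it.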
@@ -88,11 +105,12 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     proxies = get_conf('proxies')
     categories = plugin_kwargs.get('categories', 'general')
     searxng_url = plugin_kwargs.get('searxng_url', None)
-    urls = searxng_request(txt, proxies, categories, searxng_url)
+    engines = plugin_kwargs.get('engine', None)
+    urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
     history = []
     if len(urls) == 0:
         chatbot.append((f"结论:{txt}",
-                        "[Local Message] 受到google限制,无法从google获取信息!"))
+                        "[Local Message] 受到限制,无法从searxng获取信息!请尝试更换搜索引擎。"))
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     # ------------- < Step 2: visit the pages one by one > -------------
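For reference, the keys this step reads from plugin_kwargs; how the dict gets populated lies outside this diff, so the values below are illustrative only. Note the asymmetry: the dict key is 'engine' (singular) while the function parameter is engines:

plugin_kwargs = {
    "categories": "general",                              # or "science"
    "searxng_url": "https://searxng.example.com/search",  # hypothetical; None falls back to SEARXNG_URL
    "engine": "duckduckgo",                               # forwarded as engines=...
}
engines = plugin_kwargs.get("engine", None)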
@@ -100,9 +118,10 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     chatbot.append([f"联网检索中 ...", None])
     for index, url in enumerate(urls[:max_search_result]):
         res = scrape_text(url['link'], proxies)
-        history.extend([f"第{index}份搜索结果:", res])
+        prefix = f"第{index}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
+        history.extend([prefix, res])
         res_squeeze = res.replace('\n', '...')
-        chatbot[-1] = [f"第{index}份搜索结果:\n\n" + res_squeeze[:500] + "......", None]
+        chatbot[-1] = [prefix + "\n\n" + res_squeeze[:500] + "......", None]
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

     # ------------- < Step 3: ChatGPT synthesis > -------------
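To see what the new prefix produces, here is the computation applied to one result item shaped like searxng_request's output (values invented):

index = 0
url = {
    "title": "Attention Is All You Need - 详解",
    "source": ["bing"],  # a list; [0] selects the first engine name
    "link": "https://example.com/paper",
}
prefix = f"第{index}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
# prefix == "第0份搜索结果 [源自bing搜索] (Attention Is All You Need):"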