implement doc_fns

This commit is contained in:
binary-husky
2025-06-04 00:20:09 +08:00
Parent 725f60fba3
Commit f42aad5093
21 changed files with 5828 additions and 0 deletions

View file

@@ -0,0 +1,6 @@
import os
import nltk

# Expand ~ explicitly so a literal "~" directory is not created by the downloader
nltk_data_dir = os.path.expanduser('~/nltk_data')
nltk.data.path.append(nltk_data_dir)
nltk.download('averaged_perceptron_tagger', download_dir=nltk_data_dir)
nltk.download('punkt', download_dir=nltk_data_dir)
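
A quick verification sketch (not part of the commit): it confirms the two resources are visible to NLTK and actually usable. Note that newer NLTK releases may additionally request punkt_tab and averaged_perceptron_tagger_eng.

import nltk

# Both calls raise LookupError if the resource is not on nltk.data.path.
nltk.data.find('tokenizers/punkt')
nltk.data.find('taggers/averaged_perceptron_tagger')

# End-to-end check: tokenization needs punkt, tagging needs the perceptron tagger.
print(nltk.pos_tag(nltk.word_tokenize("NLTK setup works.")))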

View file

@@ -0,0 +1,286 @@
from __future__ import annotations
import pandas as pd
import numpy as np
from pathlib import Path
from typing import Optional, List, Set, Dict, Union, Iterator, Tuple, Any
from dataclasses import dataclass, field
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
import chardet
from functools import lru_cache
import os
@dataclass
class ExtractorConfig:
"""提取器配置类"""
encoding: str = 'auto'
na_filter: bool = True
skip_blank_lines: bool = True
chunk_size: int = 10000
max_workers: int = 4
preserve_format: bool = True
read_all_sheets: bool = True # 新增:是否读取所有工作表
text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
'remove_extra_spaces': True,
'normalize_whitespace': False,
'remove_special_chars': False,
'lowercase': False
})
class ExcelTextExtractor:
"""增强的Excel格式文件文本内容提取器"""
SUPPORTED_EXTENSIONS: Set[str] = {
'.xlsx', '.xls', '.csv', '.tsv', '.xlsm', '.xltx', '.xltm', '.ods'
}
def __init__(self, config: Optional[ExtractorConfig] = None):
self.config = config or ExtractorConfig()
self._setup_logging()
self._detect_encoding = lru_cache(maxsize=128)(self._detect_encoding)
def _setup_logging(self) -> None:
"""配置日志记录器"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger(__name__)
fh = logging.FileHandler('excel_extractor.log')
fh.setLevel(logging.ERROR)
self.logger.addHandler(fh)
def _detect_encoding(self, file_path: Path) -> str:
if self.config.encoding != 'auto':
return self.config.encoding
try:
with open(file_path, 'rb') as f:
raw_data = f.read(10000)
result = chardet.detect(raw_data)
return result['encoding'] or 'utf-8'
except Exception as e:
self.logger.warning(f"Encoding detection failed: {e}. Using utf-8")
return 'utf-8'
def _validate_file(self, file_path: Union[str, Path]) -> Path:
path = Path(file_path).resolve()
if not path.exists():
raise ValueError(f"File not found: {path}")
if not path.is_file():
raise ValueError(f"Not a file: {path}")
if not os.access(path, os.R_OK):
raise PermissionError(f"No read permission: {path}")
if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
raise ValueError(
f"Unsupported format: {path.suffix}. "
f"Supported: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
)
return path
def _format_value(self, value: Any) -> str:
if pd.isna(value) or value is None:
return ''
if isinstance(value, (int, float)):
return str(value)
return str(value).strip()
def _process_chunk(self, chunk: pd.DataFrame, columns: Optional[List[str]] = None, sheet_name: str = '') -> str:
"""处理数据块,新增sheet_name参数"""
try:
if columns:
chunk = chunk[columns]
if self.config.preserve_format:
formatted_chunk = chunk.applymap(self._format_value)
rows = []
# 添加工作表名称作为标题
if sheet_name:
rows.append(f"[Sheet: {sheet_name}]")
# 添加表头
headers = [str(col) for col in formatted_chunk.columns]
rows.append('\t'.join(headers))
# 添加数据行
for _, row in formatted_chunk.iterrows():
rows.append('\t'.join(row.values))
return '\n'.join(rows)
else:
flat_values = (
chunk.astype(str)
.replace({'nan': '', 'None': '', 'NaN': ''})
.values.flatten()
)
return ' '.join(v for v in flat_values if v)
except Exception as e:
self.logger.error(f"Error processing chunk: {e}")
raise
def _read_file(self, file_path: Path) -> Union[pd.DataFrame, Iterator[pd.DataFrame], Dict[str, pd.DataFrame]]:
"""读取文件,支持多工作表"""
try:
encoding = self._detect_encoding(file_path)
if file_path.suffix.lower() in {'.csv', '.tsv'}:
sep = '\t' if file_path.suffix.lower() == '.tsv' else ','
# 对大文件使用分块读取
if file_path.stat().st_size > self.config.chunk_size * 1024:
return pd.read_csv(
file_path,
encoding=encoding,
na_filter=self.config.na_filter,
skip_blank_lines=self.config.skip_blank_lines,
sep=sep,
chunksize=self.config.chunk_size,
on_bad_lines='warn'
)
else:
return pd.read_csv(
file_path,
encoding=encoding,
na_filter=self.config.na_filter,
skip_blank_lines=self.config.skip_blank_lines,
sep=sep
)
else:
# Excel文件处理,支持多工作表
if self.config.read_all_sheets:
# 读取所有工作表
return pd.read_excel(
file_path,
na_filter=self.config.na_filter,
keep_default_na=self.config.na_filter,
engine='openpyxl',
sheet_name=None # None表示读取所有工作表
)
else:
# 只读取第一个工作表
return pd.read_excel(
file_path,
na_filter=self.config.na_filter,
keep_default_na=self.config.na_filter,
engine='openpyxl',
sheet_name=0 # 读取第一个工作表
)
except Exception as e:
self.logger.error(f"Error reading file {file_path}: {e}")
raise
def extract_text(
self,
file_path: Union[str, Path],
columns: Optional[List[str]] = None,
separator: str = '\n'
) -> str:
"""提取文本,支持多工作表"""
try:
path = self._validate_file(file_path)
self.logger.info(f"Processing: {path}")
reader = self._read_file(path)
texts = []
# 处理Excel多工作表
if isinstance(reader, dict):
for sheet_name, df in reader.items():
sheet_text = self._process_chunk(df, columns, sheet_name)
if sheet_text:
texts.append(sheet_text)
return separator.join(texts)
# 处理单个DataFrame
elif isinstance(reader, pd.DataFrame):
return self._process_chunk(reader, columns)
# 处理DataFrame迭代器
else:
with ThreadPoolExecutor(max_workers=self.config.max_workers) as executor:
futures = {
executor.submit(self._process_chunk, chunk, columns): i
for i, chunk in enumerate(reader)
}
chunk_texts = []
for future in as_completed(futures):
try:
text = future.result()
if text:
chunk_texts.append((futures[future], text))
except Exception as e:
self.logger.error(f"Error in chunk {futures[future]}: {e}")
# 按块的顺序排序
chunk_texts.sort(key=lambda x: x[0])
texts = [text for _, text in chunk_texts]
# 合并文本,保留格式
if texts and self.config.preserve_format:
result = texts[0] # 第一块包含表头
if len(texts) > 1:
# 跳过后续块的表头行
for text in texts[1:]:
result += '\n' + '\n'.join(text.split('\n')[1:])
return result
else:
return separator.join(texts)
except Exception as e:
self.logger.error(f"Extraction failed: {e}")
raise
@staticmethod
def get_supported_formats() -> List[str]:
"""获取支持的文件格式列表"""
return sorted(ExcelTextExtractor.SUPPORTED_EXTENSIONS)
def main():
"""主函数:演示用法"""
config = ExtractorConfig(
encoding='auto',
preserve_format=True,
read_all_sheets=True, # 启用多工作表读取
text_cleanup={
'remove_extra_spaces': True,
'normalize_whitespace': False,
'remove_special_chars': False,
'lowercase': False
}
)
extractor = ExcelTextExtractor(config)
try:
sample_file = 'example.xlsx'
if Path(sample_file).exists():
text = extractor.extract_text(
sample_file,
columns=['title', 'content']
)
print("提取的文本:")
print(text)
else:
print(f"示例文件 {sample_file} 不存在")
print("\n支持的格式:", extractor.get_supported_formats())
except Exception as e:
print(f"错误: {e}")
if __name__ == "__main__":
main()
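
Beyond the main() demo above, here is a self-contained usage sketch (not part of the commit, purely an illustration) that builds a tiny two-sheet workbook in a temporary directory and runs the extractor over it; it assumes ExcelTextExtractor and ExtractorConfig are importable and that pandas plus openpyxl are installed, which the class already requires.

import tempfile
from pathlib import Path
import pandas as pd

tmp_xlsx = Path(tempfile.mkdtemp()) / "demo.xlsx"
with pd.ExcelWriter(tmp_xlsx, engine="openpyxl") as writer:
    pd.DataFrame({"title": ["a"], "content": ["hello"]}).to_excel(writer, sheet_name="S1", index=False)
    pd.DataFrame({"title": ["b"], "content": ["world"]}).to_excel(writer, sheet_name="S2", index=False)

extractor = ExcelTextExtractor(ExtractorConfig(read_all_sheets=True, preserve_format=True))
text = extractor.extract_text(tmp_xlsx, columns=["title", "content"])
print(text)
# With read_all_sheets=True the output contains one tab-separated block per
# sheet, each prefixed with a "[Sheet: ...]" header line.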

View file

@@ -0,0 +1,359 @@
from __future__ import annotations
from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
import re
import subprocess
import tempfile
import shutil
@dataclass
class MarkdownConverterConfig:
"""PDF 到 Markdown 转换器配置类
Attributes:
extract_images: 是否提取图片
extract_tables: 是否尝试保留表格结构
extract_code_blocks: 是否识别代码块
extract_math: 是否转换数学公式
output_dir: 输出目录路径
image_dir: 图片保存目录路径
paragraph_separator: 段落之间的分隔符
text_cleanup: 文本清理选项字典
docintel_endpoint: Document Intelligence端点URL (可选)
enable_plugins: 是否启用插件
llm_client: LLM客户端对象 (例如OpenAI client)
llm_model: 要使用的LLM模型名称
"""
extract_images: bool = True
extract_tables: bool = True
extract_code_blocks: bool = True
extract_math: bool = True
output_dir: str = ""
image_dir: str = "images"
paragraph_separator: str = '\n\n'
text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
})
docintel_endpoint: str = ""
enable_plugins: bool = False
llm_client: Optional[object] = None
llm_model: str = ""
class MarkdownConverter:
"""PDF 到 Markdown 转换器
使用 markitdown 库实现 PDF 到 Markdown 的转换,支持多种配置选项。
"""
SUPPORTED_EXTENSIONS: Set[str] = {
'.pdf',
}
def __init__(self, config: Optional[MarkdownConverterConfig] = None):
"""初始化转换器
Args:
config: 转换器配置对象,如果为None则使用默认配置
"""
self.config = config or MarkdownConverterConfig()
self._setup_logging()
# 检查是否安装了 markitdown
self._check_markitdown_installation()
def _setup_logging(self) -> None:
"""配置日志记录器"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger(__name__)
# 添加文件处理器
fh = logging.FileHandler('markdown_converter.log')
fh.setLevel(logging.ERROR)
self.logger.addHandler(fh)
def _check_markitdown_installation(self) -> None:
"""检查是否安装了 markitdown"""
try:
# 尝试导入 markitdown 库
from markitdown import MarkItDown
self.logger.info("markitdown 库已安装")
except ImportError:
self.logger.warning("markitdown 库未安装,尝试安装...")
try:
subprocess.check_call(["pip", "install", "markitdown"])
self.logger.info("markitdown 库安装成功")
from markitdown import MarkItDown
except (subprocess.SubprocessError, ImportError):
self.logger.error("无法安装 markitdown 库,请手动安装")
self.markitdown_available = False
return
self.markitdown_available = True
def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
"""验证文件
Args:
file_path: 文件路径
max_size_mb: 允许的最大文件大小(MB)
Returns:
Path: 验证后的Path对象
Raises:
ValueError: 文件不存在、格式不支持或大小超限
PermissionError: 没有读取权限
"""
path = Path(file_path).resolve()
if not path.exists():
raise ValueError(f"文件不存在: {path}")
if not path.is_file():
raise ValueError(f"不是一个文件: {path}")
if not os.access(path, os.R_OK):
raise PermissionError(f"没有读取权限: {path}")
file_size_mb = path.stat().st_size / (1024 * 1024)
if file_size_mb > max_size_mb:
raise ValueError(
f"文件大小 ({file_size_mb:.1f}MB) 超过限制 {max_size_mb}MB"
)
if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
raise ValueError(
f"不支持的格式: {path.suffix}. "
f"支持的格式: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
)
return path
def _cleanup_text(self, text: str) -> str:
"""清理文本
Args:
text: 原始文本
Returns:
str: 清理后的文本
"""
if self.config.text_cleanup['remove_extra_spaces']:
text = ' '.join(text.split())
if self.config.text_cleanup['normalize_whitespace']:
text = text.replace('\t', ' ').replace('\r', '\n')
if self.config.text_cleanup['lowercase']:
text = text.lower()
return text.strip()
@staticmethod
def get_supported_formats() -> List[str]:
"""获取支持的文件格式列表"""
return sorted(MarkdownConverter.SUPPORTED_EXTENSIONS)
def convert_to_markdown(
self,
file_path: Union[str, Path],
output_path: Optional[Union[str, Path]] = None
) -> str:
"""将 PDF 转换为 Markdown
Args:
file_path: PDF 文件路径
output_path: 输出 Markdown 文件路径,如果为 None 则返回内容而不保存
Returns:
str: 转换后的 Markdown 内容
Raises:
Exception: 转换过程中的错误
"""
try:
path = self._validate_file(file_path)
self.logger.info(f"处理: {path}")
if not self.markitdown_available:
raise ImportError("markitdown 库未安装,无法进行转换")
# 导入 markitdown 库
from markitdown import MarkItDown
# 准备输出目录
if output_path:
output_path = Path(output_path)
output_dir = output_path.parent
output_dir.mkdir(parents=True, exist_ok=True)
else:
# 创建临时目录作为输出目录
temp_dir = tempfile.mkdtemp()
output_dir = Path(temp_dir)
output_path = output_dir / f"{path.stem}.md"
# 图片目录
image_dir = output_dir / self.config.image_dir
image_dir.mkdir(parents=True, exist_ok=True)
# 创建 MarkItDown 实例并进行转换
if self.config.docintel_endpoint:
md = MarkItDown(docintel_endpoint=self.config.docintel_endpoint)
elif self.config.llm_client and self.config.llm_model:
md = MarkItDown(
enable_plugins=self.config.enable_plugins,
llm_client=self.config.llm_client,
llm_model=self.config.llm_model
)
else:
md = MarkItDown(enable_plugins=self.config.enable_plugins)
# 执行转换
result = md.convert(str(path))
markdown_content = result.text_content
# 清理文本
markdown_content = self._cleanup_text(markdown_content)
# 如果需要保存到文件
if output_path:
with open(output_path, 'w', encoding='utf-8') as f:
f.write(markdown_content)
self.logger.info(f"转换成功,输出到: {output_path}")
return markdown_content
except Exception as e:
self.logger.error(f"转换失败: {e}")
raise
finally:
# 若创建了临时目录(仅在未指定输出路径时创建),返回后将其清理
if 'temp_dir' in locals():
shutil.rmtree(temp_dir, ignore_errors=True)
def convert_to_markdown_and_save(
self,
file_path: Union[str, Path],
output_path: Union[str, Path]
) -> Path:
"""将 PDF 转换为 Markdown 并保存到指定路径
Args:
file_path: PDF 文件路径
output_path: 输出 Markdown 文件路径
Returns:
Path: 输出文件的 Path 对象
Raises:
Exception: 转换过程中的错误
"""
self.convert_to_markdown(file_path, output_path)
return Path(output_path)
def batch_convert(
self,
file_paths: List[Union[str, Path]],
output_dir: Union[str, Path]
) -> List[Path]:
"""批量转换多个 PDF 文件为 Markdown
Args:
file_paths: PDF 文件路径列表
output_dir: 输出目录路径
Returns:
List[Path]: 输出文件路径列表
Raises:
Exception: 转换过程中的错误
"""
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
output_paths = []
for file_path in file_paths:
path = Path(file_path)
output_path = output_dir / f"{path.stem}.md"
try:
self.convert_to_markdown(file_path, output_path)
output_paths.append(output_path)
self.logger.info(f"成功转换: {path} -> {output_path}")
except Exception as e:
self.logger.error(f"转换失败 {path}: {e}")
return output_paths
def main():
"""主函数:演示用法"""
# 配置
config = MarkdownConverterConfig(
extract_images=True,
extract_tables=True,
extract_code_blocks=True,
extract_math=True,
enable_plugins=False,
text_cleanup={
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
}
)
# 创建转换器
converter = MarkdownConverter(config)
# 使用示例
try:
# 替换为实际的文件路径
sample_file = './crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf'
if Path(sample_file).exists():
# 转换为 Markdown 并打印内容
markdown_content = converter.convert_to_markdown(sample_file)
print("转换后的 Markdown 内容:")
print(markdown_content[:500] + "...") # 只打印前500个字符
# 转换并保存到文件
output_file = f"./output_{Path(sample_file).stem}.md"
output_path = converter.convert_to_markdown_and_save(sample_file, output_file)
print(f"\n已保存到: {output_path}")
# 使用LLM增强的示例 (需要添加相应的导入和配置)
# try:
# from openai import OpenAI
# client = OpenAI()
# llm_config = MarkdownConverterConfig(
# llm_client=client,
# llm_model="gpt-4o"
# )
# llm_converter = MarkdownConverter(llm_config)
# llm_result = llm_converter.convert_to_markdown("example.jpg")
# print("LLM增强的结果:")
# print(llm_result[:500] + "...")
# except ImportError:
# print("未安装OpenAI库,跳过LLM示例")
else:
print(f"示例文件 {sample_file} 不存在")
print("\n支持的格式:", converter.get_supported_formats())
except Exception as e:
print(f"错误: {e}")
if __name__ == "__main__":
main()
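
A short sketch (not part of the commit) of the batch_convert path shown above; the folder names are placeholders, and markitdown must be installed for the conversions to succeed.

from pathlib import Path

converter = MarkdownConverter()
pdf_dir = Path("./pdfs")            # hypothetical folder of input PDFs
outputs = converter.batch_convert(sorted(pdf_dir.glob("*.pdf")), "./markdown_out")
for md_file in outputs:
    print("converted ->", md_file)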

View file

@@ -0,0 +1,493 @@
from __future__ import annotations
from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
import re
from unstructured.partition.auto import partition
from unstructured.documents.elements import (
Text, Title, NarrativeText, ListItem, Table,
Footer, Header, PageBreak, Image, Address
)
@dataclass
class PaperMetadata:
"""论文元数据类"""
title: str = ""
authors: List[str] = field(default_factory=list)
affiliations: List[str] = field(default_factory=list)
journal: str = ""
volume: str = ""
issue: str = ""
year: str = ""
doi: str = ""
date: str = ""
publisher: str = ""
conference: str = ""
abstract: str = ""
keywords: List[str] = field(default_factory=list)
@dataclass
class ExtractorConfig:
"""元数据提取器配置类"""
paragraph_separator: str = '\n\n'
text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
})
class PaperMetadataExtractor:
"""论文元数据提取器
使用unstructured库从多种文档格式中提取论文的标题、作者、摘要等元数据信息。
"""
SUPPORTED_EXTENSIONS: Set[str] = {
'.pdf', '.docx', '.doc', '.txt', '.ppt', '.pptx',
'.xlsx', '.xls', '.md', '.org', '.odt', '.rst',
'.rtf', '.epub', '.html', '.xml', '.json'
}
# 定义论文各部分的关键词模式
SECTION_PATTERNS = {
'abstract': r'\b(摘要|abstract|summary|概要|résumé|zusammenfassung|аннотация)\b',
'keywords': r'\b(关键词|keywords|key\s+words|关键字|mots[- ]clés|schlüsselwörter|ключевые слова)\b',
}
def __init__(self, config: Optional[ExtractorConfig] = None):
"""初始化提取器
Args:
config: 提取器配置对象,如果为None则使用默认配置
"""
self.config = config or ExtractorConfig()
self._setup_logging()
def _setup_logging(self) -> None:
"""配置日志记录器"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger(__name__)
# 添加文件处理器
fh = logging.FileHandler('paper_metadata_extractor.log')
fh.setLevel(logging.ERROR)
self.logger.addHandler(fh)
def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
"""验证文件
Args:
file_path: 文件路径
max_size_mb: 允许的最大文件大小(MB)
Returns:
Path: 验证后的Path对象
Raises:
ValueError: 文件不存在、格式不支持或大小超限
PermissionError: 没有读取权限
"""
path = Path(file_path).resolve()
if not path.exists():
raise ValueError(f"文件不存在: {path}")
if not path.is_file():
raise ValueError(f"不是文件: {path}")
if not os.access(path, os.R_OK):
raise PermissionError(f"没有读取权限: {path}")
file_size_mb = path.stat().st_size / (1024 * 1024)
if file_size_mb > max_size_mb:
raise ValueError(
f"文件大小 ({file_size_mb:.1f}MB) 超过限制 {max_size_mb}MB"
)
if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
raise ValueError(
f"不支持的文件格式: {path.suffix}. "
f"支持的格式: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
)
return path
def _cleanup_text(self, text: str) -> str:
"""清理文本
Args:
text: 原始文本
Returns:
str: 清理后的文本
"""
if self.config.text_cleanup['remove_extra_spaces']:
text = ' '.join(text.split())
if self.config.text_cleanup['normalize_whitespace']:
text = text.replace('\t', ' ').replace('\r', '\n')
if self.config.text_cleanup['lowercase']:
text = text.lower()
return text.strip()
@staticmethod
def get_supported_formats() -> List[str]:
"""获取支持的文件格式列表"""
return sorted(PaperMetadataExtractor.SUPPORTED_EXTENSIONS)
def extract_metadata(self, file_path: Union[str, Path], strategy: str = "fast") -> PaperMetadata:
"""提取论文元数据
Args:
file_path: 文件路径
strategy: 提取策略("fast" 或 "hi_res")
Returns:
PaperMetadata: 提取的论文元数据
Raises:
Exception: 提取过程中的错误
"""
try:
path = self._validate_file(file_path)
self.logger.info(f"正在处理: {path}")
# 使用unstructured库分解文档
elements = partition(
str(path),
strategy=strategy,
include_metadata=True,
nlp=False,
)
# 提取元数据
metadata = PaperMetadata()
# 提取标题和作者
self._extract_title_and_authors(elements, metadata)
# 提取摘要和关键词
self._extract_abstract_and_keywords(elements, metadata)
# 提取其他元数据
self._extract_additional_metadata(elements, metadata)
return metadata
except Exception as e:
self.logger.error(f"元数据提取失败: {e}")
raise
def _extract_title_and_authors(self, elements, metadata: PaperMetadata) -> None:
"""从文档中提取标题和作者信息 - 改进版"""
# 收集所有潜在的标题候选
title_candidates = []
all_text = []
raw_text = []
# 首先收集文档前30个元素的文本,用于辅助判断
for i, element in enumerate(elements[:30]):
if isinstance(element, (Text, Title, NarrativeText)):
text = str(element).strip()
if text:
all_text.append(text)
raw_text.append(text)
# 输出前几行原始文本,便于调试
self.logger.debug("原始文本前10行:")
for i, text in enumerate(raw_text[:10]):
self.logger.debug(f"{i}: {text}")
# 1. 尝试查找连续的标题片段并合并它们
i = 0
while i < len(all_text) - 1:
current = all_text[i]
next_text = all_text[i + 1]
# 检查是否存在标题分割情况:一行以冒号结尾,下一行像是标题的延续
if current.endswith(':') and len(current) < 50 and len(next_text) > 5 and next_text[0].isupper():
# 合并这两行文本
combined_title = f"{current} {next_text}"
# 查找合并前的文本并替换
all_text[i] = combined_title
all_text.pop(i + 1)
# 给合并后的标题很高的分数
title_candidates.append((combined_title, 15, i))
else:
i += 1
# 2. 首先尝试从标题元素中查找
for i, element in enumerate(elements[:15]): # 只检查前15个元素
if isinstance(element, Title):
title_text = str(element).strip()
# 排除常见的非标题内容
if title_text.lower() not in ['abstract', '摘要', 'introduction', '引言']:
# 计算标题分数(越高越可能是真正的标题)
score = self._evaluate_title_candidate(title_text, i, element)
title_candidates.append((title_text, score, i))
# 3. 特别处理常见的论文标题格式
for i, text in enumerate(all_text[:15]):
# 特别检查"KIMI K1.5:"类型的前缀标题
if re.match(r'^[A-Z][A-Z0-9\s\.]+(\s+K\d+(\.\d+)?)?:', text):
score = 12 # 给予很高的分数
title_candidates.append((text, score, i))
# 如果下一行也是全大写,很可能是标题的延续
if i+1 < len(all_text) and all_text[i+1].isupper() and len(all_text[i+1]) > 10:
combined_title = f"{text} {all_text[i+1]}"
title_candidates.append((combined_title, 15, i)) # 给合并标题更高分数
# 匹配全大写的标题行
elif text.isupper() and len(text) > 10 and len(text) < 100:
score = 10 - i * 0.5 # 越靠前越可能是标题
title_candidates.append((text, score, i))
# 对标题候选按分数排序并选取最佳候选
if title_candidates:
title_candidates.sort(key=lambda x: x[1], reverse=True)
metadata.title = title_candidates[0][0]
title_position = title_candidates[0][2]
print(f"所有标题候选: {title_candidates[:3]}")
else:
# 如果没有找到合适的标题,使用一个备选策略
for text in all_text[:10]:
if text.isupper() and len(text) > 10 and len(text) < 200: # 大写且适当长度的文本
metadata.title = text
break
title_position = 0
# 提取作者信息 - 改进后的作者提取逻辑
author_candidates = []
# 1. 特别处理"TECHNICAL REPORT OF"之后的行,通常是作者或团队
for i, text in enumerate(all_text):
if "TECHNICAL REPORT" in text.upper() and i+1 < len(all_text):
team_text = all_text[i+1].strip()
if re.search(r'\b(team|group|lab)\b', team_text, re.IGNORECASE):
author_candidates.append((team_text, 15))
# 2. 查找包含Team的文本
for text in all_text[:20]:
if "Team" in text and len(text) < 30:
# 这很可能是团队名
author_candidates.append((text, 12))
# 添加作者到元数据
if author_candidates:
# 按分数排序
author_candidates.sort(key=lambda x: x[1], reverse=True)
# 去重
seen_authors = set()
for author, _ in author_candidates:
if author.lower() not in seen_authors and not author.isdigit():
seen_authors.add(author.lower())
metadata.authors.append(author)
# 如果没有找到作者,尝试查找隶属机构信息中的团队名称
if not metadata.authors:
for text in all_text[:20]:
if re.search(r'\b(team|group|lab|laboratory|研究组|团队)\b', text, re.IGNORECASE):
if len(text) < 50: # 避免太长的文本
metadata.authors.append(text.strip())
break
# 提取隶属机构信息
for i, element in enumerate(elements[:30]):
element_text = str(element).strip()
if re.search(r'(university|institute|department|school|laboratory|college|center|centre|\d{5,}|^[a-zA-Z]+@|学院|大学|研究所|研究院)', element_text, re.IGNORECASE):
# 可能是隶属机构
if element_text not in metadata.affiliations and len(element_text) > 10:
metadata.affiliations.append(element_text)
def _evaluate_title_candidate(self, text, position, element):
"""评估标题候选项的可能性分数"""
score = 0
# 位置因素:越靠前越可能是标题
score += max(0, 10 - position) * 0.5
# 长度因素:标题通常不会太短也不会太长
if 10 <= len(text) <= 150:
score += 3
elif len(text) < 10:
score -= 2
elif len(text) > 150:
score -= 3
# 格式因素
if text.isupper(): # 全大写可能是标题
score += 2
if re.match(r'^[A-Z]', text): # 首字母大写
score += 1
if ':' in text: # 标题常包含冒号
score += 1.5
# 内容因素
if re.search(r'\b(scaling|learning|model|approach|method|system|framework|analysis)\b', text.lower()):
score += 2 # 包含常见的学术论文关键词
# 避免误判
if re.match(r'^\d+$', text): # 纯数字
score -= 10
if re.search(r'^(http|www|doi)', text.lower()): # URL或DOI
score -= 5
if len(text.split()) <= 2 and len(text) < 15: # 太短的短语
score -= 3
# 元数据因素(如果有)
if hasattr(element, 'metadata') and element.metadata:
# 修复正确处理ElementMetadata对象
try:
# 尝试通过getattr安全地获取属性
font_size = getattr(element.metadata, 'font_size', None)
if font_size is not None and font_size > 14: # 假设标准字体大小是12
score += 3
font_weight = getattr(element.metadata, 'font_weight', None)
if font_weight == 'bold':
score += 2 # 粗体加分
except (AttributeError, TypeError):
# 如果metadata的访问方式不正确,尝试其他可能的访问方式
try:
metadata_dict = element.metadata.__dict__ if hasattr(element.metadata, '__dict__') else {}
if 'font_size' in metadata_dict and metadata_dict['font_size'] > 14:
score += 3
if 'font_weight' in metadata_dict and metadata_dict['font_weight'] == 'bold':
score += 2
except Exception:
# 如果所有尝试都失败,忽略元数据处理
pass
return score
def _extract_abstract_and_keywords(self, elements, metadata: PaperMetadata) -> None:
"""从文档中提取摘要和关键词"""
abstract_found = False
keywords_found = False
abstract_text = []
for i, element in enumerate(elements):
element_text = str(element).strip().lower()
# 寻找摘要部分
if not abstract_found and (
isinstance(element, Title) and
re.search(self.SECTION_PATTERNS['abstract'], element_text, re.IGNORECASE)
):
abstract_found = True
continue
# 如果找到摘要部分,收集内容直到遇到关键词部分或新章节
if abstract_found and not keywords_found:
# 检查是否遇到关键词部分或新章节
if (
isinstance(element, Title) or
re.search(self.SECTION_PATTERNS['keywords'], element_text, re.IGNORECASE) or
re.match(r'\b(introduction|引言|method|方法)\b', element_text, re.IGNORECASE)
):
keywords_found = re.search(self.SECTION_PATTERNS['keywords'], element_text, re.IGNORECASE)
abstract_found = False # 停止收集摘要
else:
# 收集摘要文本
if isinstance(element, (Text, NarrativeText)) and element_text:
abstract_text.append(element_text)
# 如果找到关键词部分,提取关键词
if keywords_found and not abstract_found and not metadata.keywords:
if isinstance(element, (Text, NarrativeText)):
# 清除可能的"关键词:"/"Keywords:"前缀
cleaned_text = re.sub(r'^\s*(关键词|keywords|key\s+words)\s*[:]\s*', '', element_text, flags=re.IGNORECASE)
# 尝试按不同分隔符分割
for separator in [';', '；', ',', '，']:
if separator in cleaned_text:
metadata.keywords = [k.strip() for k in cleaned_text.split(separator) if k.strip()]
break
# 如果未能分割,将整个文本作为一个关键词
if not metadata.keywords and cleaned_text:
metadata.keywords = [cleaned_text]
keywords_found = False # 已提取关键词,停止处理
# 设置摘要文本
if abstract_text:
metadata.abstract = self.config.paragraph_separator.join(abstract_text)
def _extract_additional_metadata(self, elements, metadata: PaperMetadata) -> None:
"""提取其他元数据信息"""
for element in elements[:30]: # 只检查文档前部分
element_text = str(element).strip()
# 尝试匹配DOI
doi_match = re.search(r'(doi|DOI):\s*(10\.\d{4,}\/[a-zA-Z0-9.-]+)', element_text)
if doi_match and not metadata.doi:
metadata.doi = doi_match.group(2)
# 尝试匹配日期
date_match = re.search(r'(published|received|accepted|submitted):\s*(\d{1,2}\s+[a-zA-Z]+\s+\d{4}|\d{4}[-/]\d{1,2}[-/]\d{1,2})', element_text, re.IGNORECASE)
if date_match and not metadata.date:
metadata.date = date_match.group(2)
# 尝试匹配年份
year_match = re.search(r'\b(19|20)\d{2}\b', element_text)
if year_match and not metadata.year:
metadata.year = year_match.group(0)
# 尝试匹配期刊/会议名称
journal_match = re.search(r'(journal|conference):\s*([^,;.]+)', element_text, re.IGNORECASE)
if journal_match:
if "journal" in journal_match.group(1).lower() and not metadata.journal:
metadata.journal = journal_match.group(2).strip()
elif not metadata.conference:
metadata.conference = journal_match.group(2).strip()
def main():
"""主函数:演示用法"""
# 创建提取器
extractor = PaperMetadataExtractor()
# 使用示例
try:
# 替换为实际的文件路径
sample_file = '/Users/boyin.liu/Documents/示例文档/论文/3.pdf'
if Path(sample_file).exists():
metadata = extractor.extract_metadata(sample_file)
print("提取的元数据:")
print(f"标题: {metadata.title}")
print(f"作者: {', '.join(metadata.authors)}")
print(f"机构: {', '.join(metadata.affiliations)}")
print(f"摘要: {metadata.abstract[:200]}...")
print(f"关键词: {', '.join(metadata.keywords)}")
print(f"DOI: {metadata.doi}")
print(f"日期: {metadata.date}")
print(f"年份: {metadata.year}")
print(f"期刊: {metadata.journal}")
print(f"会议: {metadata.conference}")
else:
print(f"示例文件 {sample_file} 不存在")
print("\n支持的格式:", extractor.get_supported_formats())
except Exception as e:
print(f"错误: {e}")
if __name__ == "__main__":
main()
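
Since PaperMetadata is a plain dataclass, a sketch like the following (not part of the commit; "paper.pdf" is a placeholder path) can serialize the extraction result, e.g. for caching or for passing to a downstream prompt.

import json
from dataclasses import asdict

extractor = PaperMetadataExtractor()
meta = extractor.extract_metadata("paper.pdf", strategy="fast")
print(json.dumps(asdict(meta), ensure_ascii=False, indent=2))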

View file

@@ -0,0 +1,86 @@
from pathlib import Path
from crazy_functions.doc_fns.read_fns.unstructured_all.paper_structure_extractor import PaperStructureExtractor
def extract_and_save_as_markdown(paper_path, output_path=None):
"""
提取论文结构并保存为Markdown格式
参数:
paper_path: 论文文件路径
output_path: 输出的Markdown文件路径,如果不指定,将使用与输入相同的文件名但扩展名为.md
返回:
保存的Markdown文件路径
"""
# 创建提取器
extractor = PaperStructureExtractor()
# 解析文件路径
paper_path = Path(paper_path)
# 如果未指定输出路径,使用相同文件名但扩展名为.md
if output_path is None:
output_path = paper_path.with_suffix('.md')
else:
output_path = Path(output_path)
# 确保输出目录存在
output_path.parent.mkdir(parents=True, exist_ok=True)
print(f"正在处理论文: {paper_path}")
try:
# 提取论文结构
paper = extractor.extract_paper_structure(paper_path)
# 生成Markdown内容
markdown_content = extractor.generate_markdown(paper)
# 保存到文件
with open(output_path, 'w', encoding='utf-8') as f:
f.write(markdown_content)
print(f"已成功保存Markdown文件: {output_path}")
# 打印摘要信息
print("\n论文摘要信息:")
print(f"标题: {paper.metadata.title}")
print(f"作者: {', '.join(paper.metadata.authors)}")
print(f"关键词: {', '.join(paper.keywords)}")
print(f"章节数: {len(paper.sections)}")
print(f"图表数: {len(paper.figures)}")
print(f"表格数: {len(paper.tables)}")
print(f"公式数: {len(paper.formulas)}")
print(f"参考文献数: {len(paper.references)}")
return output_path
except Exception as e:
print(f"处理论文时出错: {e}")
import traceback
traceback.print_exc()
return None
# 使用示例
if __name__ == "__main__":
# 替换为实际的论文文件路径
sample_paper = "crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf"
# 可以指定输出路径,也可以使用默认路径
# output_file = "/path/to/output/paper_structure.md"
# extract_and_save_as_markdown(sample_paper, output_file)
# 使用默认输出路径(与输入文件同名,但扩展名为 .md)
extract_and_save_as_markdown(sample_paper)
# # 批量处理多个论文的示例
# paper_dir = Path("/path/to/papers/folder")
# output_dir = Path("/path/to/output/folder")
#
# # 确保输出目录存在
# output_dir.mkdir(parents=True, exist_ok=True)
#
# # 处理目录中的所有PDF文件
# for paper_file in paper_dir.glob("*.pdf"):
# output_file = output_dir / f"{paper_file.stem}.md"
# extract_and_save_as_markdown(paper_file, output_file)
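
Because extract_and_save_as_markdown returns None on failure, a caller can collect the papers that failed in one pass, as in this sketch (not part of the commit; the file names are placeholders).

papers = ["a.pdf", "b.pdf", "c.pdf"]   # hypothetical input files
failed = [p for p in papers if extract_and_save_as_markdown(p) is None]
if failed:
    print("failed to convert:", ", ".join(failed))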

View file

@@ -0,0 +1,275 @@
from __future__ import annotations
from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
from unstructured.partition.auto import partition
from unstructured.documents.elements import (
Text, Title, NarrativeText, ListItem, Table,
Footer, Header, PageBreak, Image, Address
)
@dataclass
class TextExtractorConfig:
"""通用文档提取器配置类
Attributes:
extract_headers_footers: 是否提取页眉页脚
extract_tables: 是否提取表格内容
extract_lists: 是否提取列表内容
extract_titles: 是否提取标题
paragraph_separator: 段落之间的分隔符
text_cleanup: 文本清理选项字典
"""
extract_headers_footers: bool = False
extract_tables: bool = True
extract_lists: bool = True
extract_titles: bool = True
paragraph_separator: str = '\n\n'
text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
})
class UnstructuredTextExtractor:
"""通用文档文本内容提取器
使用 unstructured 库支持多种文档格式的文本提取,提供统一的接口和配置选项。
"""
SUPPORTED_EXTENSIONS: Set[str] = {
# 文档格式
'.pdf', '.docx', '.doc', '.txt',
# 演示文稿
'.ppt', '.pptx',
# 电子表格
'.xlsx', '.xls', '.csv',
# 图片
'.png', '.jpg', '.jpeg', '.tiff',
# 邮件
'.eml', '.msg', '.p7s',
# Markdown
".md",
# Org Mode
".org",
# Open Office
".odt",
# reStructured Text
".rst",
# Rich Text
".rtf",
# TSV
".tsv",
# EPUB
'.epub',
# 其他格式
'.html', '.xml', '.json',
}
def __init__(self, config: Optional[TextExtractorConfig] = None):
"""初始化提取器
Args:
config: 提取器配置对象,如果为None则使用默认配置
"""
self.config = config or TextExtractorConfig()
self._setup_logging()
def _setup_logging(self) -> None:
"""配置日志记录器"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger(__name__)
# 添加文件处理器
fh = logging.FileHandler('text_extractor.log')
fh.setLevel(logging.ERROR)
self.logger.addHandler(fh)
def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
"""验证文件
Args:
file_path: 文件路径
max_size_mb: 允许的最大文件大小(MB)
Returns:
Path: 验证后的Path对象
Raises:
ValueError: 文件不存在、格式不支持或大小超限
PermissionError: 没有读取权限
"""
path = Path(file_path).resolve()
if not path.exists():
raise ValueError(f"File not found: {path}")
if not path.is_file():
raise ValueError(f"Not a file: {path}")
if not os.access(path, os.R_OK):
raise PermissionError(f"No read permission: {path}")
file_size_mb = path.stat().st_size / (1024 * 1024)
if file_size_mb > max_size_mb:
raise ValueError(
f"File size ({file_size_mb:.1f}MB) exceeds limit of {max_size_mb}MB"
)
if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
raise ValueError(
f"Unsupported format: {path.suffix}. "
f"Supported: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
)
return path
def _cleanup_text(self, text: str) -> str:
"""清理文本
Args:
text: 原始文本
Returns:
str: 清理后的文本
"""
if self.config.text_cleanup['remove_extra_spaces']:
text = ' '.join(text.split())
if self.config.text_cleanup['normalize_whitespace']:
text = text.replace('\t', ' ').replace('\r', '\n')
if self.config.text_cleanup['lowercase']:
text = text.lower()
return text.strip()
def _should_extract_element(self, element) -> bool:
"""判断是否应该提取某个元素
Args:
element: 文档元素
Returns:
bool: 是否应该提取
"""
if isinstance(element, (Text, NarrativeText)):
return True
if isinstance(element, Title) and self.config.extract_titles:
return True
if isinstance(element, ListItem) and self.config.extract_lists:
return True
if isinstance(element, Table) and self.config.extract_tables:
return True
if isinstance(element, (Header, Footer)) and self.config.extract_headers_footers:
return True
return False
@staticmethod
def get_supported_formats() -> List[str]:
"""获取支持的文件格式列表"""
return sorted(UnstructuredTextExtractor.SUPPORTED_EXTENSIONS)
def extract_text(
self,
file_path: Union[str, Path],
strategy: str = "fast"
) -> str:
"""提取文本
Args:
file_path: 文件路径
strategy: 提取策略("fast" 或 "hi_res")
Returns:
str: 提取的文本内容
Raises:
Exception: 提取过程中的错误
"""
try:
path = self._validate_file(file_path)
self.logger.info(f"Processing: {path}")
# 传入 nlp=False,禁用 NLTK 相关处理
elements = partition(
str(path),
strategy=strategy,
include_metadata=True,
nlp=False,
)
# 遍历元素,按配置过滤并收集文本
text_parts = []
for element in elements:
if self._should_extract_element(element):
text = str(element)
cleaned_text = self._cleanup_text(text)
if cleaned_text:
if isinstance(element, (Header, Footer)):
prefix = "[Header] " if isinstance(element, Header) else "[Footer] "
text_parts.append(f"{prefix}{cleaned_text}")
else:
text_parts.append(cleaned_text)
return self.config.paragraph_separator.join(text_parts)
except Exception as e:
self.logger.error(f"Extraction failed: {e}")
raise
def main():
"""主函数:演示用法"""
# 配置
config = TextExtractorConfig(
extract_headers_footers=True,
extract_tables=True,
extract_lists=True,
extract_titles=True,
text_cleanup={
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
}
)
# 创建提取器
extractor = UnstructuredTextExtractor(config)
# 使用示例
try:
# 替换为实际的文件路径
sample_file = './crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf'
if Path(sample_file).exists():
text = extractor.extract_text(sample_file)
print("提取的文本:")
print(text)
else:
print(f"示例文件 {sample_file} 不存在")
print("\n支持的格式:", extractor.get_supported_formats())
except Exception as e:
print(f"错误: {e}")
if __name__ == "__main__":
main()
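
The extractor is not PDF-specific. This sketch (not part of the commit) writes a small Markdown file and extracts it with the "fast" strategy; it assumes the unstructured extras needed for Markdown partitioning are installed.

import tempfile
from pathlib import Path

sample = Path(tempfile.mkdtemp()) / "note.md"
sample.write_text("# Demo\n\nFirst paragraph.\n\n- item one\n- item two\n", encoding="utf-8")

extractor = UnstructuredTextExtractor(TextExtractorConfig(extract_lists=True, extract_titles=True))
print(extractor.extract_text(sample, strategy="fast"))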

View file

@@ -0,0 +1,219 @@
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, Optional, Union
from urllib.parse import urlparse
import logging
import trafilatura
import requests
from pathlib import Path
@dataclass
class WebExtractorConfig:
"""网页内容提取器配置类
Attributes:
extract_comments: 是否提取评论
extract_tables: 是否提取表格
extract_links: 是否保留链接信息
paragraph_separator: 段落分隔符
timeout: 网络请求超时时间(秒)
max_retries: 最大重试次数
user_agent: 自定义User-Agent
text_cleanup: 文本清理选项
"""
extract_comments: bool = False
extract_tables: bool = True
extract_links: bool = False
paragraph_separator: str = '\n\n'
timeout: int = 10
max_retries: int = 3
user_agent: str = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
})
class WebTextExtractor:
"""网页文本内容提取器
使用trafilatura库提取网页中的主要文本内容,去除广告、导航等无关内容。
"""
def __init__(self, config: Optional[WebExtractorConfig] = None):
"""初始化提取器
Args:
config: 提取器配置对象,如果为None则使用默认配置
"""
self.config = config or WebExtractorConfig()
self._setup_logging()
def _setup_logging(self) -> None:
"""配置日志记录器"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger(__name__)
# 添加文件处理器
fh = logging.FileHandler('web_extractor.log')
fh.setLevel(logging.ERROR)
self.logger.addHandler(fh)
def _validate_url(self, url: str) -> bool:
"""验证URL格式是否有效
Args:
url: 网页URL
Returns:
bool: URL是否有效
"""
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except Exception:
return False
def _download_webpage(self, url: str) -> Optional[str]:
"""下载网页内容
Args:
url: 网页URL
Returns:
Optional[str]: 网页HTML内容,失败返回None
Raises:
Exception: 下载失败时抛出异常
"""
headers = {'User-Agent': self.config.user_agent}
for attempt in range(self.config.max_retries):
try:
response = requests.get(
url,
headers=headers,
timeout=self.config.timeout
)
response.raise_for_status()
return response.text
except requests.RequestException as e:
self.logger.warning(f"Attempt {attempt + 1} failed: {e}")
if attempt == self.config.max_retries - 1:
raise Exception(f"Failed to download webpage after {self.config.max_retries} attempts: {e}")
return None
def _cleanup_text(self, text: str) -> str:
"""清理文本
Args:
text: 原始文本
Returns:
str: 清理后的文本
"""
if not text:
return ""
if self.config.text_cleanup['remove_extra_spaces']:
text = ' '.join(text.split())
if self.config.text_cleanup['normalize_whitespace']:
text = text.replace('\t', ' ').replace('\r', '\n')
if self.config.text_cleanup['lowercase']:
text = text.lower()
return text.strip()
def extract_text(self, url: str) -> str:
"""提取网页文本内容
Args:
url: 网页URL
Returns:
str: 提取的文本内容
Raises:
ValueError: URL无效时抛出
Exception: 提取失败时抛出
"""
try:
if not self._validate_url(url):
raise ValueError(f"Invalid URL: {url}")
self.logger.info(f"Processing URL: {url}")
# 下载网页
html_content = self._download_webpage(url)
if not html_content:
raise Exception("Failed to download webpage")
# 配置trafilatura提取选项
extract_config = {
'include_comments': self.config.extract_comments,
'include_tables': self.config.extract_tables,
'include_links': self.config.extract_links,
'no_fallback': False, # 允许使用后备提取器
}
# 提取文本
extracted_text = trafilatura.extract(
html_content,
**extract_config
)
if not extracted_text:
raise Exception("No content could be extracted")
# 清理文本
cleaned_text = self._cleanup_text(extracted_text)
return cleaned_text
except Exception as e:
self.logger.error(f"Extraction failed: {e}")
raise
def main():
"""主函数:演示用法"""
# 配置
config = WebExtractorConfig(
extract_comments=False,
extract_tables=True,
extract_links=False,
timeout=10,
text_cleanup={
'remove_extra_spaces': True,
'normalize_whitespace': True,
'remove_special_chars': False,
'lowercase': False
}
)
# 创建提取器
extractor = WebTextExtractor(config)
# 使用示例
try:
# 替换为实际的URL
sample_url = 'https://arxiv.org/abs/2412.00036'
text = extractor.extract_text(sample_url)
print("提取的文本:")
print(text)
except Exception as e:
print(f"错误: {e}")
if __name__ == "__main__":
main()
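
A final sketch (not part of the commit): tighten the network settings and persist the extracted text to disk. The URL is the same arXiv page used in main(); the output file name is arbitrary.

from pathlib import Path

extractor = WebTextExtractor(WebExtractorConfig(timeout=30, max_retries=2))
page_text = extractor.extract_text("https://arxiv.org/abs/2412.00036")
Path("abs_2412_00036.txt").write_text(page_text, encoding="utf-8")
print(f"saved {len(page_text)} characters")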