Installation tests

  • llama-index
  • llama-index-llms-ollama
  • llama-index-embeddings-ollama
  • llama-index-readers-file reads many document formats (the core install only handles plain text); use it together with SimpleDirectoryReader to parse documents
  • python-docx parses .docx files
  • pandas, openpyxl, xlrd parse .xlsx files
  • chromadb llama-index-vector-stores-chroma
  • llama-index-retrievers-bm25, jieba (pickle is a built-in module, no install needed); good for corpora under 10,000 documents
from llama_index.core import SimpleDirectoryReader

documents = SimpleDirectoryReader(
    input_dir="data",
    required_exts=[".docx", ".xlsx"],  # only pick up these formats, skip unrelated files
    recursive=True  # optional: recurse into sub-directories (True = recurse, False = root directory only)
).load_data()

File parsing

pip install llama-index-readers-file
pip install python-docx pandas openpyxl xlrd

Core installation

pip install llama-index
pip install llama-index-embeddings-ollama llama-index-llms-ollama
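
A minimal sketch of wiring these two packages into the global Settings; the model names qwen2.5 and nomic-embed-text are placeholders for whatever models the local Ollama server has pulled:

from llama_index.core import Settings
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.ollama import OllamaEmbedding

# Assumed model names: replace with models available on the local Ollama server
Settings.llm = Ollama(model="qwen2.5", request_timeout=120.0)
Settings.embed_model = OllamaEmbedding(model_name="nomic-embed-text")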

External persistence

pip install llama-index-retrievers-bm25 jieba
pip install chromadb llama-index-vector-stores-chroma 
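
A minimal sketch of what the Chroma packages are for, assuming the Ollama embedding model above is configured in Settings; the persistence path ./chroma_db and the collection name docs are arbitrary choices:

import chromadb
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore

# Assumed persistence path and collection name
db = chromadb.PersistentClient(path="./chroma_db")
collection = db.get_or_create_collection("docs")
vector_store = ChromaVectorStore(chroma_collection=collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Embed and store the documents in Chroma instead of the in-memory default
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)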

Read files and build a keyword BM25 index (new files can be appended, but deleting or modifying documents requires a full rebuild)

from typing import List
import jieba
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers.bm25 import BM25Retriever

def chinese_tokenizer(text: str) -> List[str]:
    return list(jieba.cut(text))

# 1. Read documents and split them into nodes
documents = SimpleDirectoryReader("data").load_data()
splitter = SentenceSplitter(chunk_size=512, chunk_overlap=30)
new_nodes = splitter.get_nodes_from_documents(documents)

# 2. Build the BM25 retriever and swap in the Chinese tokenizer
retriever = BM25Retriever(nodes=new_nodes, similarity_top_k=10)
retriever.tokenizer = chinese_tokenizer

# This step is optional
# 3. Default query
results = retriever.retrieve("author growing up")
print(f"Recalled {len(results)} hits")

# Add several new files and split them

input_files = ["data/file1.docx", "data/file2.txt", "data/file3.pdf"]
docs = SimpleDirectoryReader(input_files=input_files).load_data()

# 2. Split directly (no need to branch on format)
splitter = SentenceSplitter(chunk_size=512, chunk_overlap=30)
nodes = splitter.get_nodes_from_documents(docs)  # one step

# Rebuild the retriever so the newly added files are included
retriever = BM25Retriever(nodes=new_nodes + nodes, similarity_top_k=10)
retriever.tokenizer = chinese_tokenizer

# This step is optional
# 3. Default query
results = retriever.retrieve("author growing up??")
print(f"Recalled {len(results)} hits")


# To resume later, start from the persisted state: load the pickle and rebuild the retriever
import pickle

with open("./idx/bm25.pkl", "rb") as f:
    saved = pickle.load(f)
bm25_retriever = BM25Retriever(nodes=saved["nodes"], similarity_top_k=saved["top_k"])
bm25_retriever.tokenizer = saved["tokenizer"]

# Parameters can be tuned on the fly
bm25_retriever.similarity_top_k = 15
##-----------

from pathlib import Path
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter

# 1. Read the newly added files
new_docs = SimpleDirectoryReader("new_data").load_data()

# 2. They must be split into nodes first (the official example omits this step)
splitter = SentenceSplitter(chunk_size=512, chunk_overlap=30)
new_nodes = splitter.get_nodes_from_documents(new_docs)

# 3. Incremental add: the docs mention add_nodes, but 0.6.0 only exposes retrieve,
#    so the retriever has to be rebuilt from the complete node list
nodes = saved["nodes"] + new_nodes
bm25_retriever = BM25Retriever(nodes=nodes, similarity_top_k=10)
bm25_retriever.tokenizer = chinese_tokenizer

# 4. Overwrite the pickle with the combined nodes
Path("./idx").mkdir(exist_ok=True)
with open("./idx/bm25.pkl", "wb") as f:
    pickle.dump({"nodes": nodes, "top_k": 10, "tokenizer": chinese_tokenizer}, f)

# Query (official example step 5: retrieve)
results = bm25_retriever.retrieve("作者童年做了什么")
for node in results:
    print(node.text[:200], node.score)


# Fusion retrieval
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex

# 1. Vector retriever
vector_index = VectorStoreIndex.from_documents(documents)
vector_retriever = vector_index.as_retriever(similarity_top_k=10)

# 2. BM25 retriever, rebuilt from the pickled dict as shown above
with open("./idx/bm25.pkl", "rb") as f:
    saved = pickle.load(f)
bm25_retriever = BM25Retriever(nodes=saved["nodes"], similarity_top_k=saved["top_k"])
bm25_retriever.tokenizer = saved["tokenizer"]





# 3. Fuse the two retrievers
fusion_retriever = QueryFusionRetriever(
    [vector_retriever, bm25_retriever],
    similarity_top_k=10,
)

# 4. Post-processing chain (same as before)
query_engine = RetrieverQueryEngine(
    retriever=fusion_retriever,
    node_postprocessors=[...],
)
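
A short usage sketch for the fused setup (the question string is only an example; note that QueryFusionRetriever also calls the configured LLM to generate query variations unless num_queries=1 is passed):

# Recall only, through the fusion retriever
for n in fusion_retriever.retrieve("作者童年做了什么"):
    print(n.score, n.text[:200])

# Full question answering through the engine
response = query_engine.query("作者童年做了什么")
print(response)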





Chinese optimization: jieba; the tokenizer (tokenizer=chinese_tokenizer) has to be wired in when the retriever is built

from typing import List
import jieba
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers.bm25 import BM25Retriever

def chinese_tokenizer(text: str) -> List[str]:
    return list(jieba.cut(text))

# 1. Read every document (including the incremental directory)
all_docs = (SimpleDirectoryReader("data").load_data() +
            SimpleDirectoryReader("new_data").load_data())
nodes = SentenceSplitter(chunk_size=512, chunk_overlap=30).get_nodes_from_documents(all_docs)

retriever = BM25Retriever(nodes=nodes, similarity_top_k=10)
retriever.tokenizer = chinese_tokenizer

A simple version that just works:

# -*- coding: utf-8 -*-
# Interface observed in testing 0.6.0: only retrieve, no add/add_nodes; a full one-shot rebuild is the only option
import pickle, jieba
from pathlib import Path
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers.bm25 import BM25Retriever

def chinese_tokenizer(text: str) -> list[str]:
    return list(jieba.cut(text))

# 1. Read every document (including the incremental directory)
all_docs = (SimpleDirectoryReader("data").load_data() +
            SimpleDirectoryReader("new_data").load_data())

# 2. Split into nodes
nodes = SentenceSplitter(chunk_size=512, chunk_overlap=30).get_nodes_from_documents(all_docs)

# 3. Build the retriever
retriever = BM25Retriever(nodes=nodes, similarity_top_k=10)
retriever.tokenizer = chinese_tokenizer
#retriever.stemmer = None

# 4. Persist to disk
Path("./idx").mkdir(exist_ok=True)
with open("./idx/bm25.pkl", "wb") as f:
    pickle.dump({"nodes": nodes, "top_k": 10, "tokenizer": chinese_tokenizer}, f)

# 5. Query
for n in retriever.retrieve("what is look?"):
    print(n.score, n.text[:200])

Permission-based partitioning (a sketch of this flow follows the list below):

  1. Split the source documents into N physical sub-folders (or database partitions) by permission tag / user →
  2. Build a separate BM25 pickle + vector index for each subset
  3. At query time, pick the pickle & vector store matching the current user's permissions →
  4. Run the same hybrid logic (RRF / weighted fusion) for recall →
  5. Return the final results.

This way:

  • BM25 recall is naturally confined to the subset, so it can never cross permission boundaries
  • the vector side still keeps its metadata, so finer-grained secondary filtering of displayed fields is possible
  • the sub-libraries are fully isolated: adding a user or changing permissions only means adding/rebuilding the corresponding sub-library, without affecting anyone else.
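
A minimal sketch of that flow, under several assumptions: each group's documents live in data/<group>, each group's BM25 pickle sits at ./idx/<group>_bm25.pkl in the dict format used above, the Ollama LLM/embedding settings from the core installation are configured, and the user-to-group mapping plus the helper names (USER_GROUPS, load_group_retrievers, search_for_user) are made up for illustration:

import pickle
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.retrievers.bm25 import BM25Retriever

# Hypothetical user -> permission-group mapping; in practice this comes from the auth system
USER_GROUPS = {"alice": "hr", "bob": "finance"}

def load_group_retrievers(group: str):
    """Load one group's BM25 pickle and rebuild that group's vector index."""
    with open(f"./idx/{group}_bm25.pkl", "rb") as f:
        saved = pickle.load(f)
    bm25_retriever = BM25Retriever(nodes=saved["nodes"], similarity_top_k=saved["top_k"])
    bm25_retriever.tokenizer = saved["tokenizer"]

    docs = SimpleDirectoryReader(f"data/{group}").load_data()
    vector_retriever = VectorStoreIndex.from_documents(docs).as_retriever(similarity_top_k=10)
    return bm25_retriever, vector_retriever

def search_for_user(user: str, question: str):
    group = USER_GROUPS[user]  # pick the sub-library this user is allowed to see
    bm25_r, vec_r = load_group_retrievers(group)
    fusion = QueryFusionRetriever(
        [vec_r, bm25_r],
        similarity_top_k=10,
        num_queries=1,  # only the original query, no LLM-generated variations
    )
    return fusion.retrieve(question)  # recall can never leave the group's sub-library

for n in search_for_user("alice", "年假有几天"):
    print(n.score, n.text[:100])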

New: adding nodes, tested against llama-index-retrievers-bm25 0.6.0

from typing import List
import jieba
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers.bm25 import BM25Retriever

# ===================== Core adaptation: subclass BM25Retriever to support Chinese tokenization =====================
class ChineseBM25Retriever(BM25Retriever):
    """Chinese BM25 retriever adapted to version 0.6.0 (overrides the internal tokenization method)"""
    def _tokenize(self, text: str) -> List[str]:
        """Override the internal tokenizer with jieba Chinese word segmentation"""
        # Filter out empty strings so they don't skew the BM25 statistics
        return [word for word in jieba.cut(text) if word.strip()]

# ===================== 1. Initial load + split + retriever initialization =====================
# Load all documents in the initial directory
documents = SimpleDirectoryReader("data").load_data()
# Initialize the splitter once so the splitting rules stay consistent
splitter = SentenceSplitter(chunk_size=512, chunk_overlap=30)
# Split the initial documents into nodes
original_nodes = splitter.get_nodes_from_documents(documents)

# [Key change] Use the custom Chinese BM25 retriever (0.6.0 has no tokenizer parameter)
retriever = ChineseBM25Retriever(
    nodes=original_nodes,
    similarity_top_k=10  # keep only the supported parameters
)

# Initial query
first_results = retriever.retrieve("what is look")
print(f"Initial query: recalled {len(first_results)} hits")

# ===================== 2. Add more files + split + append =====================
# Load the newly added files (mixed formats)
input_files = ["new_data/fa.txt", "new_data/fa2.txt"]
new_docs = SimpleDirectoryReader(input_files=input_files).load_data()
# Split the new files with the same splitter
new_nodes = splitter.get_nodes_from_documents(new_docs)

# Merge all nodes and re-initialize the retriever (0.6.0 exposes no nodes attribute, so re-initializing is the only option)
all_nodes = original_nodes + new_nodes
retriever = ChineseBM25Retriever(
    nodes=all_nodes,
    similarity_top_k=10
)

# ===================== 3. Query again after adding files =====================
second_results = retriever.retrieve("what is look?")
print(f"Query after adding files: recalled {len(second_results)} hits")

# Verify the node counts
print(f"\nInitial node count: {len(original_nodes)}")
print(f"New node count: {len(new_nodes)}")
print(f"Total nodes in the current retriever: {len(all_nodes)}")