python打包 pyinstaller PIL

带logo 无黑框打包

pyinstaller -F -w -i logo.ico x.py

无logo 无黑框

pyinstaller -F -w x.py

普通打包 一个 单文件exe 有黑框

pyinstaller -F x.py

普通打包 非单文件exe有黑框

pyinstaller x.py

PIL 打包坑点 需要指定 字体

#否则会失败

# Rendering options for python-barcode's ImageWriter.
# NOTE: 'font_path' must point at a real font file when frozen with
# PyInstaller, otherwise text rendering fails at runtime.
options = dict(
    module_width=0.24,    # width of one bar module (mm)
    module_height=6.0,    # bar height (mm)
    font_size=8,          # caption font size
    text_distance=3,      # gap between bars and caption
    quiet_zone=4,         # blank margin around the code
    font_path=r'C:\Windows\Fonts\arial.ttf',  # Arial (digits/ASCII only)
)
发表在 None | 留下评论

Tkinter 输入回车返回

import tkinter as tk
from tkinter import messagebox

def submit_content(event=None):
    """
    Submit the current text of the entry widget.

    Args:
        event: Filled in automatically by Tkinter key bindings; defaults to
            None so the same function also works as a Button command.
    """
    # Read the entry text and strip surrounding whitespace.
    content = entry.get().strip()

    # Reject empty input.
    if not content:
        # BUG FIX: tkinter.messagebox has no `warning` function — the correct
        # API is `showwarning` (the original call raised AttributeError).
        messagebox.showwarning("提示", "输入框不能为空!")
        # Clear the entry and refocus it for the next attempt.
        entry.delete(0, tk.END)
        entry.focus()
        return

    # Demo "submit" action — replace with real business logic as needed.
    messagebox.showinfo("提交成功", f"你提交的内容是:{content}")

    # Clear the entry and keep focus so the user can keep typing.
    entry.delete(0, tk.END)
    entry.focus()

# Create the main window.
root = tk.Tk()
root.title("回车提交输入框内容")
root.geometry("400x200")

# Create the single-line text entry.
entry = tk.Entry(
    root,
    font=("Arial", 14),
    width=30
)
entry.pack(pady=30)

# Key point: bind the Enter key (<Return>) to the submit handler.
entry.bind("<Return>", submit_content)

# Optional: a submit button for mouse users.
submit_btn = tk.Button(
    root,
    text="提交",
    font=("Arial", 12),
    command=submit_content  # called with no arguments, hence event=None default
)
submit_btn.pack()

# Give the entry focus on startup so the user can type immediately.
entry.focus()

# Enter the Tk event loop.
root.mainloop()
发表在 None | 留下评论

打印机 python使用 Pillow pywin32

import barcode
from barcode.writer import ImageWriter
from PIL import Image,ImageWin
import win32print
import win32ui

# Barcode rendering options for python-barcode's ImageWriter.
options = dict(
    module_width=0.24,   # width of one bar module (mm)
    module_height=6.0,   # bar height (mm)
    font_size=8,         # caption font size
    text_distance=3,     # gap between bars and caption
    quiet_zone=4,        # blank margin around the code
)


def print_img(image_path):
    """Send the image at *image_path* to the installed printer whose name
    contains '750'.

    Returns:
        bool: True if a matching printer was found and the job was submitted,
        False otherwise.
    """
    # Locate the target printer by name substring (last match wins, matching
    # the original behavior). The unused index bookkeeping was removed.
    printer_name = None
    for printer in (p[2] for p in win32print.EnumPrinters(2)):
        if '750' in printer:
            printer_name = printer
    if printer_name is None:
        return False

    print("已经提交给打印机", printer_name, "打印条码")
    image = Image.open(image_path)
    hDC = win32ui.CreateDC()
    hDC.CreatePrinterDC(printer_name)
    try:
        hDC.StartDoc(image_path)
        hDC.StartPage()
        # Blit the image 1:1 at the page origin.
        dib = ImageWin.Dib(image)
        dib.draw(hDC.GetHandleOutput(), (0, 0, image.width, image.height))
        hDC.EndPage()
        hDC.EndDoc()
    finally:
        # Release the printer DC even if drawing raises (was leaked before).
        del hDC
    return True

def makeCode(code):
    """Render *code* as a Code128 barcode image and send it to the printer."""
    writer_cls = barcode.get_barcode_class('code128')
    code128_obj = writer_cls(code, writer=ImageWriter())
    # save() returns the path it wrote ('print_swap' plus an extension).
    saved_path = code128_obj.save('print_swap', options=options)
    print_img(saved_path)


# Demo: print one barcode for a fixed tracking code.
code='HTA101CWGL0120260001'
makeCode(code)


发表在 None | 留下评论

llamaindex 向量化VectorStoreIndex

1.使用VectorStoreIndex最简单的用法:VectorStoreIndex.from_documents(documents) 直接读取 Documents 文档。

2.使用读取切分好的节点(常用)VectorStoreIndex(nodes) 切分好的节点。

不常用:查询 query_engine=index.as_query_engine(similarity_top_k=3),再 query_engine.query("xxxxxx?") 即可


chroma设置

首先需要导入

import chromadb
from llama_index.vector_stores.chroma import ChromaVectorStore


from llama_index.core import SimpleDirectoryReader,VectorStoreIndex,Settings,StorageContext,load_index_from_storage

创建chroma存储目录以及集合名称

# chroma初始化 存储目录
chroma_client=chromadb.PersistentClient(path="./storage/chroma")
#chroma的向量集合
chroma_context=chroma_client.get_or_create_collection("My_collection")

使用llama_index加入刚刚创建的db 中的合集转为chromavectorstore对象并最终转为Storage对象用于处理

# 使用llama_index 来使用chroma库 导入建刚刚创建的chroma集合 导入向量库
vector_store=ChromaVectorStore(chroma_collection=chroma_context)
#整合向量存储的方法 用于下面的保存操作 index=VectorStoreIndex(new_nodes,storage_context=storage_context)
storage_context=StorageContext.from_defaults(vector_store=vector_store)

向量化 及关联使用storage对象,最终使用persist方法存储 (persist_dir 其中还存储向量与节点关系数据【Chroma 存向量,persist 存关联】)

index=VectorStoreIndex(new_nodes,storage_context=storage_context)
# 该处负责存储,需要执行后才能存储 且制定了目录。必须使用persist_dir 其中还存储向量与节点关系数据
index.storage_context.persist(persist_dir="./storage")

载入chroma的合集

#并将对象转为llamaindex的ChromaVectorStore向量对象

loaded_client=chromadb.PersistentClient(path='./storage/chroma')
loaded_collection=loaded_client.get_collection("My_collection")

#载入chroma文件中的collection到向量对象

loaded_vector_store=ChromaVectorStore(chroma_collection=loaded_collection)

# 使用StorageContext 载入转为Storage对象用于载入

loaded_storage_context=StorageContext.from_defaults(

    persist_dir="./storage",

    vector_store=loaded_vector_store

)

#将storage对象转为可用的index,后续用于查询

index=load_index_from_storage(loaded_storage_context)

query_engine=index.as_query_engine(similarity_top_k=3)

ee=query_engine.query("where is Link")

print(ee)

整体DEMO代码

from llama_index.core import SimpleDirectoryReader,VectorStoreIndex,Settings,StorageContext,load_index_from_storage
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers import bm25
# from llama_index.core.retrievers import BM25Retriever
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.ollama import OllamaEmbedding
import jieba
import time
import joblib
import chromadb
from llama_index.vector_stores.chroma import ChromaVectorStore


# Ollama endpoint and the embedding / chat model names it serves.
OLLAMA_URL="http://127.0.0.1:11434"
EMBED_MODEL="qwen3-embedding:0.6b"
LLM_MODEL="qwen3:0.6b"

# Register the embedding model and LLM in llama_index global Settings.
Settings.embed_model=OllamaEmbedding(
    model_name=EMBED_MODEL,base_url=OLLAMA_URL
)
Settings.llm=Ollama(
    model=LLM_MODEL,base_url=OLLAMA_URL
)

# Chroma init: persistent on-disk storage directory.
chroma_client=chromadb.PersistentClient(path="./storage/chroma")
# Chroma collection that holds the vectors.
chroma_context=chroma_client.get_or_create_collection("My_collection")

# Wrap the Chroma collection so llama_index can use it as a vector store.
vector_store=ChromaVectorStore(chroma_collection=chroma_context)
# Storage context used later by: index=VectorStoreIndex(new_nodes,storage_context=storage_context)
storage_context=StorageContext.from_defaults(vector_store=vector_store)

# Load every document under ./data.
documents=SimpleDirectoryReader("data").load_data()

print(documents)

# Split documents into ~512-char nodes with 30-char overlap.
splitter=SentenceSplitter(chunk_size=512,chunk_overlap=30)

new_nodesx=splitter.get_nodes_from_documents(documents)

print(new_nodesx)

# Load a single extra document.
documents=SimpleDirectoryReader(input_files=["add.docx",]).load_data()

print(documents)

# Attach custom metadata; it propagates into the nodes created below.
documents[0].metadata['MAC']='adgfa-192ga'
documents[0].metadata['document_id']=documents[0].id_

print(documents)
new_nodes=splitter.get_nodes_from_documents(documents)
print(new_nodes)

class ChineseBM25Retriever(bm25.BM25Retriever):
    """BM25 retriever that tokenizes with jieba so Chinese text splits into words."""

    def _tokenize(self, text):
        # Drop whitespace-only tokens produced by jieba.
        return [token for token in jieba.cut(text) if token.strip()]

# Build the jieba-backed BM25 retriever from the nodes and time it.
c=time.time()
retriever=ChineseBM25Retriever(nodes=new_nodes,similarity_top_k=10)
print(time.time()-c,retriever)

# c=time.time()
# retriever=ChineseBM25Retriever(nodes=new_nodesx,similarity_top_k=10)
# print(time.time()-c,retriever)
# Persist the BM25 retriever to disk for later reloading.
retriever.persist("./bm25_retriever")


retrieved_nodes = retriever.retrieve(
    "What is link?"
)
for node in retrieved_nodes:
    print(node)


# Drop the in-memory retriever and reload it from the persisted directory.
del retriever
retriever = bm25.BM25Retriever.from_persist_dir("./bm25_retriever")

print("Reload BM25 from disk")
retrieved_nodes = retriever.retrieve(
    "What is link?"
)
for node in retrieved_nodes:
    print(node)


# Vectorize the nodes into the Chroma-backed index.
index=VectorStoreIndex(new_nodes,storage_context=storage_context)
# persist() must run for the node/vector relationship data to be written;
# persist_dir holds the docstore/index metadata (Chroma holds the vectors).
index.storage_context.persist(persist_dir="./storage")

query_engine=index.as_query_engine(similarity_top_k=3)
ee=query_engine.query("where is Link")
print(ee)

print('Load')

# Load the persisted Chroma collection back from disk.
loaded_client=chromadb.PersistentClient(path='./storage/chroma')
loaded_collection=loaded_client.get_collection("My_collection")
# Wrap the loaded collection as a llama_index ChromaVectorStore.
loaded_vector_store=ChromaVectorStore(chroma_collection=loaded_collection)

# Rebuild a StorageContext from the persisted metadata plus the vector store.
loaded_storage_context=StorageContext.from_defaults(
    persist_dir="./storage",
    vector_store=loaded_vector_store
)

# Turn the storage context back into a queryable index.
index=load_index_from_storage(loaded_storage_context)
query_engine=index.as_query_engine(similarity_top_k=3)
ee=query_engine.query("where is Link")
print(ee)
发表在 None | 留下评论

Llamaindex BM25 实验

还是得实践和查看官方文档,https://developers.llamaindex.ai/python/examples/retrievers/bm25_retriever/ 之前学习被AI坑了好久全是AI幻觉,下面的是2026-01-07 可以运行代码。(Name: llama-index Version: 0.14.10;Name: llama-index-retrievers-bm25 Version: 0.6.5)
主要坑点BM25的存储 retriever.persist("./bm25_retriever") 和载入 retriever = bm25.BM25Retriever.from_persist_dir("./bm25_retriever")
AI老说这个是已经失效了,让我用pickle,但是BM25 实现算法又不能全部pickle下来,后来想到直接用node去直接生成BM25,但是BM25应对中文需要jieba分词,分词效率每秒100万字,且需要启动时间,所以如果500片2000千字的文章转换成的节点至少要2秒左右才能生成BM25 的retriever对象!AI害人不浅,全是幻觉。

from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.retrievers import bm25
# from llama_index.core.retrievers import BM25Retriever
import jieba
import time
import joblib
# Load every document under ./data.
documents=SimpleDirectoryReader("data").load_data()

print(documents)

# Split documents into ~512-char nodes with 30-char overlap.
splitter=SentenceSplitter(chunk_size=512,chunk_overlap=30)

new_nodesx=splitter.get_nodes_from_documents(documents)

print(new_nodesx)

# Load a single extra document.
documents=SimpleDirectoryReader(input_files=["add.docx",]).load_data()

print(documents)

# Attach custom metadata; it propagates into the nodes created below.
documents[0].metadata['MAC']='adgfa-192ga'
documents[0].metadata['document_id']=documents[0].id_

print(documents)
new_nodes=splitter.get_nodes_from_documents(documents)
print(new_nodes)

class ChineseBM25Retriever(bm25.BM25Retriever):
    """BM25 retriever whose tokenizer uses jieba for Chinese word segmentation."""

    def _tokenize(self, text):
        # filter(str.strip, ...) keeps exactly the tokens with non-blank content.
        return list(filter(str.strip, jieba.cut(text)))

# Build the jieba-backed BM25 retriever and time the construction.
c=time.time()
retriever=ChineseBM25Retriever(nodes=new_nodes,similarity_top_k=10)
print(time.time()-c,retriever)

# c=time.time()
# retriever=ChineseBM25Retriever(nodes=new_nodesx,similarity_top_k=10)
# print(time.time()-c,retriever)
# Persist the BM25 retriever to disk (this API does work; see post notes).
retriever.persist("./bm25_retriever")


retrieved_nodes = retriever.retrieve(
    "What is link?"
)
for node in retrieved_nodes:
    print(node)


# Drop the in-memory retriever and reload it from the persisted directory.
del retriever
retriever = bm25.BM25Retriever.from_persist_dir("./bm25_retriever")

print("Reload BM25 from disk")
retrieved_nodes = retriever.retrieve(
    "What is link?"
)
for node in retrieved_nodes:
    print(node)

执行结果:

(base) EgoistdeMacBook-Pro:rag maysrp$ python doc.py
/opt/anaconda3/lib/python3.13/site-packages/jieba/_compat.py:18: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.
  import pkg_resources
2026-01-07 22:47:06,574 - INFO - NumExpr defaulting to 12 threads.
[Document(id_='de9ebe13-a86b-4331-a6aa-8046de5263d1', embedding=None, metadata={'file_name': 'Suzhou.docx', 'file_path': '/Users/maysrp/rag/data/Suzhou.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10159, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\n', text_resource=MediaResource(embeddings=None, data=None, text='Suzhou is Link’s best city in her live.', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\n\n{content}'), Document(id_='92e769b9-e293-48e2-a5ac-743a753b167a', embedding=None, metadata={'file_name': 'game.docx', 'file_path': '/Users/maysrp/rag/data/game.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10138, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\n', text_resource=MediaResource(embeddings=None, data=None, text='Link love play Switch game!', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\n\n{content}'), Document(id_='7cc63b7a-19fb-4d27-b58d-266101e2c49f', embedding=None, metadata={'file_name': 'sutdent.docx', 'file_path': '/Users/maysrp/rag/data/sutdent.docx', 
'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10142, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\n', text_resource=MediaResource(embeddings=None, data=None, text='Link is high school sutdent!', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\n\n{content}'), Document(id_='9d3ee148-060d-4752-9a31-504714eb43ae', embedding=None, metadata={'file_path': '/Users/maysrp/rag/data/test.txt', 'file_name': 'test.txt', 'file_type': 'text/plain', 'file_size': 25, 'creation_date': '2025-12-27', 'last_modified_date': '2025-10-08'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\n', text_resource=MediaResource(embeddings=None, data=None, text='i am a look!\nThanks alot\n', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\n\n{content}')]
[TextNode(id_='98437de2-9ad7-451d-aa2f-c72c778866f7', embedding=None, metadata={'file_name': 'Suzhou.docx', 'file_path': '/Users/maysrp/rag/data/Suzhou.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10159, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='de9ebe13-a86b-4331-a6aa-8046de5263d1', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'file_name': 'Suzhou.docx', 'file_path': '/Users/maysrp/rag/data/Suzhou.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10159, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, hash='22704cae161d866fe704741b55317fb4f2baf40fd456bfee0c030bd4a9f37e81')}, metadata_template='{key}: {value}', metadata_separator='\n', text='Suzhou is Link’s best city in her live.', mimetype='text/plain', start_char_idx=0, end_char_idx=39, metadata_seperator='\n', text_template='{metadata_str}\n\n{content}'), TextNode(id_='aebe5d8c-e8e0-4e91-8641-2287c045a062', embedding=None, metadata={'file_name': 'game.docx', 'file_path': '/Users/maysrp/rag/data/game.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10138, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={<NodeRelationship.SOURCE: '1'>: 
RelatedNodeInfo(node_id='92e769b9-e293-48e2-a5ac-743a753b167a', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'file_name': 'game.docx', 'file_path': '/Users/maysrp/rag/data/game.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10138, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, hash='a4ef473a6499b1e9a6d082cc6e859661785a05b99aeaf00c34acfb969565e11f')}, metadata_template='{key}: {value}', metadata_separator='\n', text='Link love play Switch game!', mimetype='text/plain', start_char_idx=0, end_char_idx=27, metadata_seperator='\n', text_template='{metadata_str}\n\n{content}'), TextNode(id_='dcf48ae6-1d63-4ccf-b21f-ba12852a59df', embedding=None, metadata={'file_name': 'sutdent.docx', 'file_path': '/Users/maysrp/rag/data/sutdent.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10142, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='7cc63b7a-19fb-4d27-b58d-266101e2c49f', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'file_name': 'sutdent.docx', 'file_path': '/Users/maysrp/rag/data/sutdent.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10142, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, hash='dce4f61a4f5f77866c8f3baa92c20997bad74b8ea4b00b6bdbcdca6b5fd240ce')}, metadata_template='{key}: {value}', metadata_separator='\n', text='Link is high school sutdent!', mimetype='text/plain', start_char_idx=0, end_char_idx=28, metadata_seperator='\n', text_template='{metadata_str}\n\n{content}'), 
TextNode(id_='affc51cf-137b-4306-a047-be08aa95ac62', embedding=None, metadata={'file_path': '/Users/maysrp/rag/data/test.txt', 'file_name': 'test.txt', 'file_type': 'text/plain', 'file_size': 25, 'creation_date': '2025-12-27', 'last_modified_date': '2025-10-08'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='9d3ee148-060d-4752-9a31-504714eb43ae', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'file_path': '/Users/maysrp/rag/data/test.txt', 'file_name': 'test.txt', 'file_type': 'text/plain', 'file_size': 25, 'creation_date': '2025-12-27', 'last_modified_date': '2025-10-08'}, hash='34192438b5e50e0927a1f41c8e40814c5c6e2078c7ab4ec91c732c9938944f50')}, metadata_template='{key}: {value}', metadata_separator='\n', text='i am a look!\nThanks alot', mimetype='text/plain', start_char_idx=0, end_char_idx=24, metadata_seperator='\n', text_template='{metadata_str}\n\n{content}')]
[Document(id_='4221065b-b64d-4c8b-8807-94be61940f8e', embedding=None, metadata={'file_name': 'add.docx', 'file_path': 'add.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10121, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\n', text_resource=MediaResource(embeddings=None, data=None, text='Link like  NIkon camera .he has a camre that is name is z8', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\n\n{content}')]
[Document(id_='4221065b-b64d-4c8b-8807-94be61940f8e', embedding=None, metadata={'file_name': 'add.docx', 'file_path': 'add.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10121, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03', 'MAC': 'adgfa-192ga', 'document_id': '4221065b-b64d-4c8b-8807-94be61940f8e'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\n', text_resource=MediaResource(embeddings=None, data=None, text='Link like  NIkon camera .he has a camre that is name is z8', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\n\n{content}')]
[TextNode(id_='e225ff5c-48db-4e3e-a741-90194180af44', embedding=None, metadata={'file_name': 'add.docx', 'file_path': 'add.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10121, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03', 'MAC': 'adgfa-192ga', 'document_id': '4221065b-b64d-4c8b-8807-94be61940f8e'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='4221065b-b64d-4c8b-8807-94be61940f8e', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'file_name': 'add.docx', 'file_path': 'add.docx', 'file_type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'file_size': 10121, 'creation_date': '2026-01-03', 'last_modified_date': '2026-01-03', 'MAC': 'adgfa-192ga', 'document_id': '4221065b-b64d-4c8b-8807-94be61940f8e'}, hash='d907ec8b73619251ad897fc95f680465711bbec8cda67a3494f0d5e6e0e27686')}, metadata_template='{key}: {value}', metadata_separator='\n', text='Link like  NIkon camera .he has a camre that is name is z8', mimetype='text/plain', start_char_idx=0, end_char_idx=58, metadata_seperator='\n', text_template='{metadata_str}\n\n{content}')]
2026-01-07 22:47:07,035 - DEBUG - Building index from IDs objects
2026-01-07 22:47:07,289 - WARNING - As bm25s.BM25 requires k less than or equal to number of nodes added. Overriding the value of similarity_top_k to number of nodes added.
0.267697811126709 <__main__.ChineseBM25Retriever object at 0x14b823620>
Finding newlines for mmindex: 100%|████████| 2.11k/2.11k [00:00<00:00, 19.1MB/s]
Node ID: e225ff5c-48db-4e3e-a741-90194180af44
Text: Link like  NIkon camera .he has a camre that is name is z8
Score:  0.115

Reload BM25 from disk
Node ID: e225ff5c-48db-4e3e-a741-90194180af44
Text: Link like  NIkon camera .he has a camre that is name is z8
Score:  0.115

发表在 None | 留下评论