Confluence

https://python.langchain.com/v0.2/docs/integrations/document_loaders/confluence/

https://www.shakudo.io/blog/building-confluence-kb-qanda-app-langchain-chatgpt

%pip install --user -Uq  atlassian-python-api
%pip install --user -Uq  lxml

The limit parameter specifies how many documents are fetched per API call, not the total number of documents retrieved.

from dotenv import load_dotenv
load_dotenv()
from langchain_community.document_loaders import ConfluenceLoader
import os

# Load pages from a personal Confluence space; attachments are skipped
loader = ConfluenceLoader(
    url="https://oomacorp.atlassian.net/wiki",
    username="byungyong.kim@ooma.com",
    api_key=os.environ["CONFLUENCE_API_KEY"],
    space_key="~byungyong.kim",
    include_attachments=False,
    limit=10,
)
documents = loader.load()
documents
len(documents)
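Because limit only caps each request, the loader keeps paginating, so the total can exceed 10. A quick look at the results makes that visible (title and source are the metadata keys I'd expect this loader to set; treat the exact keys as an assumption):

# limit=10 caps each API call, not the total: the loader paginates,
# so len(documents) can be larger than 10
print(len(documents))

# Peek at the first few pages (metadata keys assumed: title, source)
for doc in documents[:3]:
    print(doc.metadata.get("title"), "->", doc.metadata.get("source"))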

Splitting the documents

Split the documents with a tiktoken-based splitter so chunk sizes are measured in tokens.

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split on token counts (via tiktoken) instead of raw characters
splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=1000,
    chunk_overlap=200,
)

docs = loader.load_and_split(text_splitter=splitter)
docs
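As a sanity check, you can re-count the chunks with tiktoken. This sketch assumes the splitter's default gpt2 encoding, so the numbers line up with the chunk_size above:

import tiktoken

# from_tiktoken_encoder defaults to the gpt2 encoding; count with the
# same encoding so the figures match the splitter's notion of length
enc = tiktoken.get_encoding("gpt2")
lengths = [len(enc.encode(d.page_content)) for d in docs]
print(len(docs), "chunks, longest:", max(lengths), "tokens")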

Storing in a vector store

%pip install --user -Uq faiss-cpu

from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore

# Cache computed embeddings on disk so re-runs don't hit the OpenAI API again
cache_dir = LocalFileStore("./cache/")
embeddings = OpenAIEmbeddings()

cached_embeddings = CacheBackedEmbeddings.from_bytes_store(embeddings, cache_dir)
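from_bytes_store also accepts a namespace argument; setting it to the embedding model name keeps cached vectors from colliding if you swap models later. A minimal variant:

# Optional: namespace the cache by model name so switching embedding
# models later doesn't reuse stale cached vectors
cached_embeddings = CacheBackedEmbeddings.from_bytes_store(
    embeddings, cache_dir, namespace=embeddings.model
)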

vector_store = FAISS.from_documents(docs, cached_embeddings)
retriever = vector_store.as_retriever()

# Smoke test; bind to a new name so docs still holds the chunks
results = retriever.invoke("nexus")
results
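The FAISS index lives only in memory, so embedding happens again on every run (the cache only spares the API calls). To persist the index itself, a minimal sketch; allow_dangerous_deserialization is required by recent langchain_community versions when reloading a pickled index:

# Persist the index to disk and reload it later
vector_store.save_local("faiss_index")
vector_store = FAISS.load_local(
    "faiss_index",
    cached_embeddings,
    allow_dangerous_deserialization=True,  # fine here: we wrote this file ourselves
)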

Chain (retrieve from the vector store and send the results to the LLM)

from langchain.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        ("system",
         """
         You are a helpful AI talking to a human. Answer questions using only the following context.
         If you don't know the answer, just say you don't know; don't make it up:
         {context}
         """),
        ("human", "{question}"),
    ]
)

from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
    temperature=0.1,
)
from langchain.schema.runnable import RunnablePassthrough

# The retriever fills {context}; the raw question passes through to {question}
chain = (
    {
        "context": retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | llm
)
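As written, the retriever's list of Document objects is stringified wholesale into {context}. A common refinement is to join just the page contents; a sketch of that variant:

def format_docs(found_docs):
    # Keep only the text; drop the Document(...) repr noise
    return "\n\n".join(d.page_content for d in found_docs)

chain = (
    {
        "context": retriever | format_docs,
        "question": RunnablePassthrough(),
    }
    | prompt
    | llm
)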

Ask a question and send it to the LLM

chain.invoke("What should I watch out for when using OpenNebula?")
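invoke returns an AIMessage, so the answer text is in its .content attribute. The same chain can also stream tokens as they arrive:

# Stream the answer instead of waiting for the full response
for chunk in chain.stream("What should I watch out for when using OpenNebula?"):
    print(chunk.content, end="", flush=True)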
