import re
import time

import streamlit as st

from llama_index.core import PromptTemplate, StorageContext, load_index_from_storage
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.postprocessor import (
    MetadataReplacementPostProcessor,
    SentenceTransformerRerank,
)
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama
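
# Local models served through a running Ollama instance. The exact model tags
# are assumptions: pull them first (e.g. `ollama pull deepseek-r1:1.5b` and
# `ollama pull nomic-embed-text`), and the embedding model must be the same
# one used when the index in ./sentence_index was built.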
llm = Ollama(model="deepseek-r1:1.5b", request_timeout=120.0)
ollama_embedding = OllamaEmbedding(model_name="nomic-embed-text")
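

# Sentence splitter for the window parser: splits on "." or "?" while skipping
# abbreviations such as "e.g." and honorifics such as "Mr.".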
def custom_sentence_splitter(text):
    return re.split(r"(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s+", text)
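

# Parser that stores one sentence per node plus a 2-sentence window of
# surrounding context in metadata; this is assumed to match the settings used
# when the persisted index was built.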
node_parser = SentenceWindowNodeParser.from_defaults(
    sentence_splitter=custom_sentence_splitter,
    window_size=2,
    window_metadata_key="window",
    original_text_metadata_key="original_sentence",
)
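
# Load the prebuilt sentence-window index from disk.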
index_dir = "./sentence_index"
sentence_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=index_dir),
    embed_model=ollama_embedding,
)
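
# Cross-encoder reranker that keeps only the 2 strongest retrieved candidates.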
rerank = SentenceTransformerRerank(top_n=2, model="BAAI/bge-reranker-base")
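
# Retrieve 3 candidates, rerank them on the matched sentence, then swap each
# node's text for its surrounding window before answer synthesis.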
query_engine = sentence_index.as_query_engine(
    llm=llm,
    similarity_top_k=3,
    node_postprocessors=[
        rerank,
        MetadataReplacementPostProcessor(target_metadata_key="window"),
    ],
)
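
# QA prompt that frames the model as a customer-support agent grounded in the
# retrieved context.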
qa_prompt_tmpl_str = (
    "You are a helpful and professional customer service chatbot. Your job is to provide accurate, "
    "concise, and friendly responses to customer inquiries based on the provided context.\n"
    "Find the most relevant parts of the context information and answer as a professional customer service agent.\n"
    "---------------------\n"
    "Context Information:\n"
    "{context_str}\n"
    "---------------------\n"
    "Please ensure your response is relevant, clear, and aligns with company policies.\n"
    "If the context does not provide sufficient information, politely ask for clarification.\n"
    "Answer only the question asked; do not use phrases like 'based on the provided context'.\n"
    "Use a professional yet friendly tone.\n\n"
    "Customer Query: {query_str}\n"
    "Response: "
)

qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
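
# Replace the synthesizer's default QA prompt with the custom template.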
query_engine.update_prompts(
    {"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)

with st.sidebar:
    "🚀 Features:"
    "✅ Instant responses to customer queries"
    "✅ 24/7 availability for support"
    "✅ User-friendly and interactive experience"

    "Need help? Chat with the Tonic.lk Support Bot now and get instant assistance! 🚀"

    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
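

# Generator that yields the answer word by word to mimic typing.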
def stream_response(response):
    for word in response.response.split(" "):
        yield word + " "
        time.sleep(0.02)
if "chat_history" not in st.session_state: |
|
|
st.session_state.chat_history = [] |
|
|
|
|
|
|
|
|

st.title("🤖💬 Tonic.lk Customer Support (RAG)")
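
# Replay the conversation so far.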
for chat in st.session_state.chat_history:
    with st.chat_message("user" if chat["role"] == "user" else "assistant"):
        st.markdown(chat["message"])
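
# Handle a new user turn: echo it, run the RAG query, and stream the answer.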
if user_input := st.chat_input("Type your message..."):
    st.session_state.chat_history.append({"role": "user", "message": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response_obj = query_engine.query(user_input)
            response_container = st.empty()
            response_container.write_stream(stream_response(response_obj))

    st.session_state.chat_history.append({"role": "assistant", "message": response_obj.response})