diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index b5b51e8..e306b56 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -6,6 +6,7 @@ function App() {
   const [messages, setMessages] = useState([]);
   const [input, setInput] = useState("");
   const messagesEndRef = useRef(null);
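+  // true while a /chat request is in flight; drives the "Thinking..." indicator below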
+  const [loading, setLoading] = useState(false);

   const sendMessage = async () => {
     if (!input.trim()) return;
@@ -13,16 +14,21 @@ function App() {
     const userMessage = { role: "user", content: input };
     setMessages(prev => [...prev, userMessage]);
     setInput("");
+    setLoading(true); // 🚀 show loader
-    const res = await fetch("/chat", {
-      method: "POST",
-      headers: { "Content-Type": "application/json" },
-      body: JSON.stringify({ user_id: "user1", message: input })
-    });
+    try {
+      const res = await fetch("/chat", {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({ user_id: "user1", message: input })
+      });
-    const data = await res.json();
-    const botMessage = { role: "assistant", content: data.response };
-    setMessages(prev => [...prev, botMessage]);
+      const data = await res.json();
+      const botMessage = { role: "assistant", content: data.response };
+      setMessages(prev => [...prev, botMessage]);
+    } catch (err) {
+      // surface failures in the chat instead of leaving an unhandled rejection
+      const errorMessage = { role: "assistant", content: `Request failed: ${err.message}` };
+      setMessages(prev => [...prev, errorMessage]);
+    } finally {
+      setLoading(false); // ✅ hide loader
+    }
   };

   useEffect(() => {
@@ -37,11 +43,22 @@ function App() {
-        {messages.map((msg, i) => (
-          <div key={i} className={msg.role}>
-            {msg.content}
-          </div>
-        ))}
+        {loading && (
+          <div className="message loading">
+            <span className="spinner">
+              Thinking...
+            </span>
+            <div className="loading-hint">
+              The model is processing...
+            </div>
+          </div>
+        )}
+        {messages.map((msg, i) => (
+          <div key={i} className={msg.role}>
+            {msg.content}
+          </div>
+        ))}
+        <div ref={messagesEndRef} />
diff --git a/services/backend.service b/services/backend.service
new file mode 100644
index 0000000..074e7ca
--- /dev/null
+++ b/services/backend.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=LM Studio proxy Python Backend Service
+After=network.target
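+
+# Install sketch (assumes units are copied into /etc/systemd/system; the same
+# pattern applies to frontend.service and streamlit.service):
+#   sudo cp services/backend.service /etc/systemd/system/
+#   sudo systemctl daemon-reload
+#   sudo systemctl enable --now backend.service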
+
+[Service]
+User=samuele
+WorkingDirectory=/home/samuele/lm-chat-app/backend
+ExecStart=/usr/bin/env uvicorn main:app --host 0.0.0.0 --port 8000 --reload
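+# Note: --reload restarts uvicorn whenever the source tree changes; handy in
+# development, but consider dropping it for a production service.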
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/services/frontend.service b/services/frontend.service
new file mode 100644
index 0000000..a9e14cb
--- /dev/null
+++ b/services/frontend.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=LM Studio proxy Node Frontend Service
+After=network.target
+
+[Service]
+User=samuele
+WorkingDirectory=/home/samuele/lm-chat-app/frontend
+ExecStart=/home/samuele/lm-chat-app/start-frontend.sh
+Restart=always
+RestartSec=1
+StandardOutput=journal
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/services/streamlit.service b/services/streamlit.service
new file mode 100644
index 0000000..c88acac
--- /dev/null
+++ b/services/streamlit.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Streamlit LM Studio Chatbot proxy Service
+After=network.target
+
+[Service]
+User=samuele
+WorkingDirectory=/home/samuele/lm-chat-app/streamlit
+ExecStart=/home/samuele/lm-chat-app/start-streamlit.sh
+Restart=always
+RestartSec=1
+StandardOutput=journal
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/start-streamlit.sh b/start-streamlit.sh
new file mode 100755
index 0000000..2d32d69
--- /dev/null
+++ b/start-streamlit.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
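+# Launch the Streamlit chat client; invoked by services/streamlit.service.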
+cd /home/samuele/lm-chat-app/streamlit
+exec streamlit run app.py  # exec so no bash process lingers between systemd and streamlit
+
diff --git a/streamlit/README.md b/streamlit/README.md
new file mode 100644
index 0000000..20591bd
--- /dev/null
+++ b/streamlit/README.md
@@ -0,0 +1,8 @@
+## Streamlit chatbot client
+
+Chatbot client with realtime streaming updates that uses the OpenAI-compatible API to connect to a local LM Studio instance.
+
+Example initially adapted from:
+
+  https://github.com/ingridstevens/AI-projects/tree/main/streamlit-streaming-langchain
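+
+Run locally with `streamlit run app.py`, or start it as a service via `services/streamlit.service`.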
+
diff --git a/streamlit/app.py b/streamlit/app.py
new file mode 100644
index 0000000..627e2c8
--- /dev/null
+++ b/streamlit/app.py
@@ -0,0 +1,66 @@
+####
+#### Streamlit Streaming using LM Studio as an OpenAI Stand-in
+#### run with `streamlit run app.py`
+
+# !pip install streamlit langchain-openai
+
+import streamlit as st
+from langchain_core.messages import AIMessage, HumanMessage
+from langchain_openai import ChatOpenAI
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate
+
+# app config
+st.set_page_config(page_title="Egalware Chatbot", page_icon="🤖")
+st.title("Egalware's Streaming Chatbot")
+
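+# Build a prompt from the running chat history and stream the model's reply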
+def get_response(user_query, chat_history):
+
+    template = """
+    You are a helpful assistant. Answer the following questions considering the history of the conversation:
+
+    Chat history: {chat_history}
+
+    User question: {user_question}
+    """
+
+    prompt = ChatPromptTemplate.from_template(template)
+
+    # Using LM Studio's local inference server (OpenAI-compatible endpoint)
+    llm = ChatOpenAI(
+        base_url="http://10.74.83.100:1234/v1",
+        api_key="lm-studio",
+        model="qwen/qwen3-4b-2507",
+    )
+
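+    # LCEL pipeline: fill the prompt, call the model, parse the reply to a plain string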
+    chain = prompt | llm | StrOutputParser()
+
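+    # chain.stream() returns a generator of text chunks that st.write_stream can consume directly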
+    return chain.stream({
+        "chat_history": chat_history,
+        "user_question": user_query,
+    })
+
+# session state
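+# st.session_state persists across Streamlit's script reruns, so the chat history accumulates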
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = [
+        AIMessage(content="Hello, I am a bot. How can I help you?"),
+    ]
+
+
+# conversation
+for message in st.session_state.chat_history:
+    if isinstance(message, AIMessage):
+        with st.chat_message("AI"):
+            st.write(message.content)
+    elif isinstance(message, HumanMessage):
+        with st.chat_message("Human"):
+            st.write(message.content)
+
+# user input
+user_query = st.chat_input("Type your message here...")
+if user_query:
+    st.session_state.chat_history.append(HumanMessage(content=user_query))
+
+    with st.chat_message("Human"):
+        st.markdown(user_query)
+
+    with st.chat_message("AI"):
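+        # write_stream renders chunks as they arrive and returns the full concatenated text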
+        response = st.write_stream(get_response(user_query, st.session_state.chat_history))
+
+    st.session_state.chat_history.append(AIMessage(content=response))