JakeFake222 committed on
Commit 43b2ee6 · verified · 1 Parent(s): fc060b6

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +397 -145
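The commit message indicates the file was pushed programmatically rather than through the web editor. For reference, a minimal sketch of what such a push looks like with the `huggingface_hub` client; the repo id, token, and Space type below are assumptions for illustration, not details recorded in this commit:

```python
# Hypothetical reconstruction of the upload call; repo_id and token are
# placeholders, not values taken from this commit.
from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # personal access token (placeholder)
api.upload_file(
    path_or_fileobj="app.py",           # local file to push
    path_in_repo="app.py",              # destination path in the repo
    repo_id="JakeFake222/maker-agent",  # assumed Space id
    repo_type="space",                  # app.py suggests a Gradio Space
    commit_message="Upload app.py with huggingface_hub",
)
```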
app.py CHANGED
@@ -1,35 +1,50 @@
  """
- MAKER Agent - Clean Chat Interface
  """

  import gradio as gr
  import asyncio
  from collections import Counter
- from dataclasses import dataclass
- from typing import Callable
  from pathlib import Path

  # ============================================================================
- # Core
  # ============================================================================

  @dataclass
  class VotingConfig:
      k: int = 3
      max_samples: int = 30
      temperature_rest: float = 0.1
      parallel_samples: int = 3


  class LLMClient:
      def __init__(self, provider: str, api_key: str, model: str = None):
          self.provider = provider.lower()
          self.api_key = api_key
          self.model = model
          self._client = None
-         self._setup()

-     def _setup(self):
          if self.provider == "openai":
              from openai import AsyncOpenAI
              self._client = AsyncOpenAI(api_key=self.api_key)
@@ -51,176 +66,306 @@ class LLMClient:
          self._client = AsyncOpenAI(api_key=self.api_key, base_url="https://openrouter.ai/api/v1")
          self.model = self.model or "openai/gpt-4o-mini"

-     async def generate(self, messages: list, temperature: float = 0.0) -> str:
          if self.provider == "anthropic":
-             system = ""
-             conv = []
-             for m in messages:
-                 if m["role"] == "system":
-                     system = m["content"]
-                 else:
-                     conv.append(m)
-             kwargs = {"model": self.model, "max_tokens": 2000, "messages": conv}
-             if system:
-                 kwargs["system"] = system
-             r = await self._client.messages.create(**kwargs)
              return r.content[0].text
          else:
              r = await self._client.chat.completions.create(
-                 model=self.model, messages=messages,
-                 temperature=temperature, max_tokens=2000
              )
              return r.choices[0].message.content


  class WebSearch:
      @staticmethod
-     async def search(query: str) -> str:
          try:
              from duckduckgo_search import DDGS
              results = []
              with DDGS() as ddgs:
-                 for r in ddgs.text(query, max_results=5):
-                     results.append(f"• {r.get('title', '')}: {r.get('body', '')}")
-             return "\n".join(results) if results else ""
-         except:
-             return ""


  class FileHandler:
      @staticmethod
-     def load(path: str) -> dict:
-         p = Path(path)
-         ext = p.suffix.lower()
          try:
-             if ext == '.pdf':
-                 import pymupdf
-                 doc = pymupdf.open(str(p))
-                 text = "\n".join([page.get_text() for page in doc])
-                 doc.close()
-                 return {"name": p.name, "content": text[:20000]}
              elif ext == '.docx':
-                 from docx import Document
-                 doc = Document(str(p))
-                 text = "\n".join([para.text for para in doc.paragraphs])
-                 return {"name": p.name, "content": text[:20000]}
              else:
-                 return {"name": p.name, "content": p.read_text(errors='replace')[:20000]}
          except Exception as e:
-             return {"name": p.name, "content": f"[Error reading file: {e}]"}


  class MAKERAgent:
-     def __init__(self, llm: LLMClient, voting: VotingConfig = None):
          self.llm = llm
          self.voting = voting or VotingConfig()
-         self.history = []

-     async def chat(self, message: str, search: bool = False, files: str = None) -> str:
-         messages = [{"role": "system", "content": "You are a helpful assistant." + (f"\n\nFiles:\n{files}" if files else "")}]
-         messages.extend(self.history[-10:])

-         user_content = message
-         if search:
-             results = await WebSearch.search(message)
-             if results:
-                 user_content += f"\n\n[Web search results]\n{results}"

-         messages.append({"role": "user", "content": user_content})

-         # Voting
          votes: Counter = Counter()
-         samples = 0

-         response = await self.llm.generate(messages, temperature=0.0)
          samples += 1
-         votes[response] += 1

          while samples < self.voting.max_samples:
-             top = votes.most_common(2)
-             if top[0][1] - (top[1][1] if len(top) > 1 else 0) >= self.voting.k:
-                 break

              for _ in range(self.voting.parallel_samples):
                  if samples >= self.voting.max_samples:
                      break
-                 r = await self.llm.generate(messages, temperature=self.voting.temperature_rest)
                  samples += 1
-                 votes[r] += 1

-         winner = votes.most_common(1)[0][0] if votes else "Sorry, I couldn't generate a response."

-         self.history.append({"role": "user", "content": message})
-         self.history.append({"role": "assistant", "content": winner})

-         return winner
-
-     def clear(self):
-         self.history = []


  # ============================================================================
- # State
  # ============================================================================

- agent = None
- file_content = ""

- def connect(provider, key, model, k):
-     global agent
-     if not key.strip():
-         return "❌ Enter API key"
      try:
-         agent = MAKERAgent(LLMClient(provider, key.strip(), model.strip() or None), VotingConfig(k=k))
-         return f"✅ Connected: {agent.llm.model}"
      except Exception as e:
-         return f"❌ {e}"

- def load_files(files):
-     global file_content
      if not files:
-         file_content = ""
-         return "No files"

-     parts = []
      names = []
      for f in files:
-         data = FileHandler.load(f.name)
-         parts.append(f"[{data['name']}]\n{data['content']}")
-         names.append(data['name'])

-     file_content = "\n\n".join(parts)
      return f"📎 {', '.join(names)}"

- def respond(message, history, search, files):
-     global agent, file_content

-     if files:
-         load_files(files)

-     if not agent:
-         return history + [[message, "⚠️ Connect to an LLM first (open Settings below)"]], ""

-     if not message.strip():
-         return history, ""

-     async def run():
-         return await agent.chat(message.strip(), search, file_content or None)

-     loop = asyncio.new_event_loop()
      try:
-         response = loop.run_until_complete(run())
-     finally:
-         loop.close()

-     return history + [[message, response]], ""

- def clear():
-     global agent, file_content
-     if agent:
-         agent.clear()
-     file_content = ""
-     return [], None

  # ============================================================================
  # UI
@@ -228,47 +373,154 @@ def clear():

  with gr.Blocks(title="MAKER Agent") as demo:

-     gr.Markdown("## 🔧 MAKER Agent")
-
-     chatbot = gr.Chatbot(height=450, show_label=False)

-     # Input row with everything together
-     with gr.Group():
-         with gr.Row():
-             msg = gr.Textbox(
-                 placeholder="Message MAKER...",
                  show_label=False,
-                 scale=10,
-                 container=False
              )
-         send = gr.Button("↑", variant="primary", scale=1, min_width=50)

-     with gr.Row():
-         files = gr.File(
-             file_count="multiple",
-             file_types=[".pdf", ".docx", ".txt", ".md", ".json", ".csv", ".py"],
-             label="",
-             scale=3
          )
-         search = gr.Checkbox(label="🔍 Web search", scale=1)
-         clear_btn = gr.Button("Clear chat", scale=1)
-
-     # Settings
-     with gr.Accordion("⚙️ Settings", open=False):
-         with gr.Row():
-             provider = gr.Dropdown(["groq", "openai", "anthropic", "together", "openrouter"], value="groq", label="Provider")
-             api_key = gr.Textbox(label="API Key", type="password")
-             model = gr.Textbox(label="Model", placeholder="default")
-             k = gr.Slider(1, 7, value=3, step=1, label="K (reliability)")
-         with gr.Row():
-             connect_btn = gr.Button("Connect", variant="primary")
-             status = gr.Markdown("*Not connected*")
-         gr.Markdown("Free API: [console.groq.com](https://console.groq.com) • [Paper](https://arxiv.org/abs/2511.09030)")

-     # Events
-     connect_btn.click(connect, [provider, api_key, model, k], status)
-     msg.submit(respond, [msg, chatbot, search, files], [chatbot, msg])
-     send.click(respond, [msg, chatbot, search, files], [chatbot, msg])
-     clear_btn.click(clear, None, [chatbot, files])

- demo.launch()
  """
+ MAKER Agent - Chat Interface
+ =============================
+ Reliable AI Agent with Web Search & File Upload
+ Based on: https://arxiv.org/abs/2511.09030
  """

  import gradio as gr
  import asyncio
+ import json
+ import re
+ import base64
  from collections import Counter
+ from dataclasses import dataclass, field
+ from typing import Any, Callable, Optional
  from pathlib import Path

  # ============================================================================
+ # MAKER Core (Embedded)
  # ============================================================================

  @dataclass
  class VotingConfig:
      k: int = 3
      max_samples: int = 30
+     temperature_first: float = 0.0
      temperature_rest: float = 0.1
      parallel_samples: int = 3

+ @dataclass
+ class RedFlagConfig:
+     max_response_chars: int = 3000
+     min_response_length: int = 5
+     banned_patterns: list = field(default_factory=lambda: [])
+

  class LLMClient:
+     """Universal LLM client."""
+
      def __init__(self, provider: str, api_key: str, model: str = None):
          self.provider = provider.lower()
          self.api_key = api_key
          self.model = model
          self._client = None
+         self._setup_client()

+     def _setup_client(self):
          if self.provider == "openai":
              from openai import AsyncOpenAI
              self._client = AsyncOpenAI(api_key=self.api_key)

          self._client = AsyncOpenAI(api_key=self.api_key, base_url="https://openrouter.ai/api/v1")
          self.model = self.model or "openai/gpt-4o-mini"

+     async def generate(self, prompt: str, temperature: float = 0.0, max_tokens: int = 2000) -> str:
          if self.provider == "anthropic":
+             r = await self._client.messages.create(
+                 model=self.model, max_tokens=max_tokens,
+                 messages=[{"role": "user", "content": prompt}]
+             )
              return r.content[0].text
          else:
              r = await self._client.chat.completions.create(
+                 model=self.model,
+                 messages=[{"role": "user", "content": prompt}],
+                 temperature=temperature, max_tokens=max_tokens
              )
              return r.choices[0].message.content


  class WebSearch:
+     """Web search using DuckDuckGo (free)."""
+
      @staticmethod
+     async def search(query: str, num_results: int = 5) -> list:
          try:
              from duckduckgo_search import DDGS
              results = []
              with DDGS() as ddgs:
+                 for r in ddgs.text(query, max_results=num_results):
+                     results.append({
+                         "title": r.get("title", ""),
+                         "url": r.get("href", ""),
+                         "snippet": r.get("body", "")
+                     })
+             return results
+         except Exception as e:
+             return [{"title": "Error", "url": "", "snippet": str(e)}]


  class FileHandler:
+     """Handle file uploads."""
+
      @staticmethod
+     async def load_file(file_path: str) -> dict:
+         path = Path(file_path)
+         ext = path.suffix.lower()
+
          try:
+             if ext in {'.txt', '.md', '.json', '.py', '.js', '.html', '.css', '.csv'}:
+                 content = path.read_text(encoding='utf-8', errors='replace')
+                 return {"type": "text", "name": path.name, "content": content[:50000]}
+
+             elif ext == '.pdf':
+                 try:
+                     import pymupdf
+                     doc = pymupdf.open(str(path))
+                     text = "\n\n".join([page.get_text() for page in doc])
+                     doc.close()
+                     return {"type": "pdf", "name": path.name, "content": text[:50000]}
+                 except ImportError:
+                     return {"type": "error", "name": path.name, "content": "PDF requires: pip install pymupdf"}
+
              elif ext == '.docx':
+                 try:
+                     from docx import Document
+                     doc = Document(str(path))
+                     text = "\n\n".join([p.text for p in doc.paragraphs])
+                     return {"type": "docx", "name": path.name, "content": text[:50000]}
+                 except ImportError:
+                     return {"type": "error", "name": path.name, "content": "DOCX requires: pip install python-docx"}
+
+             elif ext in {'.png', '.jpg', '.jpeg', '.gif', '.webp'}:
+                 content = path.read_bytes()
+                 b64 = base64.b64encode(content).decode('utf-8')
+                 return {"type": "image", "name": path.name, "base64": b64}
+
              else:
+                 content = path.read_text(encoding='utf-8', errors='replace')
+                 return {"type": "text", "name": path.name, "content": content[:50000]}
+
          except Exception as e:
+             return {"type": "error", "name": path.name, "content": str(e)}


  class MAKERAgent:
+     """MAKER Framework Agent."""
+
+     def __init__(self, llm: LLMClient, voting: VotingConfig = None, red_flags: RedFlagConfig = None):
          self.llm = llm
          self.voting = voting or VotingConfig()
+         self.red_flags = red_flags or RedFlagConfig()
+         self.stats = {"samples": 0, "red_flags": 0, "tool_calls": 0}
+
+     def _check_red_flags(self, response: str) -> bool:
+         if len(response) > self.red_flags.max_response_chars:
+             return True
+         if len(response) < self.red_flags.min_response_length:
+             return True
+         for pattern in self.red_flags.banned_patterns:
+             if re.search(pattern, response, re.IGNORECASE):
+                 return True
+         return False
+
+     def _normalize_response(self, response: str) -> str:
+         """Normalize response for voting comparison."""
+         return response.strip().lower()

+     async def execute(self, prompt: str, use_search: bool = False,
+                       file_context: str = None, progress_callback: Callable = None) -> dict:

+         # Build the full prompt
+         full_prompt = "You are a helpful assistant. Respond naturally and conversationally.\n\n"

+         if file_context:
+             full_prompt += f"The user has provided the following files for context:\n{file_context}\n\n"

+         full_prompt += f"User: {prompt}\n\nAssistant:"
+
+         # Handle web search if enabled
+         search_results = None
+         if use_search:
+             if progress_callback:
+                 progress_callback(0.1, "Searching the web...")
+
+             search_results = await WebSearch.search(prompt)
+             self.stats["tool_calls"] += 1
+
+             if search_results and search_results[0].get("title") != "Error":
+                 search_text = "\n".join([f"- {r['title']}: {r['snippet']}" for r in search_results[:5]])
+                 full_prompt = f"You are a helpful assistant with access to web search results.\n\n"
+                 if file_context:
+                     full_prompt += f"Files provided:\n{file_context}\n\n"
+                 full_prompt += f"Web search results for '{prompt}':\n{search_text}\n\n"
+                 full_prompt += f"User question: {prompt}\n\nProvide a helpful response based on the search results. Assistant:"
+
+         if progress_callback:
+             progress_callback(0.2, "Getting response...")
+
+         # Voting loop
          votes: Counter = Counter()
+         responses_map = {}
+         samples, flagged = 0, 0

+         # First sample at temperature 0
+         response = await self.llm.generate(full_prompt, temperature=0.0)
          samples += 1
+         self.stats["samples"] += 1

+         if not self._check_red_flags(response):
+             key = self._normalize_response(response)
+             votes[key] += 1
+             responses_map[key] = response
+         else:
+             flagged += 1
+             self.stats["red_flags"] += 1
+
+         # Continue voting until we have a winner
+         round_num = 1
          while samples < self.voting.max_samples:
+             if votes:
+                 top = votes.most_common(2)
+                 top_count = top[0][1]
+                 second_count = top[1][1] if len(top) > 1 else 0
+                 if top_count - second_count >= self.voting.k:
+                     break
+
+             round_num += 1
+             if progress_callback:
+                 progress_callback(0.2 + 0.7 * (samples / self.voting.max_samples), f"Voting round {round_num}...")

              for _ in range(self.voting.parallel_samples):
                  if samples >= self.voting.max_samples:
                      break
+
+                 response = await self.llm.generate(full_prompt, temperature=self.voting.temperature_rest)
                  samples += 1
+                 self.stats["samples"] += 1
+
+                 if not self._check_red_flags(response):
+                     key = self._normalize_response(response)
+                     votes[key] += 1
+                     if key not in responses_map:
+                         responses_map[key] = response
+                 else:
+                     flagged += 1
+                     self.stats["red_flags"] += 1

+         if progress_callback:
+             progress_callback(1.0, "Done!")

+         if votes:
+             top_key, top_count = votes.most_common(1)[0]
+             return {
+                 "success": True,
+                 "response": responses_map[top_key],
+                 "votes": top_count,
+                 "total_samples": samples,
+                 "red_flagged": flagged,
+                 "search_results": search_results
+             }

+         return {
+             "success": False,
+             "response": "I couldn't generate a reliable response. Please try again.",
+             "votes": 0,
+             "total_samples": samples,
+             "red_flagged": flagged,
+             "search_results": search_results
+         }


  # ============================================================================
+ # Global State
  # ============================================================================

+ current_agent = None
+ loaded_files = {}
+
+ # ============================================================================
+ # Functions
+ # ============================================================================

+ def setup_agent(provider, api_key, model, k_votes):
+     global current_agent
+     if not api_key:
+         return "❌ Please enter your API key", gr.update(interactive=False)
      try:
+         llm = LLMClient(provider, api_key, model if model else None)
+         current_agent = MAKERAgent(llm, VotingConfig(k=k_votes))
+         return f"✅ Connected to {provider} ({llm.model})", gr.update(interactive=True)
      except Exception as e:
+         return f"❌ Error: {e}", gr.update(interactive=False)
+

+ def process_files(files):
+     global loaded_files
+     loaded_files = {}
+
      if not files:
+         return "No files attached"

      names = []
      for f in files:
+         info = asyncio.run(FileHandler.load_file(f.name))
+         loaded_files[info['name']] = info
+         names.append(info['name'])

      return f"📎 {', '.join(names)}"

+
+ async def chat_async(message, history, use_search, files, progress=gr.Progress()):
+     global current_agent, loaded_files

+     if not current_agent:
+         return history + [[message, "⚠️ Please set up your API key first in the Settings tab."]]

+     # Process any new files
+     if files:
+         for f in files:
+             info = await FileHandler.load_file(f.name)
+             loaded_files[info['name']] = info

+     # Build file context
+     file_context = None
+     if loaded_files:
+         parts = []
+         for name, info in loaded_files.items():
+             if info["type"] != "image" and info["type"] != "error":
+                 parts.append(f"=== {name} ===\n{info.get('content', '')[:10000]}")
+         if parts:
+             file_context = "\n\n".join(parts)

+     def update_progress(pct, msg):
+         progress(pct, desc=msg)

      try:
+         result = await current_agent.execute(
+             message,
+             use_search=use_search,
+             file_context=file_context,
+             progress_callback=update_progress
+         )
+
+         response = result["response"]
+
+         # Add subtle stats footer
+         stats = f"\n\n---\n*{result['votes']} votes, {result['total_samples']} samples*"
+
+         return history + [[message, response + stats]]

+     except Exception as e:
+         return history + [[message, f"❌ Error: {str(e)}"]]
+
+
+ def chat(message, history, use_search, files):
+     return asyncio.run(chat_async(message, history, use_search, files))
+
+
+ def clear_chat():
+     global loaded_files
+     loaded_files = {}
+     return [], None, "No files attached"


  # ============================================================================
  # UI
  # ============================================================================

  with gr.Blocks(title="MAKER Agent") as demo:

+     # Header
+     gr.HTML("""
+     <div style="text-align: center; padding: 20px 0 10px 0;">
+         <h1 style="font-size: 2rem; margin: 0;">🔧 MAKER Agent</h1>
+         <p style="color: #666; margin: 5px 0;">Reliable AI with Voting • <a href="https://arxiv.org/abs/2511.09030" target="_blank">Paper</a></p>
+     </div>
+     """)

+     with gr.Tabs():
+
+         # Chat Tab
+         with gr.Tab("💬 Chat"):
+
+             chatbot = gr.Chatbot(
+                 label="Chat",
+                 height=450,
                  show_label=False,
+                 bubble_full_width=False,
              )
+
+             with gr.Row():
+                 with gr.Column(scale=12):
+                     msg = gr.Textbox(
+                         placeholder="Ask anything... (Ctrl+Enter to send)",
+                         show_label=False,
+                         container=False,
+                         lines=2,
+                     )
+
+                 with gr.Column(scale=1, min_width=80):
+                     send_btn = gr.Button("Send", variant="primary", interactive=False)
+
+             with gr.Row():
+                 with gr.Column(scale=4):
+                     file_upload = gr.File(
+                         label="",
+                         file_count="multiple",
+                         file_types=[".pdf", ".docx", ".txt", ".md", ".json", ".csv"],
+                         show_label=False,
+                     )
+
+                 with gr.Column(scale=2):
+                     file_status = gr.Markdown("No files attached")
+
+                 with gr.Column(scale=2):
+                     use_search = gr.Checkbox(
+                         label="🔍 Web Search",
+                         value=False,
+                         info="Search DuckDuckGo"
+                     )
+
+                 with gr.Column(scale=1):
+                     clear_btn = gr.Button("🗑️ Clear", size="sm")
+
+             # Event handlers
+             file_upload.change(process_files, file_upload, file_status)
+
+             msg.submit(chat, [msg, chatbot, use_search, file_upload], chatbot).then(
+                 lambda: "", None, msg
+             )
+             send_btn.click(chat, [msg, chatbot, use_search, file_upload], chatbot).then(
+                 lambda: "", None, msg
+             )
+             clear_btn.click(clear_chat, None, [chatbot, file_upload, file_status])

+         # Settings Tab
+         with gr.Tab("⚙️ Settings"):
+
+             gr.Markdown("### Connect to an LLM Provider")
+
+             with gr.Row():
+                 with gr.Column():
+                     provider = gr.Dropdown(
+                         ["groq", "openai", "anthropic", "together", "openrouter"],
+                         value="groq",
+                         label="Provider",
+                         info="Groq is free & fast!"
+                     )
+                     api_key = gr.Textbox(
+                         label="API Key",
+                         type="password",
+                         placeholder="Paste your API key here..."
+                     )
+                     model = gr.Textbox(
+                         label="Model (optional)",
+                         placeholder="Leave blank for default"
+                     )
+
+                 with gr.Column():
+                     k_votes = gr.Slider(
+                         1, 7, value=3, step=1,
+                         label="Reliability (K votes)",
+                         info="Higher = more reliable, slower"
+                     )
+
+                     gr.Markdown("""
+                     ### Get API Keys
+
+                     **Groq** (recommended - free & fast):
+                     [console.groq.com](https://console.groq.com)
+
+                     **OpenAI**: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
+
+                     **Anthropic**: [console.anthropic.com](https://console.anthropic.com)
+                     """)
+
+             connect_btn = gr.Button("🔌 Connect", variant="primary")
+             status = gr.Markdown("👆 Enter your API key and click Connect")
+
+             connect_btn.click(
+                 setup_agent,
+                 [provider, api_key, model, k_votes],
+                 [status, send_btn]
              )
+
+         # About Tab
+         with gr.Tab("ℹ️ About"):
+             gr.Markdown("""
+             ## How MAKER Works
+
+             This agent uses the **MAKER Framework** to achieve reliable AI responses:
+
+             1. **Multiple Samples** - Generates several responses for each question
+             2. **Voting** - Responses "vote" and the winner needs K votes ahead
+             3. **Red-Flagging** - Suspicious outputs are automatically discarded
+
+             ### Why This Matters
+
+             Instead of hoping the AI gets it right, MAKER uses statistics to ensure reliability. The paper achieved **1 million steps with zero errors** using this approach.
+
+             ### Features
+
+             - 🔍 **Web Search** - Free DuckDuckGo search (no API key needed)
+             - 📎 **File Upload** - PDF, DOCX, TXT, MD, JSON, CSV
+             - ⚡ **Multiple Providers** - Groq, OpenAI, Anthropic, and more
+
+             ### Links
+
+             - 📄 [Research Paper](https://arxiv.org/abs/2511.09030)
+             - 🎥 [Video Explanation](https://youtube.com/watch?v=TJ-vWGCosdQ)
+             """)

+     # Footer
+     gr.HTML("""
+     <div style="text-align: center; color: #888; padding: 15px; font-size: 0.85rem;">
+         MAKER Framework • <a href="https://arxiv.org/abs/2511.09030" style="color: #888;">arxiv.org/abs/2511.09030</a>
+     </div>
+     """)

+ if __name__ == "__main__":
+     demo.launch()
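The core of the new `MAKERAgent.execute` above is first-to-ahead-by-k voting: sampling continues until one normalized answer leads its closest rival by `k` votes (or the sample budget runs out), and red-flagged samples never receive a vote. A minimal self-contained sketch of that loop, with a stubbed `generate()` standing in for the real LLM call (the stub's answer distribution is invented purely for illustration):

```python
import random
from collections import Counter

K, MAX_SAMPLES = 3, 30  # mirror the VotingConfig defaults in the diff

def generate() -> str:
    # Stub for LLMClient.generate(): a noisy model that usually agrees
    # with itself but occasionally answers differently.
    return random.choices(["The answer is 4.", "The answer is 5."],
                          weights=[0.8, 0.2])[0]

def red_flagged(ans: str) -> bool:
    # Simplified _check_red_flags: reject too-short or absurdly long outputs.
    return not (5 <= len(ans) <= 3000)

votes, samples = Counter(), 0
while samples < MAX_SAMPLES:
    top = votes.most_common(2)
    # Stop once the leader is ahead of the runner-up by K votes.
    if top and top[0][1] - (top[1][1] if len(top) > 1 else 0) >= K:
        break
    ans = generate()
    samples += 1
    if not red_flagged(ans):
        votes[ans.strip().lower()] += 1  # normalize before counting

winner = votes.most_common(1)[0][0] if votes else "no reliable answer"
print(f"{winner!r} after {samples} samples")
```

With a consistent model the margin is reached after only a few samples; the full `max_samples` budget is spent only when answers genuinely disagree, which is what keeps the cost of the reliability check low.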