Adedoyinjames committed on
Commit
3f5d6f1
·
verified ·
1 Parent(s): 0e7bc20

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +105 -0
app.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import random
4
+ import threading
5
+ import time
6
+ import schedule
7
+ import tweepy
8
+ import gradio as gr
9
+
10
# Secrets injected by the HF Space environment; a missing variable raises
# KeyError during the unpacking below, so misconfiguration fails fast at startup.
HF_TOKEN, CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET = (
    os.environ[key]
    for key in ("HF_TOKEN", "CONSUMER_KEY", "CONSUMER_SECRET",
                "ACCESS_TOKEN", "ACCESS_SECRET")
)
16
+
17
# Endpoint and auth header for the HF Inference router
# (OpenAI-compatible chat-completions schema).
API_URL = "https://router.huggingface.co/v1/chat/completions"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
22
+
23
def query(payload):
    """POST *payload* to the HF router chat-completions endpoint and return the JSON body.

    Parameters
    ----------
    payload : dict
        Chat-completions request (messages, model, max_tokens, ...).

    Returns
    -------
    dict
        Decoded JSON response.

    Raises
    ------
    requests.HTTPError
        On a non-2xx status (previously error pages were silently decoded).
    requests.Timeout
        If the call stalls; the original had no timeout and could hang forever.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    response.raise_for_status()  # fail loudly on 4xx/5xx instead of returning an error body
    return response.json()
26
+
27
# Subject areas the bot rotates through when no explicit topic is supplied.
topics = ['AI', 'Tech', 'Business Startups', 'Entrepreneurship']
29
+
30
# Generate a high-quality post via the LLM.
def generate_post(topic=None):
    """Generate an X post about *topic* using the chat-completions API.

    Parameters
    ----------
    topic : str | None
        Subject of the post. ``None`` or ``"Random"`` (the UI dropdown's
        wildcard entry) picks one of ``topics`` at random.

    Returns
    -------
    str
        Post text, hard-capped at X's 280-character limit.

    Raises
    ------
    RuntimeError
        If the API response carries no ``choices`` (e.g. an error body).
    """
    # "Random" comes from the Gradio dropdown; previously it was passed
    # through verbatim, producing posts about the literal topic "Random".
    if topic is None or topic == "Random":
        topic = random.choice(topics)
    prompt = f"Generate a high-quality, educational, and informative X (Twitter) post under 270 characters showcasing your expertise in {topic}. Make it engaging, insightful, and professional."
    response = query({
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "model": "deepseek-ai/DeepSeek-V3.2:novita",
        "max_tokens": 200,
        "temperature": 0.7
    })
    if "choices" not in response:
        # Surface API errors (rate limits, bad model id, ...) clearly
        # instead of an opaque KeyError on "choices".
        raise RuntimeError(f"LLM API returned no choices: {response}")
    post_content = response["choices"][0]["message"]["content"].strip()
    # Enforce X's 280-character hard limit; the prompt asks for <270 to
    # leave headroom, but the model may overrun.
    if len(post_content) > 280:
        post_content = post_content[:277] + "..."
    return post_content
51
+
52
# OAuth 1.0a user-context client for the X (Twitter) v1.1 API.
_oauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
_oauth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(_oauth)
56
+
57
# Publishing helper shared by the scheduler and the Gradio UI.
def post_to_x(content):
    """Publish *content* to X; return a human-readable status string."""
    try:
        api.update_status(status=content)
    except Exception as exc:  # report failure to the caller rather than crashing UI/scheduler
        return f"Error posting: {str(exc)}"
    return f"Posted successfully: {content}"
64
+
65
# Scheduled job body: one full generate-and-publish cycle.
def auto_post():
    """Generate a post on a random topic, publish it, and log the outcome."""
    outcome = post_to_x(generate_post())
    print(outcome)  # stdout only; the Space's logs capture it
70
+
71
# Long-running loop executed on a daemon thread (never returns).
def run_scheduler():
    """Register the 2-hourly auto-post job and service pending jobs forever."""
    schedule.every(2).hours.do(auto_post)
    while True:
        schedule.run_pending()
        time.sleep(60)  # wake once a minute; `schedule` decides whether a job is due
77
+
78
# Run the scheduler on a daemon thread so it neither blocks nor outlives
# the Gradio server running in the main thread.
_scheduler_thread = threading.Thread(target=run_scheduler, daemon=True)
_scheduler_thread.start()
80
+
81
# Gradio interface callbacks for manual generation and posting
def manual_generate(topic):
    """Gradio callback: generate a post for the dropdown's *topic*.

    The dropdown offers a literal "Random" entry; map it to ``None`` so
    ``generate_post`` picks a topic itself. Previously the string
    "Random" was passed through verbatim and the model wrote posts
    about randomness.
    """
    return generate_post(None if topic == "Random" else topic)
85
+
86
def manual_post(content):
    """Gradio callback: push the (possibly edited) text to X and report the result."""
    return post_to_x(content)
88
+
89
# Manual-control UI; the background thread keeps auto-posting regardless.
with gr.Blocks() as demo:
    gr.Markdown("# AI/Tech/Startups/Entrepreneurship X Post Generator")
    gr.Markdown("Use this interface to manually generate and post high-quality content. Auto-posting runs every 2 hours in the background.")

    with gr.Row():
        topic_choice = gr.Dropdown(choices=topics + ["Random"], label="Select Topic", value="Random")
        btn_generate = gr.Button("Generate Post")

    generated_box = gr.Textbox(label="Generated Post (under 270 chars)", lines=5)

    btn_post = gr.Button("Post to X")
    result_box = gr.Textbox(label="Post Result")

    # Wire buttons to the manual callbacks defined above.
    btn_generate.click(manual_generate, inputs=topic_choice, outputs=generated_box)
    btn_post.click(manual_post, inputs=generated_box, outputs=result_box)

demo.launch()