# NOTE: the three lines below were page chrome ("Spaces: Running") captured
# when this file was copied from the Hugging Face Space web UI.
| import os | |
| import requests | |
| import random | |
| import threading | |
| import time | |
| import schedule | |
| import tweepy | |
| import gradio as gr | |
# --- Secrets (injected by the HF Space environment) -------------------------
# Direct os.environ[...] lookups raise KeyError at startup when a secret is
# missing -- deliberate fail-fast, since the bot cannot run without them.
HF_TOKEN = os.environ['HF_TOKEN']
CONSUMER_KEY = os.environ['CONSUMER_KEY']
CONSUMER_SECRET = os.environ['CONSUMER_SECRET']
ACCESS_TOKEN = os.environ['ACCESS_TOKEN']
ACCESS_SECRET = os.environ['ACCESS_SECRET']

# --- LLM API setup (OpenAI-compatible chat-completions router) --------------
API_URL = "https://router.huggingface.co/v1/chat/completions"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
def query(payload):
    """POST *payload* to the chat-completions endpoint and return parsed JSON.

    Args:
        payload: JSON-serialisable chat-completions request body.

    Returns:
        dict: the decoded JSON response. Note the router returns error
        payloads as JSON too, so callers must not assume success.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # Explicit timeout: without one, a stalled request would hang the
    # background scheduler thread (and any Gradio callback) forever.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    return response.json()
# Topics for posts
# Pool of subject areas: generate_post() picks one at random when no explicit
# topic is given, and the Gradio dropdown offers all of them plus "Random".
topics = ['AI', 'Tech', 'Business Startups', 'Entrepreneurship']
# Function to generate a high-quality post using LLM
def generate_post(topic=None):
    """Generate an X (Twitter) post about *topic* via the LLM API.

    Args:
        topic: One of ``topics``; a random topic is chosen when None.

    Returns:
        str: the post text, hard-truncated to at most 280 characters.

    Raises:
        RuntimeError: if the API response does not contain a completion
            (e.g. the router returned an error payload).
    """
    if topic is None:
        topic = random.choice(topics)
    prompt = f"Generate a high-quality, educational, and informative X (Twitter) post under 280 characters showcasing expertise in {topic}. Make it engaging, insightful, and professional. Include relevant hashtags if natural."
    response = query({
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "model": "deepseek-ai/DeepSeek-V3.2:novita",
        "max_tokens": 200,
        "temperature": 0.8
    })
    # The router returns error payloads as plain JSON; the original unguarded
    # response["choices"][0] access crashed with an opaque KeyError on those.
    try:
        post_content = response["choices"][0]["message"]["content"].strip()
    except (KeyError, IndexError, TypeError) as err:
        raise RuntimeError(f"Unexpected LLM API response: {response!r}") from err
    # Ensure under 280 chars (hard API limit for a single tweet).
    if len(post_content) > 280:
        post_content = post_content[:277] + "..."
    return post_content
# Tweepy v2 Client setup (for posting tweets)
# OAuth 1.0a user-context credentials (consumer key/secret + access
# token/secret) -- the combination tweepy requires to create tweets
# on behalf of this account.
client = tweepy.Client(
    consumer_key=CONSUMER_KEY,
    consumer_secret=CONSUMER_SECRET,
    access_token=ACCESS_TOKEN,
    access_token_secret=ACCESS_SECRET
)
# Function to post to X (using v2 API)
def post_to_x(content):
    """Publish *content* as a tweet; return a human-readable status string.

    Never raises: any failure is folded into the returned string so both
    the Gradio UI and the scheduler's console log always get a result.
    """
    try:
        response = client.create_tweet(text=content)
        tweet_id = response.data['id']
        # /i/status/<id> is the username-independent permalink; the previous
        # hard-coded "user" path segment pointed at a nonexistent account.
        return f"Posted successfully! View: https://x.com/i/status/{tweet_id}\n\n{content}"
    except Exception as e:
        # Deliberately broad: surface any tweepy/auth/network error as text.
        return f"Error posting: {str(e)}"
# Auto-post function
def auto_post():
    """Generate a post on a random topic, publish it, and log the outcome."""
    # Console print is the only log sink available in the Space runtime.
    print(post_to_x(generate_post()))
# Background scheduler thread
def run_scheduler():
    """Register the 2-hourly auto-post job and poll for due jobs forever."""
    poll_interval_seconds = 60  # how often to check for pending jobs
    schedule.every(2).hours.do(auto_post)
    while True:
        schedule.run_pending()
        time.sleep(poll_interval_seconds)
# Start scheduler in background
# daemon=True so the thread never blocks interpreter shutdown when the
# Space is stopped or restarted.
threading.Thread(target=run_scheduler, daemon=True).start()
# Gradio interface
def manual_generate(topic):
    """Gradio callback: return a generated post for the selected *topic*.

    The dropdown's "Random" sentinel maps to None so generate_post()
    picks a topic itself.
    """
    return generate_post(None if topic == "Random" else topic)
def manual_post(content):
    """Gradio callback: publish *content* to X and return the status text."""
    return post_to_x(content)
# UI layout: topic picker + generate button on one row, then an editable
# preview textbox, a post button, and a result textbox.
with gr.Blocks() as demo:
    gr.Markdown("# AI/Tech/Startups/Entrepreneurship X Post Generator")
    gr.Markdown("Generate and post high-quality content. Auto-posting every 2 hours in background.")
    with gr.Row():
        # Dropdown seeded with the fixed topic list plus a "Random" sentinel
        # that manual_generate() resolves to a random topic.
        topic_input = gr.Dropdown(choices=topics + ["Random"], label="Topic", value="Random")
        generate_btn = gr.Button("Generate Post")
    output_text = gr.Textbox(label="Generated Post", lines=6)
    post_btn = gr.Button("Post to X Now")
    post_result = gr.Textbox(label="Result", lines=4)
    # Wiring: generation fills the textbox; posting sends the (possibly
    # user-edited) textbox contents to X.
    generate_btn.click(manual_generate, inputs=topic_input, outputs=output_text)
    post_btn.click(manual_post, inputs=output_text, outputs=post_result)
# Blocking call: serves the UI; the scheduler keeps running in its daemon thread.
demo.launch()