API Request
import openai
import os
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# One-shot chat completion: send a single question, print it and the answer.
question = "What is flask python?"
request_args = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": question}],
    "max_tokens": 256,
    "n": 1,
    "stop": None,
    "temperature": 0.7,
}
response = openai.ChatCompletion.create(**request_args)
answer = response["choices"][0]["message"]["content"]
print(question)
print(answer)
Streaming Completion
import openai
import os
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

print("Please enter your question (press Enter to submit):")
question = input()

# stream=True yields the reply incrementally, chunk by chunk.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": question}],
    max_tokens=256,
    n=1,
    stop=None,
    temperature=0.7,
    stream=True
)

for chunk in response:
    # The final streamed chunk has no "content" key — default to "" so we
    # never print the literal string "None".
    content = chunk["choices"][0]["delta"].get("content", "")
    # end="" keeps the streamed tokens on one line; flush shows them live.
    print(content, end="", flush=True)
print("\n")
Conversation History
import openai
import os, sys
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# Full running transcript sent back to the model on every turn.
conversation_history = []
while True:
    print("\nPlease enter your question (or 'exit' to end):")
    question = input()
    if question.lower() == 'exit':
        break
    conversation_history.append({"role": "user", "content": question})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=conversation_history,
        max_tokens=256,
        n=1,
        stop=None,
        temperature=0.7,
        stream=True
    )
    # Accumulate the streamed fragments and record the complete reply ONCE,
    # as the assistant's turn — not one history entry per chunk, and not
    # with role "system" (model replies use role "assistant").
    answer_parts = []
    for chunk in response:
        # Final chunk carries no "content" key — default to "".
        content = chunk["choices"][0]["delta"].get("content", "")
        print(content, end="", flush=True)
        answer_parts.append(content)
    conversation_history.append({"role": "assistant", "content": "".join(answer_parts)})
print("Conversation ended.")
Context
import openai
import os, sys

openai.api_key = os.environ.get("OPENAI_API_KEY")

conversation_history = []
# A system message steers every subsequent answer in the conversation.
context_message = {
    "role": "system",
    "content": "System: Please keep the answers short"
}
conversation_history.append(context_message)

questions = [
    "What is Flask? Keep the answers short.",
    "What's the current version?"
]

for question in questions:
    print("\nQuestion:", question)
    conversation_history.append({"role": "user", "content": question})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=conversation_history,
        max_tokens=64,
        n=1,
        stop=None,
        temperature=0.7,
        stream=True
    )
    # Accumulate streamed fragments and append the full reply once, with the
    # correct "assistant" role, so later turns see a coherent transcript.
    answer_parts = []
    for chunk in response:
        # Final chunk carries no "content" key — default to "".
        content = chunk["choices"][0]["delta"].get("content", "")
        print(content, end="", flush=True)
        answer_parts.append(content)
    conversation_history.append({"role": "assistant", "content": "".join(answer_parts)})
🚀 AI Agent
import openai
import os, sys
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")


def query_chatgpt(prompt):
    """Send a single-turn prompt to the model and return its reply text."""
    print("\n" + prompt)
    response = openai.ChatCompletion.create(
        model="gpt-4.1",
        messages=[{"role": "user", "content": prompt}],
    )
    return response['choices'][0]['message']['content'].strip()


def _parse_difficulty(reply, default=1):
    """Return the first digit found in *reply*, or *default* if none.

    The model is asked to "reply with a number", but may pad it with text
    or punctuation (e.g. "3." or "I'd say 3"); int(reply) would crash then.
    """
    for ch in reply:
        if ch.isdigit():
            return int(ch)
    return default


topic = "function"
languages = {
    "Python": f"Explain the '{topic}' topic in Python. ",
    "PHP": f"Explain the concept similar to {topic} in PHP. ",
    "Java": f"Explain the equivalent concept of {topic} in Java. ",
}
context = "Keep the answer short, without examples."

for lang, prompt in languages.items():
    summary = query_chatgpt("--- " + prompt + context)
    prompt_2 = f"How difficult is the topic '{topic}' in {lang} for beginners? Just reply with a number, 1 to 5."
    difficulty = query_chatgpt(prompt_2)
    print("Difficulty: " + difficulty + "\n")
    print(summary)
    # Only generate extra material for topics the model rates as hard (>= 3).
    if _parse_difficulty(difficulty) >= 3:
        code = query_chatgpt(f"Give a code example of {topic} in {lang}")
        qna = query_chatgpt(f"Create 3 beginner questions and answers about {topic} in {lang}.")
        print(code)
        print(qna)
🚀 Deploy AI Agent
# Deploy AI Agent: turns natural-language deployment commands into git/FTP
# actions, caching OpenAI responses in a local SQLite database.
from openai import OpenAI
import os
import subprocess
import json
import datetime
import sys
import sqlite3
from dotenv import load_dotenv
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Local checkouts of each deployable site, keyed by the short name the
# model is told to use in its JSON action plans.
REPOS = {
"python": "/var/www/refresh.local/refresh.ro/Application/github/python-pages/",
"algorithms": "/var/www/refresh.local/refresh.ro/Application/github/algorithms-pages/",
"php": "/var/www/refresh.local/refresh.ro/Application/github/php-pages/",
"mlearning": "/var/www/refresh.local/refresh.ro/Application/github/mlearning-pages/",
"java": "/var/www/refresh.local/refresh.ro/Application/github/java-pages/"
}
# FTP target and credentials come from the environment (.env).
FTP_BASE = os.getenv("FTP_BASE")
FTP_USER = os.getenv("FTP_USER")
FTP_PASS = os.getenv("FTP_PASS")
# NOTE(review): os.getenv("CURR_DIR") returns None when unset, which makes
# this concatenation raise TypeError at import time — confirm .env always
# defines CURR_DIR (with a trailing slash).
DB_PATH = os.getenv("CURR_DIR") + "mlearning-pages/main/packages/openai/deploy_ai_agent/prompt_cache.db"
def init_db(db_path=None):
    """Create the prompt_cache table if it does not already exist.

    Args:
        db_path: optional SQLite file path; defaults to the module-level
            DB_PATH (parameter added for testability, backward compatible).

    The original called c.execute() with no SQL, which raises TypeError and
    never creates the table. Schema matches the queries used by
    get_cached_response/store_response: prompt is the unique lookup key
    (store_response relies on IntegrityError for duplicates).
    """
    conn = sqlite3.connect(db_path or DB_PATH)
    try:
        conn.execute(
            "CREATE TABLE IF NOT EXISTS prompt_cache ("
            "prompt TEXT PRIMARY KEY, response TEXT)"
        )
        conn.commit()
    finally:
        # Always release the connection, even if the DDL fails.
        conn.close()
def get_cached_response(prompt, db_path=None):
    """Return the cached model response for *prompt*, or None if not cached.

    Args:
        prompt: exact prompt text used as the cache key.
        db_path: optional SQLite file path; defaults to DB_PATH
            (parameter added for testability, backward compatible).
    """
    conn = sqlite3.connect(db_path or DB_PATH)
    try:
        # try/finally so the connection is closed even if the query raises
        # (the original leaked the connection on error).
        row = conn.execute(
            "SELECT response FROM prompt_cache WHERE prompt = ?", (prompt,)
        ).fetchone()
    finally:
        conn.close()
    return row[0] if row else None
def store_response(prompt, response, db_path=None):
    """Insert a (prompt, response) pair into the cache; ignore duplicates.

    Args:
        prompt: cache key (unique column in prompt_cache).
        response: raw model reply text to cache.
        db_path: optional SQLite file path; defaults to DB_PATH
            (parameter added for consistency with the other DB helpers).
    """
    conn = sqlite3.connect(db_path or DB_PATH)
    try:
        conn.execute(
            "INSERT INTO prompt_cache (prompt, response) VALUES (?,?)",
            (prompt, response),
        )
        conn.commit()
    except sqlite3.IntegrityError:
        # Prompt already cached — first-writer-wins, deliberately ignored.
        pass
    finally:
        conn.close()
def get_action_plan(natural_language_cmd):
    """Convert a natural-language deploy command into an action-plan dict.

    Returns a dict of the form {"git": [...], "ftp": [...]} listing the
    repositories to push to GitHub and/or upload via FTP, or None when the
    model reply is not valid JSON. Replies are cached in SQLite so repeated
    commands skip the API call.
    """
    system_prompt = f"""
You are an AI agent that converts deployment commands into structured JSON instructions.
Valid repositories:
{','.join(REPOS.keys())}
Return a JSON object with:
- "git": list of repositories to update (subset of the valid ones)
- "ftp": list of directories to upload via FTP (same names)
Examples:
User: Export only python repo differences to GitHub
Response: {{ "git": ["python"], "ftp": [] }}
User: Upload java and php to FTP only
Response: {{ "git": [], "ftp": ["java", "php"] }}
User: Export all differences to FTP and GitHub
Response: {{
"git": ["python", "algorithms", "php", "mlearning", "java"],
"ftp": ["python", "algorithms", "php", "mlearning", "java"]
}}
"""
    cached = get_cached_response(natural_language_cmd)
    if cached:
        print("? Using cached response from SQLite")
        response_text = cached
    else:
        print("? Sending to OpenAI...")
        response = client.chat.completions.create(
            model="gpt-4.1",
            messages=[
                {"role": "system", "content": system_prompt.strip()},
                {"role": "user", "content": f"Command: {natural_language_cmd}"}
            ]
        )
        response_text = response.choices[0].message.content.strip()

    # Models sometimes wrap JSON in markdown fences — strip them for parsing.
    cleaned = response_text
    if cleaned.startswith("```"):
        cleaned = cleaned.strip("`").strip()
        if cleaned.startswith("json"):
            cleaned = cleaned[len("json"):].strip()

    try:
        plan = json.loads(cleaned)
    except json.JSONDecodeError as e:
        print("Error parsing OpenAI response:", e)
        print("Raw response:", response_text)
        # Do NOT cache unparseable replies (the original cached the raw
        # reply before validating it, permanently poisoning the cache).
        return None

    if not cached:
        store_response(natural_language_cmd, response_text)
    return plan
def perform_git(repo_name):
    """Sync one repository with GitHub: pull, stage, commit, push.

    Args:
        repo_name: key into REPOS identifying the local checkout.
    """
    repo_path = REPOS[repo_name]
    print(f"? Updating Github repo: {repo_name}")
    # Run each command inside the repo via cwd= instead of os.chdir, so the
    # process-wide working directory is not silently mutated.
    git_steps = (
        ["git", "pull", "origin", "main", "--force"],
        ["git", "add", "."],
        ["git", "commit", "-am", f"{repo_name}-pages update"],
        ["git", "push", "origin", "main"],
    )
    for cmd in git_steps:
        # Best-effort: "git commit" exits non-zero when there is nothing to
        # commit, so no check=True here.
        subprocess.run(cmd, cwd=repo_path)
def get_today_date():
    """Return today's local date formatted as YYYY-MM-DD."""
    from datetime import datetime
    now = datetime.today()
    return f"{now:%Y-%m-%d}"
def get_changed_files(repo_path):
    """Return the paths of files changed in *repo_path* since today.

    Uses git's reflog date syntax (@{YYYY-MM-DD}) to diff against the state
    at the start of today; --diff-filter keeps Added/Copied/Modified/
    Renamed/Type-changed entries (the original listed R twice).
    Returns [] when the diff fails (e.g. no reflog entry for today).
    """
    try:
        result = subprocess.run(
            ["git", "diff", "--stat", f"@{{{get_today_date()}}}",
             "--diff-filter=ACMRT", "--name-only"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True,
            # Run inside the repo instead of os.chdir-ing the whole process.
            cwd=repo_path,
        )
    except subprocess.CalledProcessError as e:
        print("Git diff failed:", e.stderr)
        return []
    # Drop empty lines from the output before returning the file list.
    return [f for f in result.stdout.strip().split('\n') if f.strip()]
def perform_ftp(repo_name):
    """Upload today's changed files of one repository to the FTP mirror.

    Args:
        repo_name: key into REPOS identifying the local checkout; files go
            to f"{FTP_BASE}{repo_name}-pages" on the remote.
    """
    local_repo_path = REPOS[repo_name]
    remote_path = f"{FTP_BASE}{repo_name}-pages"
    print(f"? Uploading {repo_name} files to FTP ...")  # fixed "Uploding" typo
    changed_files = get_changed_files(local_repo_path)
    if not changed_files:
        print("No changed files to upload")
        return
    for rel_path in changed_files:
        local_file = os.path.join(local_repo_path, rel_path)
        remote_file = f"{remote_path}/{rel_path}"
        # git diff can report deleted/renamed-away paths; skip non-files.
        if not os.path.isfile(local_file):
            continue
        print(f"- {rel_path}")
        # NOTE(review): passing credentials on the curl command line exposes
        # them to other local users via the process list — consider curl's
        # --netrc or a config file.
        subprocess.run(["curl", "-T", local_file, remote_file,
                        "--user", f"{FTP_USER}:{FTP_PASS}"])
def main():
    """Entry point: read a deploy command, plan it, then run FTP and git steps."""
    init_db()
    # Command comes from argv when provided, otherwise interactively.
    raw = sys.argv[1] if len(sys.argv) > 1 else input("What should I do? \n> ")
    user_command = raw.strip()
    plan = get_action_plan(user_command)
    if not plan:
        print("No valid action plan. Aborting.")
        return
    print(f"Action plan: {plan}")
    # FTP uploads first, then GitHub pushes; unknown repo names are skipped.
    for name in plan.get("ftp", []):
        if name not in REPOS:
            continue
        perform_ftp(name)
    for name in plan.get("git", []):
        if name not in REPOS:
            continue
        perform_git(name)
    print("✅ All tasks completed.")


if __name__ == '__main__':
    main()