Skip to content

Commit c31780b

Browse files
committed
initial app setup: working code-generation project
1 parent d0cf238 commit c31780b

File tree

4 files changed

+290
-0
lines changed

4 files changed

+290
-0
lines changed
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
# Secret files
2+
secret.txt
3+
*.key
4+
*.env
5+
6+
# Python
7+
__pycache__/
8+
*.py[cod]
9+
*$py.class
10+
*.so
11+
.Python
12+
build/
13+
develop-eggs/
14+
dist/
15+
downloads/
16+
eggs/
17+
.eggs/
18+
lib/
19+
lib64/
20+
parts/
21+
sdist/
22+
var/
23+
wheels/
24+
*.egg-info/
25+
.installed.cfg
26+
*.egg
27+
MANIFEST
28+
29+
# Virtual environments
30+
venv/
31+
env/
32+
ENV/
33+
env.bak/
34+
venv.bak/
35+
36+
# IDE
37+
.vscode/
38+
.idea/
39+
*.swp
40+
*.swo
41+
*~
42+
43+
# OS
44+
.DS_Store
45+
Thumbs.db
46+
47+
# Generated code files (optional - remove if you want to track generated files)
48+
generated_*.py
49+
generated_*.js
50+
generated_*.java
51+
generated_*.cpp
52+
generated_*.cs
53+
generated_*.go
54+
generated_*.rs
55+
generated_*.php
56+
generated_*.rb
57+
generated_*.swift
58+
generated_*.kt
59+
generated_*.ts
60+
generated_*.html
61+
generated_*.css
62+
generated_*.sql
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
# Code Generation CLI (GitHub Models)
2+
3+
A minimal command‑line tool that generates code from a short description and a target language using the GitHub Models (OpenAI‑compatible) API.
4+
5+
## What it does
6+
- Takes two inputs: program name/description and programming language
7+
- Calls the GitHub Models Chat Completions API
8+
- Prints the generated code to stdout
9+
10+
## Requirements
11+
- Python 3.9+
12+
- Internet access to `https://models.github.ai/inference`
13+
- GitHub Personal Access Token (fine‑grained) with Models/AI inference access
14+
15+
## Setup
16+
1) Create and activate a virtual environment
17+
```bash
18+
python -m venv .venv
19+
source .venv/bin/activate # Windows PowerShell: .\.venv\Scripts\Activate.ps1
20+
```
21+
22+
2) Install dependencies
23+
```bash
24+
pip install -r requirements.txt
25+
```
26+
27+
3) Create `secret.txt`
28+
```text
29+
<YOUR_GITHUB_TOKEN_HERE>
30+
```
31+
- Single line, no quotes, no key name
32+
- File must be in the same directory as `code_generator.py`
33+
34+
## Usage
35+
```bash
36+
python code_generator.py "Two Sum" python
37+
```
38+
39+
Optional logging and diagnostics:
40+
```bash
41+
# Verbose logs
42+
python code_generator.py "Two Sum" python --log-level DEBUG
43+
44+
# Deep connection diagnostics (DNS/TLS/proxy/CA info)
45+
python code_generator.py "Two Sum" python --debug-connect --log-level DEBUG
46+
```
47+
48+
## Defaults
49+
- Endpoint: `https://models.github.ai/inference`
50+
- Model: `openai/gpt-4o-mini`
51+
52+
## Troubleshooting
53+
- 401 Unauthorized in diagnostics: token missing/invalid or lacks Models permissions
54+
- 429 Too Many Requests: rate limit; wait and retry
55+
- Connection errors: check proxy/VPN/firewall; try `--debug-connect` and verify TLS succeeds
56+
- Cert issues: ensure `certifi` is installed in the venv (`pip install certifi`)
57+
58+
## Files
59+
```
60+
code_generator.py # CLI
61+
secret.txt # GitHub token (git‑ignored)
62+
requirements.txt # Dependencies
63+
.gitignore # Ignore rules
64+
README.md # This file
65+
```
Lines changed: 161 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,161 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Code Generator CLI using GitHub Models (OpenAI-compatible API)
4+
5+
Usage:
6+
python code_generator.py "Program description" language
7+
8+
Reads the token from secret.txt (same directory) and prints the generated code.
9+
"""
10+
11+
import sys
12+
import argparse
13+
import logging
14+
import socket
15+
import ssl
16+
from urllib.parse import urlparse
17+
from pathlib import Path
18+
from typing import Optional
19+
from openai import OpenAI, APIConnectionError, RateLimitError, APIStatusError
20+
21+
# GitHub Models exposes an OpenAI-compatible API at this endpoint; the model
# identifier follows the "publisher/name" convention used by that service.
DEFAULT_ENDPOINT = "https://models.github.ai/inference"
DEFAULT_MODEL = "openai/gpt-4o-mini"
23+
24+
25+
def load_token_from_secret() -> str:
    """Read the GitHub token from ``secret.txt``.

    Looks next to this script first (the location the README documents),
    then falls back to the current working directory for backward
    compatibility with the previous CWD-relative behavior.

    Returns:
        The token string with surrounding whitespace stripped.

    Exits:
        With status 1 (after printing an error) if the file is missing
        or empty.
    """
    script_dir = Path(__file__).resolve().parent
    candidates = (script_dir / "secret.txt", Path("secret.txt"))
    secret_path = next((p for p in candidates if p.exists()), None)
    if secret_path is None:
        print("Error: secret.txt not found. Place your GitHub token in secret.txt")
        sys.exit(1)
    token = secret_path.read_text(encoding="utf-8").strip()
    if not token:
        print("Error: secret.txt is empty.")
        sys.exit(1)
    return token
35+
36+
37+
def generate_code(program_name: str, language: str, token: str, endpoint: str = DEFAULT_ENDPOINT, model: str = DEFAULT_MODEL) -> str:
    """Generate source code for *program_name* in *language* via GitHub Models.

    Args:
        program_name: Name or short description of the program to generate.
        language: Target programming language.
        token: GitHub token used as the API key.
        endpoint: OpenAI-compatible base URL.
        model: Model identifier in "publisher/name" form.

    Returns:
        The generated code as a stripped string (empty string if the model
        returned no content).

    Raises:
        RuntimeError: If all retry attempts fail with a transient error.
    """
    client = OpenAI(api_key=token, base_url=endpoint, timeout=30.0)

    prompt = f"""
Generate a complete, working {language} program for: {program_name}

Requirements:
- Include clear comments and minimal error handling
- Include example usage if applicable
- Follow best practices for {language}
- Only return the code, no explanations or markdown formatting
"""

    # Retry transient failures with exponential backoff (1s, 2s before the
    # 2nd/3rd attempt). Retrying immediately is pointless for 429 rate
    # limits and aggravates the server.
    last_error: Optional[Exception] = None
    for attempt in range(3):
        if attempt:
            time.sleep(2 ** (attempt - 1))
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": f"You are an expert {language} programmer. Generate clean, well-documented code."},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=2000,
                temperature=0.7,
            )
            # message.content may legitimately be None; never crash on .strip().
            content = response.choices[0].message.content
            return (content or "").strip()
        except (APIConnectionError, RateLimitError, APIStatusError) as e:
            last_error = e

    raise RuntimeError(f"Failed to generate after retries: {last_error}")
68+
69+
def debug_connection(token: str, endpoint: str = DEFAULT_ENDPOINT) -> None:
    """Run deep connectivity diagnostics against *endpoint*, logging results.

    Checks, in order: proxy/CA environment variables, the certifi CA bundle
    (if installed), DNS resolution of the endpoint host, a TLS handshake,
    and a minimal raw HTTP/1.1 GET. Only the first 6 characters of *token*
    are sent, so the full credential cannot leak into a probe.

    Args:
        token: GitHub token; truncated before use in the HTTP request.
        endpoint: Base URL of the OpenAI-compatible API.
    """
    logging.info("Starting connection diagnostics")
    logging.info("Endpoint: %s", endpoint)
    # Proxy/CA environment variables are a frequent cause of connection failures.
    logging.info("Env HTTPS_PROXY=%s", os.environ.get("HTTPS_PROXY"))
    logging.info("Env HTTP_PROXY=%s", os.environ.get("HTTP_PROXY"))
    logging.info("Env SSL_CERT_FILE=%s", os.environ.get("SSL_CERT_FILE"))
    logging.info("Env REQUESTS_CA_BUNDLE=%s", os.environ.get("REQUESTS_CA_BUNDLE"))

    # CA bundle path (certifi if available); local import is deliberate so
    # diagnostics still run when certifi is not installed.
    ca_path = None
    try:
        import certifi  # type: ignore
        ca_path = certifi.where()
        logging.info("certifi.where() => %s", ca_path)
    except Exception as e:
        logging.warning("certifi not available or failed: %s", repr(e))

    parsed = urlparse(endpoint)
    host = parsed.hostname or ""
    port = parsed.port or 443
    path = (parsed.path.rstrip("/") or "") + "/v1/models"
    logging.info("Resolved request target host=%s port=%d path=%s", host, port, path)

    # DNS resolution
    try:
        addrs = socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
        unique_ips = sorted({ai[4][0] for ai in addrs})
        logging.info("DNS A/AAAA results: %s", ", ".join(unique_ips))
    except Exception as e:
        logging.error("DNS resolution failed: %s", repr(e))

    # TLS handshake and a simple GET using only the stdlib, to isolate
    # failures from the openai client's own HTTP stack.
    try:
        context = ssl.create_default_context()
        if ca_path:
            context.load_verify_locations(cafile=ca_path)
        with socket.create_connection((host, port), timeout=10) as sock:
            with context.wrap_socket(sock, server_hostname=host) as ssock:
                logging.info("TLS handshake OK. Cipher=%s, TLSVersion=%s", ssock.cipher(), ssock.version())
                # Minimal HTTP/1.1 request; token truncated to 6 chars on purpose.
                req = (
                    f"GET {path} HTTP/1.1\r\n"
                    f"Host: {host}\r\n"
                    f"Authorization: Bearer {token[:6]}...\r\n"
                    f"User-Agent: debug-connection-cli\r\n"
                    f"Connection: close\r\n\r\n"
                )
                ssock.sendall(req.encode("utf-8"))
                data = ssock.recv(4096)
                preview = data.decode("latin1", errors="replace")
                logging.info("Initial HTTP response bytes=%d", len(data))
                logging.debug("HTTP response preview:\n%s", preview[:1000])
    except ssl.SSLError as e:
        logging.error("TLS/SSL error: %s", repr(e))
    except Exception as e:
        logging.error("Socket/HTTP error: %s", repr(e))
127+
128+
129+
def parse_args() -> argparse.Namespace:
    """Build the CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description="Generate code using GitHub Models")
    # Positional arguments, declared as (name, help) pairs.
    for arg_name, help_text in (
        ("program_name", "Name or description of the program to generate"),
        ("language", "Programming language for the generated code"),
    ):
        parser.add_argument(arg_name, help=help_text)
    parser.add_argument(
        "--log-level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level",
    )
    parser.add_argument(
        "--debug-connect",
        action="store_true",
        help="Run deep connection diagnostics before generation",
    )
    return parser.parse_args()
136+
137+
138+
def main() -> None:
    """CLI entry point: configure logging, load the token, print generated code."""
    args = parse_args()
    log_level = getattr(logging, args.log_level.upper(), logging.INFO)
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%H:%M:%S",
    )
    token = load_token_from_secret()
    if args.debug_connect:
        # Optional deep diagnostics before attempting generation.
        debug_connection(token, DEFAULT_ENDPOINT)
    try:
        print(generate_code(args.program_name, args.language, token))
    except KeyboardInterrupt:
        print("\nOperation cancelled.")
        sys.exit(1)
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
161+
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
openai>=1.0.0
2+
pathlib2>=2.3.7; python_version < "3.4"

0 commit comments

Comments
 (0)