Initial documentation structure

commit 66d97011ab (2025-09-04 02:19:22 +00:00)
18 changed files with 3114 additions and 0 deletions

View File

@@ -0,0 +1,141 @@
#!/usr/bin/env python3
"""
Close PR with Feedback - Reject a PR and help AI learn
"""
import sys
import json
import yaml
import requests
from datetime import datetime
from typing import List
sys.path.append('/home/netops/orchestrator')
from gitea_integration import GiteaIntegration
from pr_feedback import PRFeedbackSystem
def close_pr_with_feedback(pr_number: int, reason: str, issues: List[str]):
"""Close a PR and record feedback for AI learning"""
# Load config
with open('/home/netops/orchestrator/config.yaml', 'r') as f:
config = yaml.safe_load(f)
# Initialize systems
gitea = GiteaIntegration(config['gitea'])
feedback_system = PRFeedbackSystem()
print(f"\n🚫 Closing PR #{pr_number} with feedback...")
# First, add a comment to the PR explaining why it's being closed
comment = f"""## 🤖 AI Configuration Review - Rejected
**Reason**: {reason}
**Issues Found**:
{chr(10).join(f'- {issue}' for issue in issues)}
This feedback has been recorded to improve future AI suggestions. The AI will learn from these issues and avoid them in future configurations.
### Specific Problems:
- **Security**: The any/any/any permit rule is too permissive
- **Best Practice**: Source addresses should be specific, not 'any'
- **Risk**: This configuration could expose the network to threats
The AI will generate better suggestions next time based on this feedback.
"""
    # Post a comment to the PR via the Gitea issues API
    # (assumes config['gitea']['repo'] is in "owner/repo" form)
    api_url = f"{config['gitea']['url']}/api/v1/repos/{config['gitea']['repo']}/issues/{pr_number}/comments"
headers = {
'Authorization': f"token {config['gitea']['token']}",
'Content-Type': 'application/json'
}
comment_data = {"body": comment}
try:
response = requests.post(api_url, json=comment_data, headers=headers)
if response.status_code in [200, 201]:
print("✅ Added feedback comment to PR")
else:
print(f"⚠️ Could not add comment: {response.status_code}")
except Exception as e:
print(f"⚠️ Error adding comment: {e}")
# Record feedback for AI learning
feedback_details = {
'reason': reason,
'specific_issues': '\n'.join(issues),
'configuration_issues': [
{'type': 'security_permissive', 'description': 'Rules too permissive (any/any/any)'},
{'type': 'security_missing', 'description': 'Missing source address restrictions'}
]
}
feedback_system.record_pr_feedback(pr_number, 'rejected', feedback_details)
# Update orchestrator state
state_file = '/var/lib/orchestrator/state.json'
try:
with open(state_file, 'r') as f:
state = json.load(f)
state['pending_pr'] = None
state['last_pr_status'] = 'rejected'
state['last_pr_rejected'] = datetime.now().isoformat()
with open(state_file, 'w') as f:
json.dump(state, f, indent=2)
print("✅ Updated orchestrator state")
except Exception as e:
print(f"⚠️ Could not update state: {e}")
# Show AI learning summary
patterns = feedback_system.analyze_feedback_patterns()
print(f"\n📊 AI Learning Summary:")
print(f"Total feedback entries: {patterns['total_prs']}")
print(f"Rejected PRs: {patterns['rejected']}")
print(f"Security concerns: {patterns['security_concerns']}")
print("\n✅ PR closed with feedback. The AI will learn from this!")
print("\nNext time the AI generates a configuration, it will:")
print("- Avoid any/any/any permit rules")
print("- Use specific source addresses")
print("- Follow security best practices")
print("\n⚠️ IMPORTANT: Now manually close the PR in Gitea!")
print(f"Go to: {config['gitea']['url']}/{config['gitea']['repo']}/pulls/{pr_number}")
print("Click the 'Close Pull Request' button")
# Quick reject function for current PR
def reject_current_pr():
"""Reject PR #2 with specific feedback"""
close_pr_with_feedback(
pr_number=2,
reason="Security policy too permissive - any/any/any permit rule is dangerous",
issues=[
"ALLOW-ESTABLISHED policy permits all traffic from trust to untrust",
"Source address should not be 'any' - use specific networks",
"Application should not be 'any' - specify required services only",
"This configuration could expose internal network to threats"
]
)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "--current":
# Reject the current PR #2
reject_current_pr()
else:
# Interactive mode
pr_num = input("Enter PR number to reject: ")
reason = input("Reason for rejection: ")
issues = []
print("Enter specific issues (empty line to finish):")
while True:
issue = input("- ")
if not issue:
break
issues.append(issue)
close_pr_with_feedback(int(pr_num), reason, issues)
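
For reference, a minimal sketch of the config.yaml layout this script assumes, plus a hypothetical invocation. Key names are inferred from the code above; the values and the PR number are illustrative only.

# Assumed config.yaml shape, expressed as the dict yaml.safe_load() would return:
EXAMPLE_CONFIG = {
    'gitea': {
        'url': 'http://gitea.local:3000',   # base URL without trailing slash (assumption)
        'token': '<api token>',
        'repo': 'netops/srx-config',        # "owner/repo" so the API paths resolve
    }
}

# Hypothetical call:
# close_pr_with_feedback(
#     pr_number=7,
#     reason="Security policy too permissive",
#     issues=["source-address 'any' used in ALLOW-WEB", "logging not enabled on the policy"],
# )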

View File

@@ -0,0 +1,377 @@
#!/usr/bin/env python3
"""
Gitea PR Creation and Feedback Handler
Creates real PRs in Gitea and handles rejection feedback
"""
import os
import sys
import json
import yaml
import requests
from datetime import datetime
from pathlib import Path
import subprocess
import base64
class GiteaPRManager:
def __init__(self, config_path='/home/netops/orchestrator/config.yaml'):
"""Initialize with Gitea configuration"""
# Load config
with open(config_path, 'r') as f:
self.config = yaml.safe_load(f)
self.gitea_config = self.config.get('gitea', {})
self.base_url = self.gitea_config.get('url', 'http://localhost:3000')
self.token = self.gitea_config.get('token', '')
self.repo_owner = self.gitea_config.get('owner', 'netops')
self.repo_name = self.gitea_config.get('repo', 'srx-config')
self.headers = {
'Authorization': f'token {self.token}',
'Content-Type': 'application/json'
}
self.pending_prs_dir = Path('/shared/ai-gitops/pending_prs')
self.feedback_dir = Path('/shared/ai-gitops/feedback')
def create_pr_from_ai_suggestions(self, pr_file=None):
"""Create a PR in Gitea from AI suggestions"""
print("\n" + "="*60)
print("Creating Gitea PR from AI Suggestions")
print("="*60)
# Find latest PR file if not specified
if pr_file is None:
pr_files = sorted(self.pending_prs_dir.glob('pr_*.json'),
key=lambda x: x.stat().st_mtime, reverse=True)
if not pr_files:
print("❌ No pending PR files found")
return None
pr_file = pr_files[0]
print(f"📄 Using PR file: {pr_file.name}")
# Load PR data
with open(pr_file, 'r') as f:
pr_data = json.load(f)
# Create a new branch
branch_name = f"ai-suggestions-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
# Create the configuration file content
config_content = f"""# AI-Generated Network Configuration
# Generated: {pr_data.get('timestamp', datetime.now().isoformat())}
# Model: {pr_data.get('model', 'llama2:13b')}
# Feedback Aware: {pr_data.get('feedback_aware', False)}
{pr_data.get('suggestions', '')}
"""
# Create branch and file via Gitea API
try:
# First, get the default branch SHA
repo_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}"
repo_response = requests.get(repo_url, headers=self.headers)
if repo_response.status_code != 200:
print(f"❌ Failed to get repo info: {repo_response.status_code}")
print(f" Response: {repo_response.text}")
return None
default_branch = repo_response.json().get('default_branch', 'main')
# Get the SHA of the default branch
branch_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/branches/{default_branch}"
branch_response = requests.get(branch_url, headers=self.headers)
if branch_response.status_code != 200:
print(f"❌ Failed to get branch info: {branch_response.status_code}")
return None
base_sha = branch_response.json()['commit']['id']
# Create new branch
create_branch_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/branches"
branch_data = {
'new_branch_name': branch_name,
'old_branch_name': default_branch
}
branch_create = requests.post(create_branch_url,
headers=self.headers,
json=branch_data)
if branch_create.status_code not in [201, 200]:
print(f"❌ Failed to create branch: {branch_create.status_code}")
print(f" Response: {branch_create.text}")
return None
print(f"✅ Created branch: {branch_name}")
# Create or update file in the new branch
file_path = f"ai-suggestions/config_{datetime.now().strftime('%Y%m%d_%H%M%S')}.conf"
file_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/contents/{file_path}"
file_data = {
'branch': branch_name,
'content': base64.b64encode(config_content.encode()).decode(),
'message': f"AI suggestions: {pr_data.get('title', 'Network optimization')}"
}
file_response = requests.post(file_url, headers=self.headers, json=file_data)
            if file_response.status_code not in [201, 200]:
                print(f"⚠️ Could not create file via API ({file_response.status_code}); continuing with the PR anyway")
else:
print(f"✅ Created config file: {file_path}")
# Create Pull Request
pr_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/pulls"
pr_body = f"""## AI-Generated Network Configuration
### Analysis Context
- **Zones Analyzed**: {', '.join(pr_data.get('network_context', {}).get('zones', []))}
- **Policies Reviewed**: {pr_data.get('network_context', {}).get('policies', 0)}
- **Feedback Aware**: {pr_data.get('feedback_aware', False)}
### Suggested Changes
```junos
{pr_data.get('suggestions', '')}
```
### Review Checklist
- [ ] No any/any/any rules
- [ ] Logging enabled on all policies
- [ ] Proper zone segmentation
- [ ] Address-sets used instead of individual IPs
- [ ] Applications are specific (not "any")
### How to Test
1. Apply to lab SRX first
2. Verify traffic flow
3. Check logs for any issues
4. Apply to production if tests pass
---
*This PR was automatically generated by the AI Network Automation system*
"""
pr_request = {
'title': pr_data.get('title', 'AI Network Configuration Suggestions'),
'head': branch_name,
'base': default_branch,
'body': pr_body
}
pr_response = requests.post(pr_url, headers=self.headers, json=pr_request)
if pr_response.status_code == 201:
pr_info = pr_response.json()
pr_number = pr_info['number']
pr_html_url = pr_info['html_url']
print(f"\n✅ Pull Request created successfully!")
print(f" PR Number: #{pr_number}")
print(f" URL: {pr_html_url}")
# Save PR info for tracking
pr_tracking = {
'pr_number': pr_number,
'pr_url': pr_html_url,
'branch': branch_name,
'created_at': datetime.now().isoformat(),
'ai_request_id': pr_data.get('request_id'),
'suggestions_file': str(pr_file)
}
tracking_file = self.pending_prs_dir / f"gitea_pr_{pr_number}.json"
with open(tracking_file, 'w') as f:
json.dump(pr_tracking, f, indent=2)
return pr_number
else:
print(f"❌ Failed to create PR: {pr_response.status_code}")
print(f" Response: {pr_response.text}")
return None
except Exception as e:
print(f"❌ Error creating PR: {e}")
return None
def reject_pr_with_feedback(self, pr_number, feedback_message):
"""Reject a PR and save feedback for AI learning"""
print("\n" + "="*60)
print(f"Rejecting PR #{pr_number} with Feedback")
print("="*60)
# Close the PR via API
pr_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/pulls/{pr_number}"
# Add comment with feedback
comment_url = f"{pr_url}/reviews"
comment_data = {
'body': feedback_message,
            'event': 'REQUEST_CHANGES'  # Gitea's review API accepts REQUEST_CHANGES; 'REJECT' is not a valid event
}
comment_response = requests.post(comment_url, headers=self.headers, json=comment_data)
if comment_response.status_code not in [200, 201]:
# Try alternative: just add a comment
issue_comment_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/issues/{pr_number}/comments"
comment_data = {
'body': f"❌ **REJECTED**\n\n{feedback_message}"
}
requests.post(issue_comment_url, headers=self.headers, json=comment_data)
# Close the PR
close_data = {
'state': 'closed'
}
close_response = requests.patch(pr_url, headers=self.headers, json=close_data)
if close_response.status_code == 200:
print(f"✅ PR #{pr_number} closed")
else:
print(f"⚠️ Could not close PR via API")
# Save feedback for AI learning
feedback_entry = {
'pr_number': pr_number,
'timestamp': datetime.now().isoformat(),
'feedback_type': 'rejected',
'reviewer': 'security_team',
'details': {
'reason': feedback_message,
'specific_issues': self.parse_feedback_for_issues(feedback_message)
}
}
# Load and update feedback history
feedback_file = self.feedback_dir / 'pr_feedback_history.json'
self.feedback_dir.mkdir(parents=True, exist_ok=True)
if feedback_file.exists():
with open(feedback_file, 'r') as f:
history = json.load(f)
else:
history = []
history.append(feedback_entry)
with open(feedback_file, 'w') as f:
json.dump(history, f, indent=2)
print(f"✅ Feedback saved for AI learning")
print(f" Total feedback entries: {len(history)}")
return feedback_entry
def parse_feedback_for_issues(self, feedback_text):
"""Parse feedback text to extract specific issues"""
issues = []
# Common security issues to look for
patterns = [
('any/any/any', 'Never use any/any/any rules'),
('no logging', 'Always enable logging'),
('source-address any', 'Avoid using source-address any'),
('destination-address any', 'Avoid using destination-address any'),
('application any', 'Specify applications instead of any'),
('overly permissive', 'Rules are too permissive'),
('zone segmentation', 'Improper zone segmentation'),
('iot', 'IoT security concerns')
]
feedback_lower = feedback_text.lower()
for pattern, description in patterns:
if pattern in feedback_lower:
issues.append({
'pattern': pattern,
'description': description,
'type': 'security'
})
return issues if issues else feedback_text
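
    # Illustrative example for parse_feedback_for_issues() above (added for clarity;
    # the feedback string is hypothetical):
    #
    #   parse_feedback_for_issues("Policy uses source-address any")
    #   -> [{'pattern': 'source-address any',
    #        'description': 'Avoid using source-address any',
    #        'type': 'security'}]
    #
    # When no known pattern matches, the raw feedback text is returned instead of a list.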
def main():
"""Main entry point for testing"""
print("\n" + "="*60)
print(" GITEA PR FEEDBACK TESTING")
print("="*60)
manager = GiteaPRManager()
print("\nOptions:")
print("1. Create a new PR from latest AI suggestions")
print("2. Reject a PR with feedback")
print("3. Run complete test cycle")
choice = input("\nSelect option (1-3): ")
if choice == '1':
pr_number = manager.create_pr_from_ai_suggestions()
if pr_number:
print(f"\n✅ Successfully created PR #{pr_number}")
print("\nYou can now:")
print(f"1. Review it in Gitea")
print(f"2. Reject it with: python3 gitea_pr_feedback.py")
elif choice == '2':
        pr_number = int(input("Enter PR number to reject: "))
print("\nEnter rejection feedback (press Ctrl+D when done):")
feedback_lines = []
try:
while True:
feedback_lines.append(input())
except EOFError:
pass
feedback = '\n'.join(feedback_lines)
if not feedback:
feedback = """This configuration has security issues:
1. Any/any/any rules detected - this violates zero-trust principles
2. No logging enabled on some policies
3. Overly permissive access between zones
Please revise to:
- Use specific address-sets
- Enable logging on all policies
- Implement proper zone segmentation"""
manager.reject_pr_with_feedback(pr_number, feedback)
elif choice == '3':
print("\n📋 Complete test cycle:")
print("1. Creating PR from AI suggestions...")
pr_number = manager.create_pr_from_ai_suggestions()
if pr_number:
print(f"\n2. Waiting for review...")
input(" Press Enter to simulate rejection...")
feedback = """Security Review Failed:
❌ Critical Issues Found:
- Any/any/any rule in policy ALLOW-ALL
- No logging on DMZ policies
- IoT zone has unrestricted access to HOME zone
Requirements:
- All policies must use specific addresses
- Logging must be enabled
- IoT devices need strict access control
"""
print("\n3. Rejecting PR with feedback...")
manager.reject_pr_with_feedback(pr_number, feedback)
print("\n4. AI will learn from this feedback in next run")
print(" Run: python3 run_pipeline.py --skip-netflow")
print(" The AI should avoid these mistakes next time!")
if __name__ == "__main__":
main()
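
For reference, a minimal sketch of the pending-PR JSON that create_pr_from_ai_suggestions() reads from /shared/ai-gitops/pending_prs/. Field names are inferred from the .get() calls above; the values, and any extra keys the real pipeline writes, are illustrative assumptions.

# Example pr_*.json content, shown as the dict json.load() would return:
example_pending_pr = {
    "request_id": "req-20250904-001",
    "timestamp": "2025-09-04T02:19:22",
    "model": "llama2:13b",
    "feedback_aware": True,
    "title": "AI Network Configuration Suggestions",
    "network_context": {"zones": ["trust", "untrust", "dmz"], "policies": 12},
    "suggestions": "set security policies from-zone trust to-zone untrust "
                   "policy ALLOW-WEB match source-address LAN-NET",
}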

View File

@@ -0,0 +1,316 @@
#!/usr/bin/env python3
"""
Gitea Webhook Listener - Automatically captures PR approvals/rejections
Runs on orchestrator VM to capture feedback in real-time
"""
from flask import Flask, request, jsonify
import json
import logging
import subprocess
from datetime import datetime
from pathlib import Path
from dotenv import load_dotenv
import os
import hmac
import hashlib
# Load environment variables from home directory
env_path = Path.home() / '.env'
load_dotenv(env_path)
# Webhook secret loaded from the .env file above
WEBHOOK_SECRET = os.environ.get('WEBHOOK_SECRET', '')
app = Flask(__name__)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('/var/log/webhook-listener.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
# Configuration
FEEDBACK_FILE = "/shared/ai-gitops/feedback/pr_feedback_history.json"
LEARNING_FILE = "/shared/ai-gitops/learning/patterns.json"
def ensure_directories():
"""Ensure required directories exist"""
Path(FEEDBACK_FILE).parent.mkdir(parents=True, exist_ok=True)
Path(LEARNING_FILE).parent.mkdir(parents=True, exist_ok=True)
# Ensure deployment log directory exists
Path('/var/log/orchestrator').mkdir(parents=True, exist_ok=True)
def load_feedback_history():
"""Load existing feedback history"""
if Path(FEEDBACK_FILE).exists():
try:
with open(FEEDBACK_FILE, 'r') as f:
return json.load(f)
        except (json.JSONDecodeError, OSError):
            return []
return []
def save_feedback_history(feedback):
"""Save updated feedback history"""
with open(FEEDBACK_FILE, 'w') as f:
json.dump(feedback, f, indent=2)
logger.info(f"Saved feedback history with {len(feedback)} entries")
def load_learning_patterns():
"""Load learning patterns"""
if Path(LEARNING_FILE).exists():
try:
with open(LEARNING_FILE, 'r') as f:
return json.load(f)
        except (json.JSONDecodeError, OSError):
            pass
return {"avoid_patterns": [], "successful_patterns": []}
def save_learning_patterns(patterns):
"""Save learning patterns"""
with open(LEARNING_FILE, 'w') as f:
json.dump(patterns, f, indent=2)
logger.info("Updated learning patterns")
def extract_config_changes(pr_body):
"""Extract SRX config commands from PR body"""
if not pr_body:
return []
configs = []
lines = pr_body.split('\n')
in_code_block = False
for line in lines:
line = line.strip()
if line.startswith('```'):
in_code_block = not in_code_block
elif in_code_block and line.startswith('set '):
configs.append(line)
elif not in_code_block and line.startswith('set '):
configs.append(line)
return configs
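
# Illustrative example for extract_config_changes() above (added for clarity; the
# PR body and the Junos command are hypothetical):
#
#   body = "Proposed change:\n```junos\nset security policies from-zone trust " \
#          "to-zone untrust policy ALLOW-WEB match application junos-https\n```"
#   extract_config_changes(body)
#   -> ['set security policies from-zone trust to-zone untrust policy ALLOW-WEB '
#       'match application junos-https']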
def update_learning(feedback_entry):
"""Update AI learning patterns based on feedback"""
patterns = load_learning_patterns()
if feedback_entry["status"] == "rejected":
# Add rejected patterns
for config in feedback_entry.get("config_changes", []):
if config not in patterns["avoid_patterns"]:
patterns["avoid_patterns"].append(config)
# Mark common rejection reasons
reason = feedback_entry.get("reason", "").lower()
if "any any any" in reason or "any/any/any" in reason:
patterns["avoid_patterns"].append("any-any-any-pattern")
if "redundant" in reason or "already configured" in reason:
patterns["avoid_patterns"].append("redundant-config")
if "too broad" in reason or "overly permissive" in reason:
patterns["avoid_patterns"].append("overly-permissive")
elif feedback_entry["status"] == "approved":
# Track successful patterns
for config in feedback_entry.get("config_changes", []):
if config not in patterns["successful_patterns"]:
patterns["successful_patterns"].append(config)
save_learning_patterns(patterns)
logger.info(f"Learning updated: {len(patterns['avoid_patterns'])} patterns to avoid")
@app.route('/webhook', methods=['POST'])
def handle_webhook():
"""Main webhook handler for Gitea PR events"""
try:
# Verify webhook signature for security
if WEBHOOK_SECRET:
signature = request.headers.get('X-Gitea-Signature', '')
if not signature:
logger.warning("No signature provided in webhook request")
return jsonify({"error": "No signature"}), 403
            # Calculate the expected signature. Gitea places the bare HMAC-SHA256 hex
            # digest in X-Gitea-Signature (no 'sha256=' prefix, unlike GitHub's header).
            expected = hmac.new(
                WEBHOOK_SECRET.encode(),
                request.data,
                hashlib.sha256
            ).hexdigest()
            # Compare signatures, tolerating an optional 'sha256=' prefix
            provided = signature[len('sha256='):] if signature.startswith('sha256=') else signature
            if not hmac.compare_digest(provided, expected):
logger.warning(f"Invalid signature from {request.remote_addr}")
return jsonify({"error": "Invalid signature"}), 403
logger.debug("Webhook signature verified successfully")
# Get event data
data = request.json
event = request.headers.get('X-Gitea-Event', '')
logger.info(f"Received event: {event}")
if event != "pull_request":
return jsonify({"status": "ignored", "reason": "Not a PR event"}), 200
action = data.get('action', '')
pr = data.get('pull_request', {})
# Check if this is an AI-generated PR
pr_title = pr.get('title', '')
if 'AI-Generated' not in pr_title and 'Network Configuration Update' not in pr_title:
logger.info(f"Ignoring non-AI PR: {pr_title}")
return jsonify({"status": "ignored", "reason": "Not AI-generated"}), 200
# Process closed PRs (either merged or rejected)
if action == "closed":
pr_number = pr.get('number', 0)
pr_body = pr.get('body', '')
merged = pr.get('merged', False)
# Extract config changes from PR body
config_changes = extract_config_changes(pr_body)
# Create feedback entry
feedback_entry = {
"timestamp": datetime.now().isoformat(),
"pr_number": pr_number,
"pr_title": pr_title,
"status": "approved" if merged else "rejected",
"config_changes": config_changes,
"merged": merged
}
# For rejected PRs, try to extract reason from PR comments or description
if not merged:
feedback_entry["feedback_type"] = "rejected" # For compatibility
# Look for common rejection patterns in title or last comment
if "any" in str(config_changes).lower():
feedback_entry["reason"] = "Contains any/any/any patterns"
else:
feedback_entry["reason"] = "Changes not needed or incorrect"
logger.info(f"❌ PR #{pr_number} REJECTED - {pr_title}")
else:
feedback_entry["feedback_type"] = "approved" # For compatibility
logger.info(f"✅ PR #{pr_number} APPROVED - {pr_title}")
# Save feedback
feedback = load_feedback_history()
feedback.append(feedback_entry)
save_feedback_history(feedback)
# Update learning patterns
update_learning(feedback_entry)
# AUTO-DEPLOYMENT CODE - If PR was merged, trigger deployment
if merged:
logger.info(f"PR #{pr_number} was merged - triggering auto-deployment")
try:
result = subprocess.run(
[
'/home/netops/orchestrator/venv/bin/python',
'/home/netops/orchestrator/deploy_approved.py'
],
capture_output=True,
text=True,
timeout=300
)
if result.returncode == 0:
logger.info(f"✅ Successfully auto-deployed PR #{pr_number}")
# Log deployment
with open('/var/log/orchestrator/deployments.log', 'a') as f:
f.write(f"{datetime.now().isoformat()} - Auto-deployed PR #{pr_number}\n")
else:
logger.error(f"❌ Auto-deployment failed: {result.stderr}")
except subprocess.TimeoutExpired:
logger.error("Deployment timed out after 5 minutes")
except Exception as e:
logger.error(f"Deployment error: {e}")
return jsonify({
"status": "recorded",
"pr_number": pr_number,
"decision": feedback_entry["status"],
"configs_captured": len(config_changes),
"deployed": merged # Indicate if deployment was triggered
}), 200
return jsonify({"status": "ignored", "reason": f"Action {action} not processed"}), 200
except Exception as e:
logger.error(f"Error processing webhook: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/health', methods=['GET'])
def health_check():
"""Health check endpoint"""
return jsonify({
"status": "healthy",
"service": "webhook-listener",
"feedback_file": str(Path(FEEDBACK_FILE).exists()),
"learning_file": str(Path(LEARNING_FILE).exists())
}), 200
@app.route('/stats', methods=['GET'])
def get_stats():
"""Get feedback statistics"""
try:
feedback = load_feedback_history()
patterns = load_learning_patterns()
approved = len([f for f in feedback if f.get("status") == "approved"])
rejected = len([f for f in feedback if f.get("status") == "rejected"])
return jsonify({
"total_prs": len(feedback),
"approved": approved,
"rejected": rejected,
"approval_rate": f"{(approved/len(feedback)*100):.1f}%" if feedback else "0%",
"patterns_to_avoid": len(patterns.get("avoid_patterns", [])),
"successful_patterns": len(patterns.get("successful_patterns", [])),
"last_feedback": feedback[-1]["timestamp"] if feedback else None
}), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route('/feedback/recent', methods=['GET'])
def recent_feedback():
"""Get recent feedback entries"""
try:
feedback = load_feedback_history()
recent = feedback[-5:] if len(feedback) > 5 else feedback
recent.reverse() # Newest first
return jsonify(recent), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route('/learning/patterns', methods=['GET'])
def get_patterns():
"""Get current learning patterns"""
try:
patterns = load_learning_patterns()
return jsonify(patterns), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
# Ensure directories exist
ensure_directories()
logger.info("Starting Gitea webhook listener...")
logger.info(f"Feedback file: {FEEDBACK_FILE}")
logger.info(f"Learning file: {LEARNING_FILE}")
# Run Flask app
app.run(host='0.0.0.0', port=5000, debug=False)
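
For local testing, a small client sketch (separate from the listener above; the host, port, secret, and PR details are assumptions) that signs a sample pull_request payload the same way the handler verifies it and posts it to /webhook:

#!/usr/bin/env python3
"""Hypothetical test client for the webhook listener (illustrative only)."""
import hmac
import hashlib
import json
import requests

SECRET = "change-me"                       # must match WEBHOOK_SECRET in ~/.env
URL = "http://localhost:5000/webhook"      # assumed listener address

payload = {
    "action": "closed",
    "pull_request": {
        "number": 42,                      # illustrative PR number
        "title": "AI-Generated Network Configuration Suggestions",
        "body": "```junos\nset security policies from-zone trust to-zone untrust "
                "policy ALLOW-WEB match application junos-https\n```",
        "merged": False,
    },
}
body = json.dumps(payload).encode()
signature = hmac.new(SECRET.encode(), body, hashlib.sha256).hexdigest()

resp = requests.post(
    URL,
    data=body,
    headers={
        "Content-Type": "application/json",
        "X-Gitea-Event": "pull_request",
        "X-Gitea-Signature": signature,    # bare hex digest, matching the check above
    },
)
print(resp.status_code, resp.json())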