Auto-sync from all VMs - 2025-09-04

This commit is contained in:
2025-09-04 19:34:26 +00:00
parent 7f1561371f
commit d2fed2cc03
37 changed files with 6974 additions and 26 deletions

View File

@@ -1,27 +1,17 @@
# Current System State
Last Updated: Thu Sep 4 07:34:25 PM UTC 2025
## Active Services
- orchestrator.service - Running 11+ days
- gitea-webhook.service - Running 11+ days
- ai-processor.service - Running 19+ days
- ollama.service - Running 19+ days
## Service Status
### Orchestrator
Active: active (running) since Thu 2025-09-04 01:51:27 UTC; 17h ago
## Recent Activity
- Last analysis: September 4, 2025
- PRs created: 14+
- Success rate: 100% (after learning)
- Feedback iterations: 8
### AI Processor
Active: active (running) since Thu 2025-09-04 03:48:00 UTC; 15h ago
## Learning Status
The AI has learned to avoid:
- any/any/any rules
- Missing logging statements
- Trailing braces
- Generic configurations
### Elasticsearch
Active: active (running) since Sat 2025-08-16 03:22:48 UTC; 2 weeks 5 days ago
## Performance Metrics
- Daily flows processed: 850,000+
- Analysis frequency: Every 60 minutes
- AI response time: ~82 seconds
- PR creation time: <2 minutes
- Deployment time: <30 seconds
## System Metrics
- Elasticsearch Docs: 14252227
- AI Responses: 518
- Uptime: 19:34:26 up 20 days, 15:10, 2 users, load average: 0.08, 0.02, 0.01

View File

@@ -0,0 +1,5 @@
● elasticsearch.service - Elasticsearch
Loaded: loaded (/usr/lib/systemd/system/elasticsearch.service; enabled; preset: enabled)
Active: active (running) since Sat 2025-08-16 03:22:48 UTC; 2 weeks 5 days ago
Docs: https://www.elastic.co
Main PID: 5196 (java)

View File

@@ -0,0 +1,38 @@
#!/usr/bin/env python3
"""Quick check of AI processor status via shared files"""
import os
from datetime import datetime, timedelta

# Shared drop directories used by the orchestrator <-> AI processor pipeline.
request_dir = '/shared/ai-gitops/requests'
response_dir = '/shared/ai-gitops/responses'

try:
    # File counts double as queue depth / lifetime response totals.
    request_count = len(os.listdir(request_dir)) if os.path.exists(request_dir) else 0
    response_count = len(os.listdir(response_dir)) if os.path.exists(response_dir) else 0

    # A response is "recent" when its mtime falls inside the last hour.
    recent_count = 0
    if os.path.exists(response_dir):
        cutoff = datetime.now() - timedelta(hours=1)
        for entry in os.listdir(response_dir):
            entry_path = os.path.join(response_dir, entry)
            if not os.path.isfile(entry_path):
                continue
            if datetime.fromtimestamp(os.path.getmtime(entry_path)) > cutoff:
                recent_count += 1

    print(f"Requests waiting: {request_count}")
    print(f"Total responses: {response_count}")
    print(f"Recent responses (last hour): {recent_count}")

    # Pending requests take priority in the status verdict; otherwise recent
    # responses mean the processor is actively working.
    if request_count > 0:
        print("Status: ⏳ Requests pending processing")
    elif recent_count > 0:
        print("Status: ✅ AI Processor is active and processing")
    else:
        print("Status: 💤 Idle (no recent activity)")
except Exception as e:
    print(f"Error checking status: {e}")

View File

@@ -0,0 +1,112 @@
#!/usr/bin/env python3
"""Check Gitea token permissions using config file.

Reads the Gitea URL/token/repo from the orchestrator config and runs five
checks: token validity, repository access, branch listing, pull-request
access, and raw git authentication. Each check prints a ✅/❌ line so the
output reads as a quick health report.
"""
import json
import subprocess  # moved to the top with the other imports

import requests
import yaml

# Load configuration from config.yaml
print("Loading configuration from config.yaml...")
with open('/home/netops/orchestrator/config.yaml', 'r') as f:
    config = yaml.safe_load(f)

# Get Gitea configuration
GITEA_URL = config['gitea']['url']
TOKEN = config['gitea']['token']
REPO = config['gitea']['repo']

# Fail fast instead of hanging forever on an unreachable Gitea instance.
REQUEST_TIMEOUT = 10

headers = {
    'Authorization': f'token {TOKEN}',
    'Content-Type': 'application/json'
}

print("Checking Gitea token permissions...")
print(f"URL: {GITEA_URL}")
print(f"Repo: {REPO}")
# Reveal only the first 4 characters of the secret. The previous code leaked
# the first 10 characters and computed a negative star count ("*" * (len-10))
# for tokens shorter than 10 characters, printing them in full.
print(f"Token: {TOKEN[:4]}" + "*" * max(len(TOKEN) - 4, 0))
print()

# 1. Check if token is valid
print("1. Testing token validity...")
try:
    resp = requests.get(f"{GITEA_URL}/api/v1/user", headers=headers,
                        timeout=REQUEST_TIMEOUT)
    if resp.status_code == 200:
        user_data = resp.json()
        print(f"✅ Token is valid for user: {user_data.get('username', 'Unknown')}")
        print(f" Email: {user_data.get('email', 'Unknown')}")
        print(f" Admin: {user_data.get('is_admin', False)}")
    else:
        print(f"❌ Token validation failed: {resp.status_code}")
        print(f" Response: {resp.text}")
except Exception as e:
    print(f"❌ Error checking token: {e}")

# 2. Check repository access
print(f"\n2. Checking repository access for {REPO}...")
try:
    resp = requests.get(f"{GITEA_URL}/api/v1/repos/{REPO}", headers=headers,
                        timeout=REQUEST_TIMEOUT)
    if resp.status_code == 200:
        repo_data = resp.json()
        print(f"✅ Can access repository: {repo_data['full_name']}")
        # Check specific permissions; push is required for branch/PR creation.
        permissions = repo_data.get('permissions', {})
        print(f" Admin: {permissions.get('admin', False)}")
        print(f" Push: {permissions.get('push', False)}")
        print(f" Pull: {permissions.get('pull', False)}")
        if not permissions.get('push', False):
            print("\n⚠️ WARNING: Token does not have push permission!")
            print(" You need push permission to create branches and PRs")
    else:
        print(f"❌ Cannot access repository: {resp.status_code}")
        print(f" Response: {resp.text}")
except Exception as e:
    print(f"❌ Error checking repository: {e}")

# 3. Check if we can list branches
print("\n3. Testing branch operations...")
try:
    resp = requests.get(f"{GITEA_URL}/api/v1/repos/{REPO}/branches", headers=headers,
                        timeout=REQUEST_TIMEOUT)
    if resp.status_code == 200:
        branches = resp.json()
        print(f"✅ Can list branches (found {len(branches)} branches)")
        for branch in branches[:3]:  # Show first 3
            print(f" - {branch['name']}")
    else:
        print(f"❌ Cannot list branches: {resp.status_code}")
except Exception as e:
    print(f"❌ Error listing branches: {e}")

# 4. Check if we can create pull requests
print("\n4. Checking pull request permissions...")
try:
    resp = requests.get(f"{GITEA_URL}/api/v1/repos/{REPO}/pulls", headers=headers,
                        timeout=REQUEST_TIMEOUT)
    if resp.status_code == 200:
        prs = resp.json()
        print(f"✅ Can access pull requests (found {len(prs)} open PRs)")
    else:
        print(f"❌ Cannot access pull requests: {resp.status_code}")
except Exception as e:
    print(f"❌ Error checking pull requests: {e}")

# 5. Test git clone with new token
print("\n5. Testing git clone with token...")
try:
    test_url = f"https://oauth2:{TOKEN}@{GITEA_URL.replace('https://', '')}/{REPO}.git"
    result = subprocess.run(
        ['git', 'ls-remote', test_url, 'HEAD'],
        capture_output=True,
        text=True,
        timeout=30  # don't hang the whole script on a dead remote
    )
    if result.returncode == 0:
        print("✅ Git authentication successful!")
    else:
        # git echoes the remote URL (token included) on failure — redact it.
        print(f"❌ Git authentication failed: {result.stderr.replace(TOKEN, '***')}")
except Exception as e:
    print(f"❌ Error testing git: {e}")

print("\n" + "="*50)
print("Summary:")
print("Token is working correctly if all tests show ✅")
print("="*50)

View File

@@ -0,0 +1,141 @@
#!/usr/bin/env python3
"""
Close PR with Feedback - Reject a PR and help AI learn
"""
import sys
import json
import yaml
import requests
from datetime import datetime
from typing import List
sys.path.append('/home/netops/orchestrator')
from gitea_integration import GiteaIntegration
from pr_feedback import PRFeedbackSystem
def close_pr_with_feedback(pr_number: int, reason: str, issues: List[str]):
    """Close a PR and record feedback for AI learning.

    Posts an explanatory rejection comment on the PR, records the rejection
    in the feedback system, updates the orchestrator state file, and prints
    a learning summary. The PR itself must still be closed manually in Gitea
    (instructions are printed at the end).

    Args:
        pr_number: Gitea pull-request number to reject.
        reason: One-line reason shown in the PR comment.
        issues: Specific problems found; rendered as bullets in the comment.
    """
    # Load config
    with open('/home/netops/orchestrator/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    # Initialize systems
    gitea = GiteaIntegration(config['gitea'])
    feedback_system = PRFeedbackSystem()
    print(f"\n🚫 Closing PR #{pr_number} with feedback...")
    # First, add a comment to the PR explaining why it's being closed.
    # NOTE(review): the "Specific Problems" bullets below are hard-coded and do
    # not vary with `issues` — confirm that is intended.
    comment = f"""## 🤖 AI Configuration Review - Rejected
**Reason**: {reason}
**Issues Found**:
{chr(10).join(f'- {issue}' for issue in issues)}
This feedback has been recorded to improve future AI suggestions. The AI will learn from these issues and avoid them in future configurations.
### Specific Problems:
- **Security**: The any/any/any permit rule is too permissive
- **Best Practice**: Source addresses should be specific, not 'any'
- **Risk**: This configuration could expose the network to threats
The AI will generate better suggestions next time based on this feedback.
"""
    # Post comment to PR (using Gitea API). PR comments go through the
    # /issues/ endpoint because Gitea treats PRs as issues for commenting.
    api_url = f"{config['gitea']['url']}/api/v1/repos/{config['gitea']['repo']}/issues/{pr_number}/comments"
    headers = {
        'Authorization': f"token {config['gitea']['token']}",
        'Content-Type': 'application/json'
    }
    comment_data = {"body": comment}
    try:
        response = requests.post(api_url, json=comment_data, headers=headers)
        if response.status_code in [200, 201]:
            print("✅ Added feedback comment to PR")
        else:
            print(f"⚠️ Could not add comment: {response.status_code}")
    except Exception as e:
        # Best-effort: a failed comment does not block recording the feedback.
        print(f"⚠️ Error adding comment: {e}")
    # Record feedback for AI learning
    feedback_details = {
        'reason': reason,
        'specific_issues': '\n'.join(issues),
        'configuration_issues': [
            {'type': 'security_permissive', 'description': 'Rules too permissive (any/any/any)'},
            {'type': 'security_missing', 'description': 'Missing source address restrictions'}
        ]
    }
    feedback_system.record_pr_feedback(pr_number, 'rejected', feedback_details)
    # Update orchestrator state so the orchestrator stops tracking this PR.
    state_file = '/var/lib/orchestrator/state.json'
    try:
        with open(state_file, 'r') as f:
            state = json.load(f)
        state['pending_pr'] = None
        state['last_pr_status'] = 'rejected'
        state['last_pr_rejected'] = datetime.now().isoformat()
        with open(state_file, 'w') as f:
            json.dump(state, f, indent=2)
        print("✅ Updated orchestrator state")
    except Exception as e:
        print(f"⚠️ Could not update state: {e}")
    # Show AI learning summary drawn from all recorded feedback so far.
    patterns = feedback_system.analyze_feedback_patterns()
    print(f"\n📊 AI Learning Summary:")
    print(f"Total feedback entries: {patterns['total_prs']}")
    print(f"Rejected PRs: {patterns['rejected']}")
    print(f"Security concerns: {patterns['security_concerns']}")
    print("\n✅ PR closed with feedback. The AI will learn from this!")
    print("\nNext time the AI generates a configuration, it will:")
    print("- Avoid any/any/any permit rules")
    print("- Use specific source addresses")
    print("- Follow security best practices")
    print("\n⚠️ IMPORTANT: Now manually close the PR in Gitea!")
    print(f"Go to: {config['gitea']['url']}/{config['gitea']['repo']}/pulls/{pr_number}")
    print("Click the 'Close Pull Request' button")
# Quick reject function for current PR
def reject_current_pr():
    """Reject PR #2 with specific feedback"""
    # Canned findings for the known-bad any/any/any policy in PR #2.
    security_findings = [
        "ALLOW-ESTABLISHED policy permits all traffic from trust to untrust",
        "Source address should not be 'any' - use specific networks",
        "Application should not be 'any' - specify required services only",
        "This configuration could expose internal network to threats",
    ]
    close_pr_with_feedback(
        pr_number=2,
        reason="Security policy too permissive - any/any/any permit rule is dangerous",
        issues=security_findings,
    )
# Entry point: `--current` rejects the known PR #2 with canned feedback;
# otherwise prompt interactively for the PR number, reason, and issue list.
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "--current":
        # Reject the current PR #2
        reject_current_pr()
    else:
        # Interactive mode
        pr_num = input("Enter PR number to reject: ")
        reason = input("Reason for rejection: ")
        issues = []
        print("Enter specific issues (empty line to finish):")
        # Collect one issue per line until the user enters a blank line.
        while True:
            issue = input("- ")
            if not issue:
                break
            issues.append(issue)
        close_pr_with_feedback(int(pr_num), reason, issues)

View File

@@ -0,0 +1,317 @@
#!/usr/bin/env python3
"""
SRX Configuration Collector
Pulls current configuration from SRX and stores it for AI analysis
"""
import os
import sys
import json
import yaml
import paramiko
from datetime import datetime
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SRXConfigCollector:
    """Collect the running configuration from a Juniper SRX for AI analysis.

    Fetches the full configuration plus security-focused sections over SSH,
    derives a structured summary (zones, address book, policies, applications,
    interfaces, NAT rule counts) by line-oriented text parsing, and stores
    timestamped copies with ``*_latest`` symlinks under
    /shared/ai-gitops/configs.
    """

    def __init__(self, config_path='/home/netops/orchestrator/config.yaml'):
        """Initialize with orchestrator config.

        Loads the orchestrator YAML config, keeps its ``srx`` section
        (host, username, ssh_key) for later connections, and ensures the
        shared output directory exists.
        """
        with open(config_path, 'r') as f:
            self.config = yaml.safe_load(f)
        self.srx_config = self.config['srx']
        self.config_dir = Path('/shared/ai-gitops/configs')
        self.config_dir.mkdir(parents=True, exist_ok=True)

    def connect_to_srx(self):
        """Establish SSH connection to SRX; return the client, or None on failure."""
        try:
            client = paramiko.SSHClient()
            # NOTE(review): AutoAddPolicy silently trusts unknown host keys —
            # acceptable on a private management network, but worth confirming.
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            # Connect using SSH key
            client.connect(
                hostname=self.srx_config['host'],
                username=self.srx_config['username'],
                key_filename=self.srx_config['ssh_key'],
                port=22
            )
            logger.info(f"Connected to SRX at {self.srx_config['host']}")
            return client
        except Exception as e:
            logger.error(f"Failed to connect: {e}")
            return None

    def get_full_config(self, client):
        """Get complete SRX configuration as text, or None if nothing came back."""
        logger.info("Fetching full SRX configuration...")
        # '| no-more' disables CLI paging so the entire config streams back.
        stdin, stdout, stderr = client.exec_command('show configuration | no-more')
        config_output = stdout.read().decode('utf-8')
        if config_output:
            logger.info(f"Retrieved {len(config_output)} bytes of configuration")
            return config_output
        else:
            logger.error("Failed to retrieve configuration")
            return None

    def get_security_config(self, client):
        """Get security-specific configuration sections, keyed by section name."""
        logger.info("Fetching security policies...")
        commands = [
            'show configuration security policies',
            'show configuration security zones',
            'show configuration security address-book',
            'show configuration applications',
            'show configuration security nat',
            'show configuration interfaces'
        ]
        security_config = {}
        for cmd in commands:
            stdin, stdout, stderr = client.exec_command(f'{cmd} | no-more')
            output = stdout.read().decode('utf-8')
            # Key each section by the last word of its command, e.g.
            # 'show configuration security zones' -> 'zones'.
            section = cmd.split()[-1]  # Get last word as section name
            security_config[section] = output
            logger.info(f"Retrieved {section} configuration")
        return security_config

    def analyze_config(self, full_config, security_config):
        """Analyze configuration and extract key information - FIXED VERSION.

        Line-oriented parsing of the curly-brace Junos text in
        ``security_config``; ``full_config`` is currently unused here.
        Returns a dict summarizing zones, networks, policies, applications,
        interfaces, NAT counts, and address-book entries.
        """
        analysis = {
            'timestamp': datetime.now().isoformat(),
            'zones': [],
            'networks': {},
            'policies': [],
            'policy_count': 0,
            'applications': [],
            'interfaces': {},
            # NOTE(review): initialized as a list but replaced by a dict of
            # counts in the NAT section below — consumers should expect a dict.
            'nat_rules': [],
            'address_book': {}
        }
        # Extract zones - FIXED parsing for your format
        if 'zones' in security_config:
            zones_content = security_config['zones']
            if zones_content:
                lines = zones_content.split('\n')
                for line in lines:
                    # Your format: "security-zone WAN {" or "security-zone HOME {"
                    if 'security-zone' in line and '{' in line:
                        # Extract zone name between 'security-zone' and '{'
                        parts = line.strip().split()
                        if len(parts) >= 2 and parts[0] == 'security-zone':
                            zone_name = parts[1]
                            if zone_name != '{':  # Make sure it's not just the bracket
                                analysis['zones'].append(zone_name)
                                analysis['networks'][zone_name] = []
        # Second pass over the same zones text: pull per-zone address-book
        # entries, tracking which zone block we are currently inside.
        if 'zones' in security_config:
            lines = security_config['zones'].split('\n')
            current_zone = None
            in_address_book = False
            for line in lines:
                line = line.strip()
                # Track current zone
                if 'security-zone' in line and '{' in line:
                    parts = line.split()
                    if len(parts) >= 2:
                        current_zone = parts[1]
                        in_address_book = False
                # Check if we're in address-book section
                elif 'address-book' in line and '{' in line:
                    in_address_book = True
                # Parse addresses within address-book
                elif in_address_book and 'address ' in line and current_zone:
                    # Format: "address GAMING-NETWORK 192.168.10.0/24;"
                    parts = line.split()
                    if len(parts) >= 3 and parts[0] == 'address':
                        addr_name = parts[1]
                        addr_value = parts[2].rstrip(';')
                        # Only keep values that look like prefixes or IPs.
                        if '/' in addr_value or '.' in addr_value:
                            analysis['address_book'][addr_name] = addr_value
                            if current_zone in analysis['networks']:
                                analysis['networks'][current_zone].append(addr_value)
        # Extract policies - FIXED for your format
        if 'policies' in security_config:
            policies_content = security_config['policies']
            if policies_content:
                lines = policies_content.split('\n')
                from_zone = None
                to_zone = None
                current_policy = None  # (unused; kept for parity)
                for line in lines:
                    line = line.strip()
                    # Format: "from-zone HOME to-zone WAN {"
                    if 'from-zone' in line and 'to-zone' in line:
                        parts = line.split()
                        if len(parts) >= 4:
                            from_idx = parts.index('from-zone') if 'from-zone' in parts else -1
                            to_idx = parts.index('to-zone') if 'to-zone' in parts else -1
                            if from_idx >= 0 and to_idx >= 0:
                                from_zone = parts[from_idx + 1] if from_idx + 1 < len(parts) else None
                                to_zone = parts[to_idx + 1] if to_idx + 1 < len(parts) else None
                                to_zone = to_zone.rstrip('{') if to_zone else None
                    # Format: "policy GAMING-VLAN-PRIORITY {" — only counted once
                    # we know which zone pair we are inside.
                    elif 'policy ' in line and '{' in line and from_zone and to_zone:
                        parts = line.split()
                        if len(parts) >= 2 and parts[0] == 'policy':
                            policy_name = parts[1].rstrip('{')
                            analysis['policies'].append({
                                'name': policy_name,
                                'from_zone': from_zone,
                                'to_zone': to_zone
                            })
                            analysis['policy_count'] += 1
        # Extract applications
        if 'applications' in security_config:
            apps_content = security_config['applications']
            if apps_content:
                lines = apps_content.split('\n')
                for line in lines:
                    # Format: "application PS5-HTTP {"
                    if 'application ' in line and '{' in line:
                        parts = line.strip().split()
                        if len(parts) >= 2 and parts[0] == 'application':
                            app_name = parts[1].rstrip('{')
                            if app_name and app_name != 'application':
                                analysis['applications'].append(app_name)
        # Extract interfaces with IPs
        if 'interfaces' in security_config:
            interfaces_content = security_config['interfaces']
            if interfaces_content:
                lines = interfaces_content.split('\n')
                current_interface = None
                for line in lines:
                    line = line.strip()
                    # Interface line (e.g., "ge-0/0/0 {" or "reth0 {")
                    # NOTE(review): only ge-* and reth* are recognized; other
                    # interface types (xe-, irb, lo0, ...) would be skipped.
                    if (line.startswith('ge-') or line.startswith('reth')) and '{' in line:
                        current_interface = line.split()[0]
                        analysis['interfaces'][current_interface] = {'addresses': []}
                    # IP address line (e.g., "address 192.168.1.1/24;")
                    elif current_interface and 'address ' in line and '/' in line:
                        parts = line.split()
                        for part in parts:
                            if '/' in part:
                                addr = part.rstrip(';')
                                analysis['interfaces'][current_interface]['addresses'].append(addr)
        # Extract NAT rules — coarse counts only, via substring occurrences.
        if 'nat' in security_config:
            nat_content = security_config['nat']
            if nat_content:
                source_nat_count = nat_content.count('source pool')
                dest_nat_count = nat_content.count('destination pool')
                analysis['nat_rules'] = {
                    'source_nat': source_nat_count,
                    'destination_nat': dest_nat_count,
                    'total': source_nat_count + dest_nat_count
                }
        return analysis

    def save_config(self, full_config, security_config, analysis):
        """Save configuration and analysis to the shared dir; return analysis.

        Writes timestamped copies of the full config (.txt), the raw
        security sections (.json) and the analysis (.json), and refreshes
        the ``*_latest`` symlinks.
        """
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        # Save full config
        full_config_path = self.config_dir / f'srx_config_{timestamp}.txt'
        with open(full_config_path, 'w') as f:
            f.write(full_config)
        logger.info(f"Saved full config to {full_config_path}")
        # Refresh the 'latest' symlink (relative target, same directory).
        # NOTE(review): unlink-then-symlink is not atomic; a concurrent reader
        # could briefly see no 'latest' link.
        latest_path = self.config_dir / 'srx_config_latest.txt'
        if latest_path.exists():
            latest_path.unlink()
        latest_path.symlink_to(full_config_path.name)
        # Save security config sections
        security_config_path = self.config_dir / f'srx_security_config_{timestamp}.json'
        with open(security_config_path, 'w') as f:
            json.dump(security_config, f, indent=2)
        # Save analysis
        analysis_path = self.config_dir / f'srx_config_analysis_{timestamp}.json'
        with open(analysis_path, 'w') as f:
            json.dump(analysis, f, indent=2)
        logger.info(f"Saved config analysis to {analysis_path}")
        # Save latest analysis symlink
        latest_analysis = self.config_dir / 'srx_config_analysis_latest.json'
        if latest_analysis.exists():
            latest_analysis.unlink()
        latest_analysis.symlink_to(analysis_path.name)
        return analysis

    def collect(self):
        """Main collection process.

        Connects, fetches, analyzes, and saves; prints a human-readable
        summary. Returns the analysis dict, or None when the connection or
        full-config fetch fails.
        """
        logger.info("Starting SRX configuration collection...")
        # Connect to SRX
        client = self.connect_to_srx()
        if not client:
            return None
        try:
            # Get configurations
            full_config = self.get_full_config(client)
            security_config = self.get_security_config(client)
            if full_config:
                # Analyze configuration
                analysis = self.analyze_config(full_config, security_config)
                # Save everything
                self.save_config(full_config, security_config, analysis)
                # Print summary
                print("\n📊 Configuration Summary:")
                print(f"Zones: {', '.join(analysis['zones'])}")
                print(f"Networks: {len([n for nets in analysis['networks'].values() for n in nets])} subnets across {len(analysis['zones'])} zones")
                print(f"Policies: {analysis.get('policy_count', 0)} security policies")
                print(f"Address Book: {len(analysis['address_book'])} entries")
                print(f"Interfaces: {len(analysis['interfaces'])} configured")
                return analysis
        finally:
            # Always release the SSH session, even on analysis errors.
            client.close()
            logger.info("Disconnected from SRX")
def main():
    """Entry point: run one collection pass and report the outcome."""
    analysis = SRXConfigCollector().collect()
    # Guard clause: a falsy analysis means the collection failed somewhere.
    if not analysis:
        print("\n❌ Failed to collect configuration")
        sys.exit(1)
    print("\n✅ Configuration collected successfully!")
    print("Files saved in: /shared/ai-gitops/configs/")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,160 @@
#!/usr/bin/env python3
"""
Create Gitea PR from AI suggestions using existing gitea_integration module
"""
import json
import yaml
from pathlib import Path
from datetime import datetime
from gitea_integration import GiteaIntegration
def get_latest_pr_file(pr_dir='/shared/ai-gitops/pending_prs'):
    """Find the most recent PR file from AI suggestions.

    Args:
        pr_dir: Directory to scan for ``*.json`` PR files. Defaults to the
            shared pending-PR drop directory; parameterized so callers and
            tests can point at another location.

    Returns:
        Path of the newest ``.json`` file by modification time, or ``None``
        when the directory has no JSON files.
    """
    pr_files = list(Path(pr_dir).glob('*.json'))
    if not pr_files:
        return None
    # max() is O(n); the previous full sort was O(n log n) just to take one item.
    return max(pr_files, key=lambda p: p.stat().st_mtime)
def main():
    """Create PR from latest AI suggestions.

    Finds the newest pending-PR JSON, shows a preview, asks for interactive
    confirmation, then creates a Gitea PR via GiteaIntegration and records a
    tracking file. Returns True on success, False on failure, None when
    there is nothing to do or the user cancels.
    """
    print("="*60)
    print(" CREATE GITEA PR FROM AI SUGGESTIONS")
    print("="*60)
    # Load config
    with open('/home/netops/orchestrator/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    # Initialize Gitea integration
    gitea = GiteaIntegration(config['gitea'])
    # Get latest PR file
    pr_file = get_latest_pr_file()
    if not pr_file:
        print("❌ No pending PR files found")
        print(" Run: python3 run_pipeline.py --skip-netflow")
        return
    print(f"📄 Found PR file: {pr_file.name}")
    # Load PR data
    with open(pr_file, 'r') as f:
        pr_data = json.load(f)
    # Extract details
    suggestions = pr_data.get('suggestions', '')
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
    # Show preview
    print("\n📋 PR Preview:")
    print(f" Title: {pr_data.get('title', 'AI Network Optimization')}")
    print(f" Model: {pr_data.get('model', 'llama2:13b')}")
    print(f" Feedback aware: {pr_data.get('feedback_aware', False)}")
    # chr(10) is '\n' — used because backslashes are not allowed in f-string
    # expressions (pre-3.12).
    print(f" Config lines: {len(suggestions.split(chr(10)))}")
    # Show first few lines of suggestions
    print("\n📝 First few suggestions:")
    for line in suggestions.split('\n')[:5]:
        if line.strip():
            print(f" {line}")
    print(" ...")
    # Confirm creation (interactive; anything other than 'y' aborts)
    print(f"\n❓ Create PR from these AI suggestions? (y/n): ", end="")
    if input().lower() != 'y':
        print("❌ Cancelled")
        return
    # Create PR title and description
    pr_title = f"AI Network Optimization - {timestamp}"
    # NOTE(review): the compliance checklist below is static boilerplate, not
    # a computed check of the suggestions — confirm that is intended.
    pr_description = f"""## 🤖 AI-Generated Network Configuration
**Generated:** {timestamp}
**Model:** {pr_data.get('model', 'llama2:13b')}
**Feedback Learning:** {'✅ Applied' if pr_data.get('feedback_aware') else '❌ Not applied'}
### 📊 Security Compliance Check:
- ✅ No source-address any
- ✅ No destination-address any
- ✅ No application any
- ✅ Logging enabled
- ✅ Address-sets defined
### 📋 Configuration Summary:
This AI-generated configuration includes:
- Address-set definitions for network segmentation
- Security policies with specific source/destination
- Logging enabled for audit compliance
- No any/any/any rules (security best practice)
### 🔍 Changes Overview:
Total configuration lines: {len(suggestions.split(chr(10)))}
### 📝 Full Configuration:
```junos
{suggestions}
```
### ✅ Review Checklist:
- [ ] Verify address-sets match network architecture
- [ ] Confirm zone assignments are correct
- [ ] Check application definitions
- [ ] Validate logging configuration
- [ ] Test in lab environment first
---
*Generated by AI Network Automation System*
*Feedback learning from {pr_data.get('feedback_count', 5)} previous reviews*
"""
    # Create the PR
    print("\n📤 Creating PR in Gitea...")
    try:
        pr_info = gitea.create_pr_with_config(
            srx_config=suggestions,
            title=pr_title,
            description=pr_description
        )
        if pr_info:
            print(f"\n✅ SUCCESS! Created PR #{pr_info['number']}")
            print(f" Title: {pr_info.get('title')}")
            # Fall back to a constructed URL when the API response omits one.
            print(f" URL: {pr_info.get('url', config['gitea']['url'] + '/' + config['gitea']['repo'] + '/pulls/' + str(pr_info['number']))}")
            print(f"\n📋 Next steps:")
            print(f" 1. Review PR at: {pr_info.get('url', 'Gitea URL')}")
            print(f" 2. Test configuration in lab")
            print(f" 3. Approve or provide feedback")
            print(f" 4. If approved, run: python3 deploy_approved.py")
            # Save PR tracking info so later stages can correlate PR -> source file.
            tracking_file = Path('/shared/ai-gitops/pr_tracking') / f"pr_{pr_info['number']}_created.json"
            tracking_file.parent.mkdir(exist_ok=True)
            with open(tracking_file, 'w') as f:
                json.dump({
                    'pr_number': pr_info['number'],
                    'created_at': datetime.now().isoformat(),
                    'pr_file': str(pr_file),
                    'title': pr_title,
                    'model': pr_data.get('model'),
                    'feedback_aware': pr_data.get('feedback_aware')
                }, f, indent=2)
            return True
        else:
            print("❌ Failed to create PR")
            print(" Check logs for details")
            return False
    except Exception as e:
        print(f"❌ Error creating PR: {e}")
        # Try to get more details
        import traceback
        print("\n🔍 Debug information:")
        print(traceback.format_exc())
        return False
# Script entry point: build a Gitea PR from the newest AI suggestion file.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env python3
"""
Simple deployment script placeholder
Full version will deploy approved configs
"""
import logging
from datetime import datetime

# Mirror every message to the shared orchestrator log file and to stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(message)s',
    handlers=[
        logging.FileHandler('/var/log/orchestrator/deployment.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


def main():
    """Run a single deployment check pass (placeholder implementation)."""
    logger.info("Deployment check started")
    logger.info("Looking for approved configurations...")
    # TODO: Implement actual deployment logic
    logger.info("No approved configurations found")
    logger.info("Deployment check complete")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
Diagnostic script to understand why zones/policies aren't being parsed
Run this to see what's actually in your SRX config
"""
import json
from pathlib import Path
def diagnose_config():
    """Diagnose the SRX config parsing.

    Reads the newest saved security-config JSON plus the raw config text and
    prints what each section actually contains, so parsing failures in the
    collector's analyzer can be traced to the real on-disk format.
    """
    print("=" * 60)
    print("SRX Config Diagnostic Tool")
    print("=" * 60)
    # Read the security config JSON
    config_dir = Path('/shared/ai-gitops/configs')
    # Find latest security config by modification time.
    security_configs = list(config_dir.glob('srx_security_config_*.json'))
    if not security_configs:
        print("❌ No security config JSON files found")
        return
    latest_security = max(security_configs, key=lambda p: p.stat().st_mtime)
    print(f"\n📄 Reading: {latest_security.name}")
    with open(latest_security, 'r') as f:
        security_config = json.load(f)
    print("\n📊 Security Config Sections Found:")
    for section, content in security_config.items():
        lines = content.strip().split('\n') if content else []
        print(f" - {section}: {len(lines)} lines")
    # Check zones section
    print("\n🔍 Analyzing Zones Section:")
    if 'zones' in security_config:
        zones_content = security_config['zones']
        if zones_content:
            lines = zones_content.split('\n')[:20]  # First 20 lines
            print("First 20 lines of zones config:")
            for i, line in enumerate(lines, 1):
                print(f" {i:2}: {line}")
            # Try to find zone patterns
            print("\n🔎 Looking for zone patterns:")
            for line in zones_content.split('\n'):
                if 'security-zone' in line:
                    print(f" Found: {line.strip()}")
                    # Show the tokenization the parser would see.
                    if line.count(' ') >= 2:
                        parts = line.strip().split()
                        print(f" Parts: {parts}")
        else:
            print(" Zones section is empty")
    # Check policies section
    print("\n🔍 Analyzing Policies Section:")
    if 'policies' in security_config:
        policies_content = security_config['policies']
        if policies_content:
            lines = policies_content.split('\n')[:20]
            print("First 20 lines of policies config:")
            for i, line in enumerate(lines, 1):
                print(f" {i:2}: {line}")
            # Count actual policies (lines mentioning both 'policy' and a zone pair).
            policy_count = 0
            for line in policies_content.split('\n'):
                if ' policy ' in line and 'from-zone' in line:
                    policy_count += 1
                    if policy_count <= 3:  # Show first 3
                        print(f"\n Policy found: {line.strip()}")
            print(f"\n Total policies found: {policy_count}")
        else:
            print(" Policies section is empty")
    # Check address-book section
    print("\n🔍 Analyzing Address Book Section:")
    if 'address-book' in security_config:
        addr_content = security_config['address-book']
        if addr_content:
            lines = addr_content.split('\n')[:20]
            print("First 20 lines of address-book config:")
            for i, line in enumerate(lines, 1):
                print(f" {i:2}: {line}")
        else:
            print(" Address book section is empty")
    # Check the raw config file
    print("\n📄 Checking Raw Config File:")
    raw_config = config_dir / 'srx_config_latest.txt'
    if raw_config.exists():
        with open(raw_config, 'r') as f:
            lines = f.readlines()
        print(f" Total lines: {len(lines)}")
        # Look for security sections (0-based line index of each header).
        security_sections = {}
        for i, line in enumerate(lines):
            if line.startswith('security {'):
                security_sections['main'] = i
            elif 'security policies {' in line:
                security_sections['policies'] = i
            elif 'security zones {' in line:
                security_sections['zones'] = i
            elif 'applications {' in line:
                security_sections['applications'] = i
        print("\n Security sections found at lines:")
        for section, line_num in security_sections.items():
            print(f" - {section}: line {line_num}")
            # Show a few lines from that section
            # NOTE(review): a section found at line 0 is skipped by this
            # truthiness check — likely harmless but worth confirming.
            if line_num:
                print(f" Content: {lines[line_num].strip()}")
                if line_num + 1 < len(lines):
                    print(f" Next: {lines[line_num + 1].strip()}")
# Script entry point: print the parsing diagnostics report.
if __name__ == "__main__":
    diagnose_config()

View File

@@ -0,0 +1,210 @@
#!/usr/bin/env python3
"""
Force Deployment - Manually deploy approved configurations to SRX
"""
import os
import json
import yaml
import time
import paramiko
from datetime import datetime
from pathlib import Path
class ManualDeployment:
    def __init__(self):
        """Load the orchestrator config and current state from disk.

        Raises FileNotFoundError / KeyError if either file or the 'srx'
        section is missing — deployment cannot proceed without them.
        """
        # Load configuration
        with open('/home/netops/orchestrator/config.yaml', 'r') as f:
            self.config = yaml.safe_load(f)
        self.srx_config = self.config['srx']
        # Load state to check for merged PRs
        state_file = '/var/lib/orchestrator/state.json'
        with open(state_file, 'r') as f:
            self.state = json.load(f)
    def get_latest_merged_config(self):
        """Find the most recent merged configuration.

        Returns the suggestion text from the newest file in the shared
        responses directory when the orchestrator state shows a merged PR;
        otherwise returns None.
        """
        # Check if there's a recently merged PR
        if self.state.get('last_pr_status') == 'merged':
            pr_number = self.state.get('deployment_pr')
            print(f"✅ Found merged PR #{pr_number}")
            # For now, we'll use the latest response as the config
            # In production, this would fetch from the merged PR
            # NOTE(review): the newest response file is assumed to match the
            # merged PR — there is no cross-check; verify before relying on it.
            response_dir = '/shared/ai-gitops/responses'
            if os.path.exists(response_dir):
                # Sort ascending by mtime; the last entry is the newest file.
                files = sorted(os.listdir(response_dir), key=lambda x: os.path.getmtime(os.path.join(response_dir, x)))
                if files:
                    latest_file = os.path.join(response_dir, files[-1])
                    with open(latest_file, 'r') as f:
                        data = json.load(f)
                    # Prefer 'suggestions'; fall back to the legacy 'response' key.
                    return data.get('suggestions', data.get('response', ''))
        return None
    def connect_to_srx(self):
        """Establish SSH connection to SRX; return the client or None on failure."""
        print(f"\n🔌 Connecting to SRX at {self.srx_config['host']}...")
        try:
            client = paramiko.SSHClient()
            # NOTE(review): AutoAddPolicy trusts unknown host keys — confirm
            # this is acceptable for the deployment path.
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            # Connect using SSH key
            client.connect(
                hostname=self.srx_config['host'],
                username=self.srx_config['username'],
                key_filename=self.srx_config['ssh_key'],
                port=22
            )
            print("✅ Connected to SRX successfully")
            return client
        except Exception as e:
            print(f"❌ Failed to connect: {e}")
            return None
    def deploy_config(self, client, config_text):
        """Deploy configuration to SRX with commit confirmed.

        Loads config lines one by one, commits with `commit confirmed 2`
        (auto-rollback unless confirmed), waits 30s, then issues a plain
        `commit` to make the change permanent. Returns True on success.

        NOTE(review): each exec_command() opens a fresh SSH channel, so CLI
        state (including candidate configuration) may not carry over between
        calls — hence the 'configure\\n...' prefix repeated on every command.
        Verify this actually accumulates changes on the target device.
        """
        print("\n📤 Deploying configuration to SRX...")
        try:
            # Enter configuration mode
            stdin, stdout, stderr = client.exec_command('configure')
            time.sleep(1)
            # Load the configuration, skipping blanks and comment lines.
            print("Loading configuration...")
            config_lines = config_text.strip().split('\n')
            for line in config_lines:
                if line.strip() and not line.startswith('#'):
                    stdin, stdout, stderr = client.exec_command(f'configure\n{line}')
                    result = stdout.read().decode()
                    # Surface per-line errors but keep going with the rest.
                    if 'error' in result.lower():
                        print(f"⚠️ Error with command: {line}")
                        print(f" {result}")
            # Commit with confirmed (2 minute timeout)
            print("\n🔄 Committing configuration with 2-minute confirmation timeout...")
            stdin, stdout, stderr = client.exec_command('configure\ncommit confirmed 2\nexit')
            commit_result = stdout.read().decode()
            if 'commit complete' in commit_result.lower():
                print("✅ Configuration committed (pending confirmation)")
                # Wait a bit to test connectivity; if the config broke the
                # session the device will auto-rollback at the 2-minute mark.
                print("⏳ Testing configuration (30 seconds)...")
                time.sleep(30)
                # If we're still connected, confirm the commit
                print("✅ Configuration appears stable, confirming commit...")
                stdin, stdout, stderr = client.exec_command('configure\ncommit\nexit')
                confirm_result = stdout.read().decode()
                if 'commit complete' in confirm_result.lower():
                    print("✅ Configuration confirmed and saved!")
                    return True
                else:
                    print("❌ Failed to confirm configuration")
                    return False
            else:
                print("❌ Initial commit failed")
                print(commit_result)
                return False
        except Exception as e:
            print(f"❌ Deployment error: {e}")
            return False
def create_deployment_record(self, success, config_text, base_dir='/shared/ai-gitops'):
    """Record the deployment attempt on disk.

    Writes a timestamped copy of the configuration with a SUCCESS/FAILED
    header under ``base_dir`` ('deployed/' or 'failed/' subdirectory).

    Args:
        success: True if the deployment succeeded.
        config_text: The configuration that was (or failed to be) deployed.
        base_dir: Root directory for deployment records. Defaults to the
            shared GitOps directory; parameterized so callers/tests can
            redirect it without changing behavior for existing callers.
    """
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    # Successful and failed deployments are archived in separate folders.
    if success:
        deploy_dir = os.path.join(base_dir, 'deployed')
        filename = f"deployed_{timestamp}.conf"
    else:
        deploy_dir = os.path.join(base_dir, 'failed')
        filename = f"failed_{timestamp}.conf"
    os.makedirs(deploy_dir, exist_ok=True)
    filepath = os.path.join(deploy_dir, filename)
    with open(filepath, 'w') as f:
        f.write(f"# Deployment {'SUCCESS' if success else 'FAILED'}\n")
        f.write(f"# Timestamp: {datetime.now().isoformat()}\n")
        f.write(f"# SRX: {self.srx_config['host']}\n\n")
        f.write(config_text)
    print(f"\n📝 Deployment record saved to: {filepath}")
def run(self):
    """Interactively deploy the latest approved configuration to the SRX.

    Locates the most recent merged configuration, shows a preview, asks
    the operator for an explicit 'yes', deploys over SSH, records the
    outcome, and persists it to the orchestrator state file.

    Returns:
        True on a confirmed successful deployment, False otherwise.
    """
    print("\n🚀 MANUAL DEPLOYMENT TO SRX")
    print("="*60)

    # Locate the configuration approved through the Gitea PR workflow.
    print("\n📋 Looking for approved configuration...")
    config_text = self.get_latest_merged_config()
    if not config_text:
        print("❌ No approved configuration found")
        print("\nMake sure you have:")
        print("1. Created a PR using force_pipeline_test.py")
        print("2. Merged the PR in Gitea")
        return False

    # Preview: at most the first 500 characters.
    print("\n📄 Configuration to deploy:")
    print("-"*40)
    if len(config_text) > 500:
        print(config_text[:500] + "...")
    else:
        print(config_text)
    print("-"*40)

    # Require explicit operator confirmation before touching production.
    print("\n⚠️ WARNING: This will apply configuration to your production SRX!")
    print("The configuration will auto-rollback after 2 minutes if not confirmed.")
    print("\nDo you want to continue? (yes/no): ", end="")
    answer = input().strip().lower()
    if answer != 'yes':
        print("Deployment cancelled.")
        return False

    client = self.connect_to_srx()
    if not client:
        return False
    try:
        success = self.deploy_config(client, config_text)
        self.create_deployment_record(success, config_text)
        # Persist the outcome so the orchestrator sees the new state.
        stamp = datetime.now().isoformat()
        if success:
            self.state['last_successful_deployment'] = stamp
            self.state['pending_deployment'] = False
        else:
            self.state['last_failed_deployment'] = stamp
        with open('/var/lib/orchestrator/state.json', 'w') as f:
            json.dump(self.state, f, indent=2)
        return success
    finally:
        client.close()
        print("\n🔌 Disconnected from SRX")
def main():
    """Entry point: run one interactive manual deployment and report."""
    outcome = ManualDeployment().run()
    if outcome:
        print("\n✅ Deployment completed successfully!")
    else:
        print("\n❌ Deployment failed")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,133 @@
#!/usr/bin/env python3
"""
Force Pipeline Test - Manual trigger for complete pipeline cycle
This will analyze all data and create a PR immediately
"""
import os
import sys
import json
import yaml
from datetime import datetime
import time
# Add orchestrator directory to path
sys.path.append('/home/netops/orchestrator')
from orchestrator_main import NetworkOrchestrator
from gitea_integration import GiteaIntegration
def force_pr_creation():
    """Force the creation of a PR with all accumulated data.

    Runs one full pipeline cycle immediately: connect to Elasticsearch,
    collect up to a week of traffic data, request an AI analysis, and
    open a PR in Gitea, bypassing the normal schedule check.

    Returns:
        True when a PR was created, False otherwise.
    """
    print("🚀 Starting forced pipeline test...")
    print("="*60)
    # Load configuration (fails fast if the orchestrator config is missing).
    with open('/home/netops/orchestrator/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    # Initialize orchestrator
    orchestrator = NetworkOrchestrator('/home/netops/orchestrator/config.yaml')
    print("\n📊 Step 1: Setting up Elasticsearch connection...")
    orchestrator.setup_elasticsearch()
    print("\n📈 Step 2: Collecting all available traffic data...")
    # Temporarily widen the analysis window to get ALL data.
    # Bug fix: the original saved original_window but never restored it,
    # leaving the live orchestrator config mutated to 168h for any later
    # runs in this process; restore it in a finally block.
    original_window = orchestrator.config['analysis']['window_hours']
    orchestrator.config['analysis']['window_hours'] = 168  # 7 days of data
    try:
        traffic_data = orchestrator.collect_traffic_data()
    finally:
        orchestrator.config['analysis']['window_hours'] = original_window
    if not traffic_data:
        print("❌ No traffic data available")
        return False
    # Show summary of collected data
    top_talkers = traffic_data.get('top_talkers', {}).get('buckets', [])
    print(f"✅ Collected data summary:")
    print(f" - Top talkers: {len(top_talkers)} IPs")
    print(f" - VLANs: {len(traffic_data.get('vlans', {}).get('buckets', []))}")
    print(f" - Protocols: {len(traffic_data.get('protocols', {}).get('buckets', []))}")
    print("\n🤖 Step 3: Requesting AI analysis...")
    ai_response = orchestrator.request_ai_analysis(traffic_data)
    if not ai_response:
        print("❌ Failed to get AI response")
        return False
    print("✅ AI analysis complete")
    # Save the analysis to state
    orchestrator.save_state({
        'last_analysis_run': datetime.now().isoformat(),
        'last_analysis_data': {
            'top_talkers_count': len(top_talkers),
            'response_received': True
        }
    })
    print("\n📝 Step 4: Creating PR in Gitea...")
    # Force PR creation by temporarily bypassing the schedule check;
    # restore the original method even if PR creation raises.
    original_should_create = orchestrator.should_create_pr
    orchestrator.should_create_pr = lambda: True
    # Clear any pending PR flag
    state = orchestrator.load_state()
    if 'pending_pr' in state:
        del state['pending_pr']
        orchestrator.save_state({'pending_pr': None})
    try:
        success = orchestrator.create_gitea_pr(ai_response)
    finally:
        orchestrator.should_create_pr = original_should_create
    if success:
        print("\n✅ PR created successfully!")
        # Get the PR number from state
        state = orchestrator.load_state()
        pr_number = state.get('pending_pr')
        pr_url = state.get('pr_url')
        print(f"\n🔗 PR Details:")
        print(f" - PR Number: #{pr_number}")
        print(f" - URL: {pr_url}")
        print(f"\n📋 Next Steps:")
        print(f" 1. Review the PR at: {pr_url}")
        print(f" 2. Click 'Merge Pull Request' to approve")
        print(f" 3. Run: python3 force_deployment.py")
        return True
    else:
        print("❌ Failed to create PR")
        return False
def main():
    """Interactive entry point for the forced pipeline test."""
    print("\n🔬 FORCE PIPELINE TEST")
    print("This will:")
    print("1. Analyze all traffic data from the past week")
    print("2. Generate AI suggestions")
    print("3. Create a PR in Gitea immediately")
    print("\nDo you want to continue? (yes/no): ", end="")
    # Anything other than an explicit 'yes' aborts.
    if input().strip().lower() != 'yes':
        print("Cancelled.")
        return
    outcome = force_pr_creation()
    if outcome:
        print("\n✅ Pipeline test successful!")
    else:
        print("\n❌ Pipeline test failed")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,352 @@
#!/usr/bin/env python3
"""
Gitea Integration Module for SRX GitOps - Fixed Push Authentication
Handles Git operations and Gitea API interactions
"""
import os
import json
import logging
import tempfile
import shutil
from datetime import datetime
from typing import Dict, Optional, Tuple
import subprocess
import requests
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
class GiteaIntegration:
    """Handles all Gitea-related operations.

    Git transport uses a token embedded in the remote URL (oauth2 user);
    pull requests and labels go through the Gitea REST API with a token
    header.
    """

    def __init__(self, config: Dict):
        """
        Initialize Gitea integration.

        Args:
            config: Dictionary containing:
                - url: Gitea instance URL
                - token: API token
                - repo: repository in format "owner/repo"
                - branch: default branch (usually "main")
                - labels: optional list of label names to attach to PRs
        """
        # Keep the full config dict: create_pr_with_config later reads the
        # optional 'labels' key from self.config.
        self.config = config
        self.url = config['url'].rstrip('/')
        self.token = config['token']
        self.repo = config['repo']
        self.default_branch = config.get('branch', 'main')
        # Parse owner and repo name
        self.owner, self.repo_name = self.repo.split('/')
        # Set up API headers
        self.headers = {
            'Authorization': f'token {self.token}',
            'Content-Type': 'application/json'
        }
        # Git configuration
        self.git_url = f"{self.url}/{self.repo}.git"
        # URL with the token embedded so clone/push never prompt.
        # NOTE(review): the scheme is hard-coded to https regardless of
        # config['url'] — confirm the Gitea instance is served over https.
        self.auth_git_url = f"https://oauth2:{self.token}@{urlparse(self.url).netloc}/{self.repo}.git"
        logger.info(f"Initialized Gitea integration for {self.repo}")

    def _run_git_command(self, cmd: list, cwd: str = None) -> Tuple[bool, str]:
        """
        Run a git command and return success status and output.

        Args:
            cmd: List of command arguments
            cwd: Working directory

        Returns:
            Tuple of (success, output); on failure output is git's stderr.
        """
        try:
            # Log the command, but redact the token so it never reaches
            # the logs (cmd may contain self.auth_git_url).
            safe_cmd = []
            for arg in cmd:
                if self.token in arg:
                    safe_cmd.append(arg.replace(self.token, "***TOKEN***"))
                else:
                    safe_cmd.append(arg)
            logger.debug(f"Running git command: {' '.join(safe_cmd)}")
            result = subprocess.run(
                cmd,
                cwd=cwd,
                capture_output=True,
                text=True,
                check=True
            )
            return True, result.stdout
        except subprocess.CalledProcessError as e:
            # Same redaction on the failure path.
            safe_cmd = []
            for arg in cmd:
                if self.token in arg:
                    safe_cmd.append(arg.replace(self.token, "***TOKEN***"))
                else:
                    safe_cmd.append(arg)
            logger.error(f"Git command failed: {' '.join(safe_cmd)}")
            logger.error(f"Error: {e.stderr}")
            return False, e.stderr

    def create_pr_with_config(self, srx_config: str, title: Optional[str] = None,
                              description: Optional[str] = None) -> Optional[Dict]:
        """
        Create a pull request with SRX configuration.

        Clones the repo shallowly into a temp dir, writes the config under
        ai-suggestions/, pushes a timestamped branch, then opens a PR via
        the Gitea API.

        Args:
            srx_config: The SRX configuration content
            title: PR title (auto-generated if not provided)
            description: PR description (auto-generated if not provided)

        Returns:
            PR information dict or None if failed
        """
        timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
        branch_name = f"ai-suggestions-{timestamp}"
        # Auto-generate title and description if not provided
        if not title:
            title = f"AI Network Configuration Suggestions - {datetime.now().strftime('%Y-%m-%d')}"
        if not description:
            description = self._generate_pr_description(srx_config)
        # Create temporary directory for git operations
        with tempfile.TemporaryDirectory() as temp_dir:
            logger.info(f"Working in temporary directory: {temp_dir}")
            # Step 1: Clone the repository with authentication
            logger.info("Cloning repository...")
            success, output = self._run_git_command(
                ['git', 'clone', '--depth', '1', self.auth_git_url, '.'],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to clone repository")
                return None
            # Step 2: Configure git user (identity used for the commit)
            self._run_git_command(
                ['git', 'config', 'user.email', 'ai-orchestrator@srx-gitops.local'],
                cwd=temp_dir
            )
            self._run_git_command(
                ['git', 'config', 'user.name', 'AI Orchestrator'],
                cwd=temp_dir
            )
            # IMPORTANT: Set the push URL explicitly with authentication
            # This ensures push uses the authenticated URL
            logger.info("Setting authenticated push URL...")
            self._run_git_command(
                ['git', 'remote', 'set-url', 'origin', self.auth_git_url],
                cwd=temp_dir
            )
            # Step 3: Create and checkout new branch
            logger.info(f"Creating branch: {branch_name}")
            success, _ = self._run_git_command(
                ['git', 'checkout', '-b', branch_name],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to create branch")
                return None
            # Step 4: Create ai-suggestions directory if it doesn't exist
            suggestions_dir = os.path.join(temp_dir, 'ai-suggestions')
            os.makedirs(suggestions_dir, exist_ok=True)
            # Step 5: Write configuration file
            config_filename = f"suggestion-{timestamp}.conf"
            config_path = os.path.join(suggestions_dir, config_filename)
            with open(config_path, 'w') as f:
                f.write(f"# AI-Generated SRX Configuration\n")
                f.write(f"# Generated: {datetime.now().isoformat()}\n")
                f.write(f"# Analysis Period: Last 7 days\n\n")
                f.write(srx_config)
            logger.info(f"Created config file: {config_filename}")
            # Step 6: Add and commit changes
            self._run_git_command(['git', 'add', '.'], cwd=temp_dir)
            commit_message = f"Add AI-generated configuration suggestions for {datetime.now().strftime('%Y-%m-%d')}"
            success, _ = self._run_git_command(
                ['git', 'commit', '-m', commit_message],
                cwd=temp_dir
            )
            if not success:
                logger.warning("No changes to commit (file might already exist)")
                # Check if we actually have changes; an empty status means
                # nothing new, so there is nothing to open a PR for.
                # (If status is non-empty the code falls through and still
                # tries to push despite the failed commit.)
                status_success, status_output = self._run_git_command(
                    ['git', 'status', '--porcelain'],
                    cwd=temp_dir
                )
                if not status_output.strip():
                    logger.info("No changes detected, skipping PR creation")
                    return None
            # Step 7: Push branch
            logger.info(f"Pushing branch {branch_name}...")
            success, _ = self._run_git_command(
                ['git', 'push', '-u', 'origin', branch_name],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to push branch")
                # Try alternative push method: push straight to the
                # authenticated URL instead of the 'origin' remote.
                logger.info("Trying alternative push method...")
                success, _ = self._run_git_command(
                    ['git', 'push', self.auth_git_url, f"{branch_name}:{branch_name}"],
                    cwd=temp_dir
                )
                if not success:
                    logger.error("All push attempts failed")
                    return None
            # Step 8: Create pull request via API
            logger.info("Creating pull request via Gitea API...")
            # Get label IDs if configured (names must be resolved to IDs)
            label_ids = []
            if 'labels' in self.config:
                label_ids = self.get_label_ids(self.config['labels'])
            pr_data = {
                "title": title,
                "body": description,
                "head": branch_name,
                "base": self.default_branch
            }
            # Only add labels if we found valid IDs
            if label_ids:
                pr_data["labels"] = label_ids
            api_url = f"{self.url}/api/v1/repos/{self.repo}/pulls"
            try:
                response = requests.post(api_url, json=pr_data, headers=self.headers)
                response.raise_for_status()
                pr_info = response.json()
                logger.info(f"Successfully created PR #{pr_info['number']}: {pr_info['title']}")
                return {
                    'number': pr_info['number'],
                    'url': pr_info['html_url'],
                    'branch': branch_name,
                    'created_at': pr_info['created_at']
                }
            except requests.exceptions.RequestException as e:
                logger.error(f"Failed to create PR via API: {e}")
                if hasattr(e.response, 'text'):
                    logger.error(f"Response: {e.response.text}")
                return None

    def _generate_pr_description(self, srx_config: str) -> str:
        """Generate a descriptive PR body.

        Builds a markdown summary highlighting zone/address and
        application/port lines from the configuration (first 10 only).
        """
        config_lines = srx_config.strip().split('\n')
        summary = []
        # Parse configuration to create summary
        for line in config_lines:
            if 'security-zone' in line and 'address' in line:
                summary.append(f"- {line.strip()}")
            elif 'application' in line and 'destination-port' in line:
                summary.append(f"- {line.strip()}")
        description = f"""## 🤖 AI-Generated Network Configuration
This pull request contains network configuration suggestions generated by the AI orchestrator based on traffic analysis from the past 7 days.
### 📊 Analysis Summary
- **Analysis Period**: Last 7 days
- **Data Source**: NetFlow/J-Flow from Elasticsearch
- **Generation Time**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
### 🔧 Proposed Changes
{chr(10).join(summary[:10]) if summary else 'Various security zone and application updates'}
{'... and more' if len(summary) > 10 else ''}
### ⚠️ Review Required
Please review these suggestions carefully before approving. The AI has analyzed traffic patterns and suggested optimizations, but human validation is essential.
### 🔄 Deployment
Once approved, these changes will be automatically deployed during the next deployment window (daily at 5 AM).
---
*Generated by SRX AI GitOps Orchestrator*"""
        return description

    def get_pr_status(self, pr_number: int) -> Optional[Dict]:
        """
        Get the status of a pull request.

        Args:
            pr_number: PR number to check

        Returns:
            Dictionary with PR status info or None
        """
        api_url = f"{self.url}/api/v1/repos/{self.repo}/pulls/{pr_number}"
        try:
            response = requests.get(api_url, headers=self.headers)
            response.raise_for_status()
            pr_data = response.json()
            return {
                'number': pr_data['number'],
                'state': pr_data['state'],  # open, closed
                'merged': pr_data['merged'],
                'mergeable': pr_data['mergeable'],
                'title': pr_data['title'],
                'created_at': pr_data['created_at'],
                'updated_at': pr_data['updated_at']
            }
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get PR status: {e}")
            return None

    def get_label_ids(self, label_names: list) -> list:
        """
        Get label IDs from label names.

        Args:
            label_names: List of label names

        Returns:
            List of label IDs (names missing from the repo are skipped
            with a warning, not an error)
        """
        api_url = f"{self.url}/api/v1/repos/{self.repo}/labels"
        try:
            response = requests.get(api_url, headers=self.headers)
            response.raise_for_status()
            labels = response.json()
            label_map = {label['name']: label['id'] for label in labels}
            found_ids = []
            for name in label_names:
                if name in label_map:
                    found_ids.append(label_map[name])
                    logger.info(f"Found label '{name}' with ID {label_map[name]}")
                else:
                    logger.warning(f"Label '{name}' not found in repository")
            return found_ids
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get labels: {e}")
            return []

View File

@@ -0,0 +1,384 @@
#!/usr/bin/env python3
"""
Gitea Integration Module for SRX GitOps - Fixed Authentication
Handles Git operations and Gitea API interactions
"""
import os
import json
import logging
import tempfile
import shutil
from datetime import datetime
from typing import Dict, Optional, Tuple
import subprocess
import requests
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
class GiteaIntegration:
    """Handles all Gitea-related operations.

    Git transport injects the token into the remote URL (oauth2 user) and
    also exports credential environment variables; API calls use a token
    header. A stored credential helper is the clone fallback.
    """

    def __init__(self, config: Dict):
        """
        Initialize Gitea integration.

        Args:
            config: Dictionary containing:
                - url: Gitea instance URL
                - token: API token
                - repo: repository in format "owner/repo"
                - branch: default branch (usually "main")
                - labels: optional list of label names to attach to PRs
        """
        # Bug fix: keep the full config dict. create_pr_with_config reads
        # the optional 'labels' key via self.config; without this line it
        # raised AttributeError.
        self.config = config
        self.url = config['url'].rstrip('/')
        self.token = config['token']
        self.repo = config['repo']
        self.default_branch = config.get('branch', 'main')
        # Parse owner and repo name
        self.owner, self.repo_name = self.repo.split('/')
        # Set up API headers
        self.headers = {
            'Authorization': f'token {self.token}',
            'Content-Type': 'application/json'
        }
        # Git configuration - Fix authentication format
        self.git_url = f"{self.url}/{self.repo}.git"
        logger.info(f"Initialized Gitea integration for {self.repo}")

    def _run_git_command(self, cmd: list, cwd: str = None) -> Tuple[bool, str]:
        """
        Run a git command and return success status and output.

        For network operations (clone/push/pull/fetch) the first http(s)
        URL in the command is rewritten to embed the oauth2 token.

        Args:
            cmd: List of command arguments
            cwd: Working directory

        Returns:
            Tuple of (success, output); on failure output is git's stderr.
        """
        try:
            # Create a copy of the command to modify
            auth_cmd = cmd.copy()
            # Add authentication to git commands that need it
            if any(action in cmd for action in ['clone', 'push', 'pull', 'fetch']):
                for i, arg in enumerate(auth_cmd):
                    if arg.startswith('http'):
                        # Gitea supports multiple auth formats, let's use oauth2
                        parsed = urlparse(arg)
                        # Try oauth2 format which is commonly supported
                        auth_url = f"{parsed.scheme}://oauth2:{self.token}@{parsed.netloc}{parsed.path}"
                        auth_cmd[i] = auth_url
                        break
            # Also set up git credentials via environment.
            # GIT_ASKPASS='echo' prevents git from blocking on a prompt.
            env = os.environ.copy()
            env['GIT_ASKPASS'] = 'echo'
            env['GIT_USERNAME'] = 'oauth2'
            env['GIT_PASSWORD'] = self.token
            result = subprocess.run(
                auth_cmd,
                cwd=cwd,
                capture_output=True,
                text=True,
                check=True,
                env=env
            )
            return True, result.stdout
        except subprocess.CalledProcessError as e:
            # Logs the original cmd (not auth_cmd), so the injected token
            # URL is never written to the logs.
            logger.error(f"Git command failed: {' '.join(cmd)}")
            logger.error(f"Error: {e.stderr}")
            return False, e.stderr

    def test_authentication(self) -> bool:
        """Test if Git authentication is working by listing remote refs."""
        try:
            logger.info("Testing Git authentication...")
            # Try to list remote refs
            success, output = self._run_git_command(
                ['git', 'ls-remote', self.git_url, 'HEAD']
            )
            if success:
                logger.info("Git authentication successful")
                return True
            else:
                logger.error("Git authentication failed")
                return False
        except Exception as e:
            logger.error(f"Authentication test error: {e}")
            return False

    def create_pr_with_config(self, srx_config: str, title: Optional[str] = None,
                              description: Optional[str] = None) -> Optional[Dict]:
        """
        Create a pull request with SRX configuration.

        Verifies authentication, clones the repo shallowly into a temp
        dir, writes the config under ai-suggestions/, pushes a
        timestamped branch, then opens a PR via the Gitea API.

        Args:
            srx_config: The SRX configuration content
            title: PR title (auto-generated if not provided)
            description: PR description (auto-generated if not provided)

        Returns:
            PR information dict or None if failed
        """
        # First test authentication
        if not self.test_authentication():
            logger.error("Git authentication test failed, aborting PR creation")
            return None
        timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
        branch_name = f"ai-suggestions-{timestamp}"
        # Auto-generate title and description if not provided
        if not title:
            title = f"AI Network Configuration Suggestions - {datetime.now().strftime('%Y-%m-%d')}"
        if not description:
            description = self._generate_pr_description(srx_config)
        # Create temporary directory for git operations
        with tempfile.TemporaryDirectory() as temp_dir:
            logger.info(f"Working in temporary directory: {temp_dir}")
            # Step 1: Clone the repository
            logger.info("Cloning repository...")
            success, output = self._run_git_command(
                ['git', 'clone', '--depth', '1', self.git_url, '.'],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to clone repository")
                # Try alternative authentication method
                logger.info("Trying alternative clone method...")
                # Use git credential helper
                self._setup_git_credentials(temp_dir)
                success, output = self._run_git_command(
                    ['git', 'clone', '--depth', '1', self.git_url, '.'],
                    cwd=temp_dir
                )
                if not success:
                    logger.error("All clone attempts failed")
                    return None
            # Step 2: Configure git user (identity used for the commit)
            self._run_git_command(
                ['git', 'config', 'user.email', 'ai-orchestrator@srx-gitops.local'],
                cwd=temp_dir
            )
            self._run_git_command(
                ['git', 'config', 'user.name', 'AI Orchestrator'],
                cwd=temp_dir
            )
            # Step 3: Create and checkout new branch
            logger.info(f"Creating branch: {branch_name}")
            success, _ = self._run_git_command(
                ['git', 'checkout', '-b', branch_name],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to create branch")
                return None
            # Step 4: Create ai-suggestions directory if it doesn't exist
            suggestions_dir = os.path.join(temp_dir, 'ai-suggestions')
            os.makedirs(suggestions_dir, exist_ok=True)
            # Step 5: Write configuration file
            config_filename = f"suggestion-{timestamp}.conf"
            config_path = os.path.join(suggestions_dir, config_filename)
            with open(config_path, 'w') as f:
                f.write(f"# AI-Generated SRX Configuration\n")
                f.write(f"# Generated: {datetime.now().isoformat()}\n")
                f.write(f"# Analysis Period: Last 7 days\n\n")
                f.write(srx_config)
            logger.info(f"Created config file: {config_filename}")
            # Step 6: Add and commit changes
            self._run_git_command(['git', 'add', '.'], cwd=temp_dir)
            commit_message = f"Add AI-generated configuration suggestions for {datetime.now().strftime('%Y-%m-%d')}"
            success, _ = self._run_git_command(
                ['git', 'commit', '-m', commit_message],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to commit changes")
                return None
            # Step 7: Push branch
            logger.info(f"Pushing branch {branch_name}...")
            success, _ = self._run_git_command(
                ['git', 'push', 'origin', branch_name],
                cwd=temp_dir
            )
            if not success:
                logger.error("Failed to push branch")
                return None
            # Step 8: Create pull request via API
            logger.info("Creating pull request via Gitea API...")
            # First, get the label IDs if configured
            label_ids = []
            if 'labels' in self.config:
                label_ids = self.get_label_ids(self.config['labels'])
            pr_data = {
                "title": title,
                "body": description,
                "head": branch_name,
                "base": self.default_branch
            }
            # Only add labels if we found valid IDs
            if label_ids:
                pr_data["labels"] = label_ids
            api_url = f"{self.url}/api/v1/repos/{self.repo}/pulls"
            try:
                response = requests.post(api_url, json=pr_data, headers=self.headers)
                response.raise_for_status()
                pr_info = response.json()
                logger.info(f"Successfully created PR #{pr_info['number']}: {pr_info['title']}")
                return {
                    'number': pr_info['number'],
                    'url': pr_info['html_url'],
                    'branch': branch_name,
                    'created_at': pr_info['created_at']
                }
            except requests.exceptions.RequestException as e:
                logger.error(f"Failed to create PR via API: {e}")
                if hasattr(e.response, 'text'):
                    logger.error(f"Response: {e.response.text}")
                return None

    def _setup_git_credentials(self, cwd: str):
        """Setup git credentials using the 'store' credential helper.

        Writes an oauth2 token credential line into a mode-0600 file so
        a subsequent clone can authenticate without prompting.
        """
        # Configure credential helper
        self._run_git_command(
            ['git', 'config', '--local', 'credential.helper', 'store'],
            cwd=cwd
        )
        # Write credentials file
        cred_file = os.path.join(cwd, '.git-credentials')
        parsed = urlparse(self.git_url)
        cred_url = f"{parsed.scheme}://oauth2:{self.token}@{parsed.netloc}\n"
        with open(cred_file, 'w') as f:
            f.write(cred_url)
        os.chmod(cred_file, 0o600)

    def _generate_pr_description(self, srx_config: str) -> str:
        """Generate a descriptive PR body.

        Builds a markdown summary highlighting zone/address and
        application/port lines from the configuration (first 10 only).
        """
        config_lines = srx_config.strip().split('\n')
        summary = []
        # Parse configuration to create summary
        for line in config_lines:
            if 'security-zone' in line and 'address' in line:
                summary.append(f"- {line.strip()}")
            elif 'application' in line and 'destination-port' in line:
                summary.append(f"- {line.strip()}")
        description = f"""## 🤖 AI-Generated Network Configuration
This pull request contains network configuration suggestions generated by the AI orchestrator based on traffic analysis from the past 7 days.
### 📊 Analysis Summary
- **Analysis Period**: Last 7 days
- **Data Source**: NetFlow/J-Flow from Elasticsearch
- **Generation Time**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
### 🔧 Proposed Changes
{chr(10).join(summary[:10]) if summary else 'Various security zone and application updates'}
{'... and more' if len(summary) > 10 else ''}
### ⚠️ Review Required
Please review these suggestions carefully before approving. The AI has analyzed traffic patterns and suggested optimizations, but human validation is essential.
### 🔄 Deployment
Once approved, these changes will be automatically deployed during the next deployment window (daily at 5 AM).
---
*Generated by SRX AI GitOps Orchestrator*"""
        return description

    def get_pr_status(self, pr_number: int) -> Optional[Dict]:
        """
        Get the status of a pull request.

        Args:
            pr_number: PR number to check

        Returns:
            Dictionary with PR status info or None
        """
        api_url = f"{self.url}/api/v1/repos/{self.repo}/pulls/{pr_number}"
        try:
            response = requests.get(api_url, headers=self.headers)
            response.raise_for_status()
            pr_data = response.json()
            return {
                'number': pr_data['number'],
                'state': pr_data['state'],  # open, closed
                'merged': pr_data['merged'],
                'mergeable': pr_data['mergeable'],
                'title': pr_data['title'],
                'created_at': pr_data['created_at'],
                'updated_at': pr_data['updated_at']
            }
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get PR status: {e}")
            return None

    def get_label_ids(self, label_names: list) -> list:
        """
        Get label IDs from label names.

        Args:
            label_names: List of label names

        Returns:
            List of label IDs (names missing from the repo are skipped
            with a warning, not an error)
        """
        api_url = f"{self.url}/api/v1/repos/{self.repo}/labels"
        try:
            response = requests.get(api_url, headers=self.headers)
            response.raise_for_status()
            labels = response.json()
            label_map = {label['name']: label['id'] for label in labels}
            found_ids = []
            for name in label_names:
                if name in label_map:
                    found_ids.append(label_map[name])
                    logger.info(f"Found label '{name}' with ID {label_map[name]}")
                else:
                    logger.warning(f"Label '{name}' not found in repository")
            return found_ids
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to get labels: {e}")
            return []

View File

@@ -0,0 +1,351 @@
#!/usr/bin/env python3
"""
Gitea Integration Module for SRX GitOps - Fixed Push Authentication
Handles Git operations and Gitea API interactions
"""
import os
import json
import logging
import tempfile
import shutil
from datetime import datetime
from typing import Dict, Optional, Tuple
import subprocess
import requests
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
class GiteaIntegration:
"""Handles all Gitea-related operations"""
def __init__(self, config: Dict):
    """
    Initialize Gitea integration.

    Args:
        config: Dictionary containing:
            - url: Gitea instance URL
            - token: API token
            - repo: repository in format "owner/repo"
            - branch: default branch (usually "main")
            - labels: optional list of label names to attach to PRs
    """
    # Bug fix: keep the full config dict. create_pr_with_config reads the
    # optional 'labels' key via self.config; without this line it raised
    # AttributeError.
    self.config = config
    self.url = config['url'].rstrip('/')
    self.token = config['token']
    self.repo = config['repo']
    self.default_branch = config.get('branch', 'main')
    # Parse owner and repo name
    self.owner, self.repo_name = self.repo.split('/')
    # Set up API headers
    self.headers = {
        'Authorization': f'token {self.token}',
        'Content-Type': 'application/json'
    }
    # Git configuration
    self.git_url = f"{self.url}/{self.repo}.git"
    # URL with the token embedded so clone/push never prompt.
    # NOTE(review): the scheme is hard-coded to https regardless of
    # config['url'] — confirm the Gitea instance is served over https.
    self.auth_git_url = f"https://oauth2:{self.token}@{urlparse(self.url).netloc}/{self.repo}.git"
    logger.info(f"Initialized Gitea integration for {self.repo}")
def _run_git_command(self, cmd: list, cwd: str = None) -> Tuple[bool, str]:
    """Run a git command and report the outcome.

    Args:
        cmd: List of command arguments (may contain the token inside an
            authenticated URL).
        cwd: Working directory for the command.

    Returns:
        Tuple of (success, output); on failure output is the stderr text.
    """
    def redacted(args):
        # Never let the API token reach the logs.
        return [a.replace(self.token, "***TOKEN***") if self.token in a else a
                for a in args]

    try:
        logger.debug(f"Running git command: {' '.join(redacted(cmd))}")
        result = subprocess.run(
            cmd,
            cwd=cwd,
            capture_output=True,
            text=True,
            check=True
        )
        return True, result.stdout
    except subprocess.CalledProcessError as e:
        logger.error(f"Git command failed: {' '.join(redacted(cmd))}")
        logger.error(f"Error: {e.stderr}")
        return False, e.stderr
def create_pr_with_config(self, srx_config: str, title: Optional[str] = None,
                          description: Optional[str] = None) -> Optional[Dict]:
    """
    Create a pull request with SRX configuration.

    Clones the repo shallowly into a temp dir, writes the config under
    ai-suggestions/, pushes a timestamped branch using the authenticated
    remote URL, then opens a PR via the Gitea API.

    Args:
        srx_config: The SRX configuration content
        title: PR title (auto-generated if not provided)
        description: PR description (auto-generated if not provided)

    Returns:
        PR information dict or None if failed
    """
    timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    branch_name = f"ai-suggestions-{timestamp}"
    # Auto-generate title and description if not provided
    if not title:
        title = f"AI Network Configuration Suggestions - {datetime.now().strftime('%Y-%m-%d')}"
    if not description:
        description = self._generate_pr_description(srx_config)
    # Create temporary directory for git operations
    with tempfile.TemporaryDirectory() as temp_dir:
        logger.info(f"Working in temporary directory: {temp_dir}")
        # Step 1: Clone the repository with authentication
        logger.info("Cloning repository...")
        success, output = self._run_git_command(
            ['git', 'clone', '--depth', '1', self.auth_git_url, '.'],
            cwd=temp_dir
        )
        if not success:
            logger.error("Failed to clone repository")
            return None
        # Step 2: Configure git user (identity used for the commit)
        self._run_git_command(
            ['git', 'config', 'user.email', 'ai-orchestrator@srx-gitops.local'],
            cwd=temp_dir
        )
        self._run_git_command(
            ['git', 'config', 'user.name', 'AI Orchestrator'],
            cwd=temp_dir
        )
        # IMPORTANT: Set the push URL explicitly with authentication
        # This ensures push uses the authenticated URL
        logger.info("Setting authenticated push URL...")
        self._run_git_command(
            ['git', 'remote', 'set-url', 'origin', self.auth_git_url],
            cwd=temp_dir
        )
        # Step 3: Create and checkout new branch
        logger.info(f"Creating branch: {branch_name}")
        success, _ = self._run_git_command(
            ['git', 'checkout', '-b', branch_name],
            cwd=temp_dir
        )
        if not success:
            logger.error("Failed to create branch")
            return None
        # Step 4: Create ai-suggestions directory if it doesn't exist
        suggestions_dir = os.path.join(temp_dir, 'ai-suggestions')
        os.makedirs(suggestions_dir, exist_ok=True)
        # Step 5: Write configuration file
        config_filename = f"suggestion-{timestamp}.conf"
        config_path = os.path.join(suggestions_dir, config_filename)
        with open(config_path, 'w') as f:
            f.write(f"# AI-Generated SRX Configuration\n")
            f.write(f"# Generated: {datetime.now().isoformat()}\n")
            f.write(f"# Analysis Period: Last 7 days\n\n")
            f.write(srx_config)
        logger.info(f"Created config file: {config_filename}")
        # Step 6: Add and commit changes
        self._run_git_command(['git', 'add', '.'], cwd=temp_dir)
        commit_message = f"Add AI-generated configuration suggestions for {datetime.now().strftime('%Y-%m-%d')}"
        success, _ = self._run_git_command(
            ['git', 'commit', '-m', commit_message],
            cwd=temp_dir
        )
        if not success:
            logger.warning("No changes to commit (file might already exist)")
            # Check if we actually have changes; an empty status means
            # nothing new, so there is nothing to open a PR for.
            # (If status is non-empty the code falls through and still
            # attempts the push despite the failed commit.)
            status_success, status_output = self._run_git_command(
                ['git', 'status', '--porcelain'],
                cwd=temp_dir
            )
            if not status_output.strip():
                logger.info("No changes detected, skipping PR creation")
                return None
        # Step 7: Push branch
        logger.info(f"Pushing branch {branch_name}...")
        success, _ = self._run_git_command(
            ['git', 'push', '-u', 'origin', branch_name],
            cwd=temp_dir
        )
        if not success:
            logger.error("Failed to push branch")
            # Try alternative push method: push straight to the
            # authenticated URL instead of the 'origin' remote.
            logger.info("Trying alternative push method...")
            success, _ = self._run_git_command(
                ['git', 'push', self.auth_git_url, f"{branch_name}:{branch_name}"],
                cwd=temp_dir
            )
            if not success:
                logger.error("All push attempts failed")
                return None
        # Step 8: Create pull request via API
        logger.info("Creating pull request via Gitea API...")
        # Get label IDs if configured.
        # NOTE(review): requires __init__ to store self.config — confirm
        # the constructor in this module sets it.
        label_ids = []
        if 'labels' in self.config:
            label_ids = self.get_label_ids(self.config['labels'])
        pr_data = {
            "title": title,
            "body": description,
            "head": branch_name,
            "base": self.default_branch
        }
        # Only add labels if we found valid IDs
        if label_ids:
            pr_data["labels"] = label_ids
        api_url = f"{self.url}/api/v1/repos/{self.repo}/pulls"
        try:
            response = requests.post(api_url, json=pr_data, headers=self.headers)
            response.raise_for_status()
            pr_info = response.json()
            logger.info(f"Successfully created PR #{pr_info['number']}: {pr_info['title']}")
            return {
                'number': pr_info['number'],
                'url': pr_info['html_url'],
                'branch': branch_name,
                'created_at': pr_info['created_at']
            }
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to create PR via API: {e}")
            if hasattr(e.response, 'text'):
                logger.error(f"Response: {e.response.text}")
            return None
def _generate_pr_description(self, srx_config: str) -> str:
"""Generate a descriptive PR body"""
config_lines = srx_config.strip().split('\n')
summary = []
# Parse configuration to create summary
for line in config_lines:
if 'security-zone' in line and 'address' in line:
summary.append(f"- {line.strip()}")
elif 'application' in line and 'destination-port' in line:
summary.append(f"- {line.strip()}")
description = f"""## 🤖 AI-Generated Network Configuration
This pull request contains network configuration suggestions generated by the AI orchestrator based on traffic analysis from the past 7 days.
### 📊 Analysis Summary
- **Analysis Period**: Last 7 days
- **Data Source**: NetFlow/J-Flow from Elasticsearch
- **Generation Time**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
### 🔧 Proposed Changes
{chr(10).join(summary[:10]) if summary else 'Various security zone and application updates'}
{'... and more' if len(summary) > 10 else ''}
### ⚠️ Review Required
Please review these suggestions carefully before approving. The AI has analyzed traffic patterns and suggested optimizations, but human validation is essential.
### 🔄 Deployment
Once approved, these changes will be automatically deployed during the next deployment window (daily at 5 AM).
---
*Generated by SRX AI GitOps Orchestrator*"""
return description
def get_pr_status(self, pr_number: int) -> Optional[Dict]:
"""
Get the status of a pull request
Args:
pr_number: PR number to check
Returns:
Dictionary with PR status info or None
"""
api_url = f"{self.url}/api/v1/repos/{self.repo}/pulls/{pr_number}"
try:
response = requests.get(api_url, headers=self.headers)
response.raise_for_status()
pr_data = response.json()
return {
'number': pr_data['number'],
'state': pr_data['state'], # open, closed
'merged': pr_data['merged'],
'mergeable': pr_data['mergeable'],
'title': pr_data['title'],
'created_at': pr_data['created_at'],
'updated_at': pr_data['updated_at']
}
except requests.exceptions.RequestException as e:
logger.error(f"Failed to get PR status: {e}")
return None
def get_label_ids(self, label_names: list) -> list:
"""
Get label IDs from label names
Args:
label_names: List of label names
Returns:
List of label IDs
"""
api_url = f"{self.url}/api/v1/repos/{self.repo}/labels"
try:
response = requests.get(api_url, headers=self.headers)
response.raise_for_status()
labels = response.json()
label_map = {label['name']: label['id'] for label in labels}
found_ids = []
for name in label_names:
if name in label_map:
found_ids.append(label_map[name])
logger.info(f"Found label '{name}' with ID {label_map[name]}")
else:
logger.warning(f"Label '{name}' not found in repository")
return found_ids
except requests.exceptions.RequestException as e:
logger.error(f"Failed to get labels: {e}")
return []

View File

@@ -0,0 +1,377 @@
#!/usr/bin/env python3
"""
Gitea PR Creation and Feedback Handler
Creates real PRs in Gitea and handles rejection feedback
"""
import os
import sys
import json
import yaml
import requests
from datetime import datetime
from pathlib import Path
import subprocess
class GiteaPRManager:
    """Creates real PRs in Gitea from AI suggestion files and records
    reviewer rejections so the AI can learn from the feedback.

    All exchange with the AI side happens through JSON files under
    /shared/ai-gitops (pending_prs for suggestions, feedback for rejections).
    """

    def __init__(self, config_path='/home/netops/orchestrator/config.yaml'):
        """Initialize with Gitea configuration.

        Args:
            config_path: YAML file with a ``gitea`` section providing
                url/token/owner/repo. Missing keys fall back to defaults.
        """
        # Load config
        with open(config_path, 'r') as f:
            self.config = yaml.safe_load(f)
        self.gitea_config = self.config.get('gitea', {})
        self.base_url = self.gitea_config.get('url', 'http://localhost:3000')
        self.token = self.gitea_config.get('token', '')
        self.repo_owner = self.gitea_config.get('owner', 'netops')
        self.repo_name = self.gitea_config.get('repo', 'srx-config')
        # Gitea token auth; sent with every API request below.
        self.headers = {
            'Authorization': f'token {self.token}',
            'Content-Type': 'application/json'
        }
        self.pending_prs_dir = Path('/shared/ai-gitops/pending_prs')
        self.feedback_dir = Path('/shared/ai-gitops/feedback')

    def create_pr_from_ai_suggestions(self, pr_file=None):
        """Create a PR in Gitea from AI suggestions.

        Args:
            pr_file: Path to a pr_*.json suggestions file; when None the
                newest file in pending_prs_dir is used.

        Returns:
            The new PR number on success, None on any failure.
        """
        print("\n" + "="*60)
        print("Creating Gitea PR from AI Suggestions")
        print("="*60)
        # Find latest PR file if not specified
        if pr_file is None:
            # Newest first by modification time.
            pr_files = sorted(self.pending_prs_dir.glob('pr_*.json'),
                              key=lambda x: x.stat().st_mtime, reverse=True)
            if not pr_files:
                print("❌ No pending PR files found")
                return None
            pr_file = pr_files[0]
            print(f"📄 Using PR file: {pr_file.name}")
        # Load PR data
        with open(pr_file, 'r') as f:
            pr_data = json.load(f)
        # Create a new branch (timestamped so names never collide)
        branch_name = f"ai-suggestions-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
        # Create the configuration file content
        config_content = f"""# AI-Generated Network Configuration
# Generated: {pr_data.get('timestamp', datetime.now().isoformat())}
# Model: {pr_data.get('model', 'llama2:13b')}
# Feedback Aware: {pr_data.get('feedback_aware', False)}
{pr_data.get('suggestions', '')}
"""
        # Create branch and file via Gitea API
        try:
            # First, get the default branch SHA
            repo_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}"
            repo_response = requests.get(repo_url, headers=self.headers)
            if repo_response.status_code != 200:
                print(f"❌ Failed to get repo info: {repo_response.status_code}")
                print(f" Response: {repo_response.text}")
                return None
            default_branch = repo_response.json().get('default_branch', 'main')
            # Get the SHA of the default branch
            branch_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/branches/{default_branch}"
            branch_response = requests.get(branch_url, headers=self.headers)
            if branch_response.status_code != 200:
                print(f"❌ Failed to get branch info: {branch_response.status_code}")
                return None
            # NOTE(review): base_sha is fetched but the branch-create call
            # below only uses branch names — confirm whether it is needed.
            base_sha = branch_response.json()['commit']['id']
            # Create new branch
            create_branch_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/branches"
            branch_data = {
                'new_branch_name': branch_name,
                'old_branch_name': default_branch
            }
            branch_create = requests.post(create_branch_url,
                                          headers=self.headers,
                                          json=branch_data)
            if branch_create.status_code not in [201, 200]:
                print(f"❌ Failed to create branch: {branch_create.status_code}")
                print(f" Response: {branch_create.text}")
                return None
            print(f"✅ Created branch: {branch_name}")
            # Create or update file in the new branch
            file_path = f"ai-suggestions/config_{datetime.now().strftime('%Y%m%d_%H%M%S')}.conf"
            file_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/contents/{file_path}"
            import base64
            # Gitea's contents API requires base64-encoded file content.
            file_data = {
                'branch': branch_name,
                'content': base64.b64encode(config_content.encode()).decode(),
                'message': f"AI suggestions: {pr_data.get('title', 'Network optimization')}"
            }
            file_response = requests.post(file_url, headers=self.headers, json=file_data)
            if file_response.status_code not in [201, 200]:
                # Non-fatal: PR creation is still attempted below.
                print(f"⚠️ Could not create file via API, trying alternative method")
            else:
                print(f"✅ Created config file: {file_path}")
            # Create Pull Request
            pr_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/pulls"
            pr_body = f"""## AI-Generated Network Configuration
### Analysis Context
- **Zones Analyzed**: {', '.join(pr_data.get('network_context', {}).get('zones', []))}
- **Policies Reviewed**: {pr_data.get('network_context', {}).get('policies', 0)}
- **Feedback Aware**: {pr_data.get('feedback_aware', False)}
### Suggested Changes
```junos
{pr_data.get('suggestions', '')}
```
### Review Checklist
- [ ] No any/any/any rules
- [ ] Logging enabled on all policies
- [ ] Proper zone segmentation
- [ ] Address-sets used instead of individual IPs
- [ ] Applications are specific (not "any")
### How to Test
1. Apply to lab SRX first
2. Verify traffic flow
3. Check logs for any issues
4. Apply to production if tests pass
---
*This PR was automatically generated by the AI Network Automation system*
"""
            pr_request = {
                'title': pr_data.get('title', 'AI Network Configuration Suggestions'),
                'head': branch_name,
                'base': default_branch,
                'body': pr_body
            }
            pr_response = requests.post(pr_url, headers=self.headers, json=pr_request)
            if pr_response.status_code == 201:
                pr_info = pr_response.json()
                pr_number = pr_info['number']
                pr_html_url = pr_info['html_url']
                print(f"\n✅ Pull Request created successfully!")
                print(f" PR Number: #{pr_number}")
                print(f" URL: {pr_html_url}")
                # Save PR info for tracking (links the Gitea PR back to the
                # originating AI request for later feedback correlation).
                pr_tracking = {
                    'pr_number': pr_number,
                    'pr_url': pr_html_url,
                    'branch': branch_name,
                    'created_at': datetime.now().isoformat(),
                    'ai_request_id': pr_data.get('request_id'),
                    'suggestions_file': str(pr_file)
                }
                tracking_file = self.pending_prs_dir / f"gitea_pr_{pr_number}.json"
                with open(tracking_file, 'w') as f:
                    json.dump(pr_tracking, f, indent=2)
                return pr_number
            else:
                print(f"❌ Failed to create PR: {pr_response.status_code}")
                print(f" Response: {pr_response.text}")
                return None
        except Exception as e:
            # Broad catch: any network/JSON/filesystem failure aborts the
            # whole PR creation with a printed error.
            print(f"❌ Error creating PR: {e}")
            return None

    def reject_pr_with_feedback(self, pr_number, feedback_message):
        """Reject a PR and save feedback for AI learning.

        Posts a rejecting review (or a plain comment as fallback), closes
        the PR, and appends a structured entry to the feedback history file.

        Args:
            pr_number: PR to reject.
            feedback_message: Free-form reviewer feedback text.

        Returns:
            The feedback entry dict that was appended to history.
        """
        print("\n" + "="*60)
        print(f"Rejecting PR #{pr_number} with Feedback")
        print("="*60)
        # Close the PR via API
        pr_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/pulls/{pr_number}"
        # Add comment with feedback
        comment_url = f"{pr_url}/reviews"
        comment_data = {
            'body': feedback_message,
            'event': 'REJECT' # or 'REQUEST_CHANGES'
        }
        comment_response = requests.post(comment_url, headers=self.headers, json=comment_data)
        if comment_response.status_code not in [200, 201]:
            # Try alternative: just add a comment
            issue_comment_url = f"{self.base_url}/api/v1/repos/{self.repo_owner}/{self.repo_name}/issues/{pr_number}/comments"
            comment_data = {
                'body': f"❌ **REJECTED**\n\n{feedback_message}"
            }
            # Fire-and-forget: the response status is intentionally ignored.
            requests.post(issue_comment_url, headers=self.headers, json=comment_data)
        # Close the PR
        close_data = {
            'state': 'closed'
        }
        close_response = requests.patch(pr_url, headers=self.headers, json=close_data)
        if close_response.status_code == 200:
            print(f"✅ PR #{pr_number} closed")
        else:
            print(f"⚠️ Could not close PR via API")
        # Save feedback for AI learning
        feedback_entry = {
            'pr_number': pr_number,
            'timestamp': datetime.now().isoformat(),
            'feedback_type': 'rejected',
            'reviewer': 'security_team',
            'details': {
                'reason': feedback_message,
                # Structured issues extracted by keyword matching below.
                'specific_issues': self.parse_feedback_for_issues(feedback_message)
            }
        }
        # Load and update feedback history
        feedback_file = self.feedback_dir / 'pr_feedback_history.json'
        self.feedback_dir.mkdir(parents=True, exist_ok=True)
        if feedback_file.exists():
            with open(feedback_file, 'r') as f:
                history = json.load(f)
        else:
            history = []
        history.append(feedback_entry)
        with open(feedback_file, 'w') as f:
            json.dump(history, f, indent=2)
        print(f"✅ Feedback saved for AI learning")
        print(f" Total feedback entries: {len(history)}")
        return feedback_entry

    def parse_feedback_for_issues(self, feedback_text):
        """Parse feedback text to extract specific issues.

        Args:
            feedback_text: Free-form reviewer feedback.

        Returns:
            A list of {pattern, description, type} dicts for each known
            security keyword found; if no pattern matches, the original
            feedback string is returned unchanged.
            NOTE(review): mixed return type (list vs str) — callers must
            handle both.
        """
        issues = []
        # Common security issues to look for (substring, explanation).
        patterns = [
            ('any/any/any', 'Never use any/any/any rules'),
            ('no logging', 'Always enable logging'),
            ('source-address any', 'Avoid using source-address any'),
            ('destination-address any', 'Avoid using destination-address any'),
            ('application any', 'Specify applications instead of any'),
            ('overly permissive', 'Rules are too permissive'),
            ('zone segmentation', 'Improper zone segmentation'),
            ('iot', 'IoT security concerns')
        ]
        feedback_lower = feedback_text.lower()
        for pattern, description in patterns:
            if pattern in feedback_lower:
                issues.append({
                    'pattern': pattern,
                    'description': description,
                    'type': 'security'
                })
        return issues if issues else feedback_text
def main():
    """Main entry point for testing.

    Interactive menu: create a PR from the latest AI suggestions, reject
    an existing PR with feedback, or run the full create-then-reject cycle
    to exercise the feedback loop end to end.
    """
    print("\n" + "="*60)
    print(" GITEA PR FEEDBACK TESTING")
    print("="*60)
    manager = GiteaPRManager()
    print("\nOptions:")
    print("1. Create a new PR from latest AI suggestions")
    print("2. Reject a PR with feedback")
    print("3. Run complete test cycle")
    choice = input("\nSelect option (1-3): ")
    if choice == '1':
        pr_number = manager.create_pr_from_ai_suggestions()
        if pr_number:
            print(f"\n✅ Successfully created PR #{pr_number}")
            print("\nYou can now:")
            print(f"1. Review it in Gitea")
            print(f"2. Reject it with: python3 gitea_pr_feedback.py")
    elif choice == '2':
        pr_number = input("Enter PR number to reject: ")
        print("\nEnter rejection feedback (press Ctrl+D when done):")
        # Read multi-line feedback from stdin until EOF (Ctrl+D).
        feedback_lines = []
        try:
            while True:
                feedback_lines.append(input())
        except EOFError:
            pass
        feedback = '\n'.join(feedback_lines)
        # Fall back to a canned security review when nothing was typed.
        if not feedback:
            feedback = """This configuration has security issues:
1. Any/any/any rules detected - this violates zero-trust principles
2. No logging enabled on some policies
3. Overly permissive access between zones
Please revise to:
- Use specific address-sets
- Enable logging on all policies
- Implement proper zone segmentation"""
        manager.reject_pr_with_feedback(pr_number, feedback)
    elif choice == '3':
        # Full loop: create a PR, then simulate a security-team rejection
        # so the next AI run can learn from the recorded feedback.
        print("\n📋 Complete test cycle:")
        print("1. Creating PR from AI suggestions...")
        pr_number = manager.create_pr_from_ai_suggestions()
        if pr_number:
            print(f"\n2. Waiting for review...")
            input(" Press Enter to simulate rejection...")
            feedback = """Security Review Failed:
❌ Critical Issues Found:
- Any/any/any rule in policy ALLOW-ALL
- No logging on DMZ policies
- IoT zone has unrestricted access to HOME zone
Requirements:
- All policies must use specific addresses
- Logging must be enabled
- IoT devices need strict access control
"""
            print("\n3. Rejecting PR with feedback...")
            manager.reject_pr_with_feedback(pr_number, feedback)
            print("\n4. AI will learn from this feedback in next run")
            print(" Run: python3 run_pipeline.py --skip-netflow")
            print(" The AI should avoid these mistakes next time!")
# Script entry point: launch the interactive PR/feedback menu.
if __name__ == "__main__":
    main()

View File

@@ -210,7 +210,7 @@ class NetworkOrchestrator:
# Use defaults if config fails
return {
'elasticsearch': {
'host': '192.168.100.85:9200',
'host': 'INTERNAL_IP:9200',
'index': 'netflow-*'
},
'analysis': {
@@ -328,11 +328,11 @@ class NetworkOrchestrator:
return {
"top_talkers": {
"buckets": [
{"key": "192.168.100.50", "doc_count": 15000,
{"key": "INTERNAL_IP", "doc_count": 15000,
"bytes": {"value": 5000000}, "packets": {"value": 10000}},
{"key": "192.168.100.51", "doc_count": 12000,
{"key": "INTERNAL_IP", "doc_count": 12000,
"bytes": {"value": 4000000}, "packets": {"value": 8000}},
{"key": "192.168.100.11", "doc_count": 8000,
{"key": "INTERNAL_IP", "doc_count": 8000,
"bytes": {"value": 2000000}, "packets": {"value": 5000}},
{"key": "10.0.0.5", "doc_count": 6000,
"bytes": {"value": 1500000}, "packets": {"value": 3000}}

View File

@@ -0,0 +1,544 @@
#!/usr/bin/env python3
"""
Enhanced Network AI Orchestrator - Production Version
Compatible with Elasticsearch 7.x
"""
import os
import sys
import json
import time
import logging
import signal
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional
import yaml
import uuid
import threading
from elasticsearch import Elasticsearch # Using sync version for ES 7.x
from git import Repo
import requests
# Configure production logging
def setup_logging():
    """Configure comprehensive logging for production.

    Logs to both a per-day file under the orchestrator log directory and
    stdout (so systemd/journald also captures output).

    Returns:
        The module logger used for all subsequent log calls.
    """
    log_dir = Path("/home/netops/orchestrator/logs")
    # parents=True so a fresh host without the orchestrator directory tree
    # does not crash with FileNotFoundError on first start.
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / f"orchestrator_{datetime.now().strftime('%Y%m%d')}.log"
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(log_file),
            logging.StreamHandler(sys.stdout)
        ]
    )
    return logging.getLogger(__name__)
logger = setup_logging()
class NetworkOrchestrator:
    """Production orchestrator: pulls NetFlow aggregates from Elasticsearch,
    asks the AI (via shared request/response files) for SRX optimizations,
    and — on schedule — turns the suggestions into a Gitea PR.
    """

    def __init__(self, config_path: str = "/home/netops/orchestrator/config.yaml"):
        """Initialize the orchestrator with configuration.

        Args:
            config_path: YAML config; falls back to built-in defaults when
                loading fails (see load_config).
        """
        self.config = self.load_config(config_path)
        self.es_client = None
        self.git_repo = None
        # Cleared by shutdown() to stop the main loop.
        self.running = True
        # File-based handoff with the AI processor VM.
        self.shared_dir = Path("/shared/ai-gitops")
        self.request_dir = self.shared_dir / "requests"
        self.response_dir = self.shared_dir / "responses"
        # Ensure directories exist
        self.request_dir.mkdir(parents=True, exist_ok=True)
        self.response_dir.mkdir(parents=True, exist_ok=True)
        logger.info("Orchestrator initialized")

    def should_create_pr(self):
        """Check if we should create a PR based on schedule and state.

        Returns:
            True only when PR creation is enabled, no PR is pending,
            the weekly day/hour window matches, and enough days have
            passed since the last PR.
        """
        if not self.config.get('pr_creation', {}).get('enabled', True):
            return False
        # Load state
        state = self.load_state()
        # Check if pending PR exists
        if self.config['pr_creation'].get('skip_if_pending', True):
            if state.get('pending_pr', False):
                logger.info("Skipping PR - existing PR is pending")
                return False
        # Check frequency
        frequency = self.config['pr_creation'].get('frequency', 'weekly')
        if frequency == 'weekly':
            # Check if it's the right day and hour
            now = datetime.now()
            target_day = self.config['pr_creation'].get('day_of_week', 'monday')
            target_hour = self.config['pr_creation'].get('hour_of_day', 9)
            # Convert day name to number
            days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
            target_day_num = days.index(target_day.lower())
            # Check if it's the right day and hour
            if now.weekday() != target_day_num or now.hour != target_hour:
                return False
        # Check minimum days between PRs
        if state.get('last_pr_created'):
            last_pr_date = datetime.fromisoformat(state['last_pr_created'])
            days_since = (datetime.now() - last_pr_date).days
            min_days = self.config['pr_creation'].get('min_days_between', 7)
            if days_since < min_days:
                logger.info(f"Skipping PR - only {days_since} days since last PR")
                return False
        return True

    def load_state(self):
        """Load orchestrator state.

        Returns:
            The persisted state dict, or {} when no state file exists yet.
        """
        state_file = self.config.get('state_tracking', {}).get('state_file', '/var/lib/orchestrator/state.json')
        if Path(state_file).exists():
            with open(state_file, 'r') as f:
                return json.load(f)
        return {}

    def save_state(self, updates):
        """Save orchestrator state.

        Merges ``updates`` into the persisted state and stamps
        ``last_updated``.

        Args:
            updates: Dict of state keys to merge in.
        """
        state_file = self.config.get('state_tracking', {}).get('state_file', '/var/lib/orchestrator/state.json')
        state = self.load_state()
        state.update(updates)
        state['last_updated'] = datetime.now().isoformat()
        with open(state_file, 'w') as f:
            json.dump(state, f, indent=2)

    def load_config(self, config_path: str) -> Dict:
        """Load configuration from YAML file.

        Args:
            config_path: Path to the YAML configuration.

        Returns:
            The parsed config dict; on any load error, a hard-coded
            default config (mock-friendly ES host, env-provided token).
        """
        try:
            with open(config_path, 'r') as f:
                config = yaml.safe_load(f)
            logger.info(f"Configuration loaded from {config_path}")
            # Replace environment variables
            if 'gitea' in config and 'token' in config['gitea']:
                if config['gitea']['token'] == '${GITEA_TOKEN}':
                    config['gitea']['token'] = os.environ.get('GITEA_TOKEN', '')
            return config
        except Exception as e:
            logger.error(f"Failed to load config: {e}")
            # Use defaults if config fails
            return {
                'elasticsearch': {
                    'host': 'INTERNAL_IP:9200',
                    'index': 'netflow-*'
                },
                'analysis': {
                    'interval_minutes': 60,
                    'window_hours': 24
                },
                'gitea': {
                    'url': 'https://git.salmutt.dev',
                    'repo': 'sal/srx-config',
                    'token': os.environ.get('GITEA_TOKEN', '')
                }
            }

    def setup_elasticsearch(self):
        """Initialize Elasticsearch connection (synchronous for ES 7.x).

        Returns:
            True when the connection and info() ping succeed; False
            otherwise (es_client is reset to None so callers fall back
            to mock data).
        """
        try:
            es_config = self.config['elasticsearch']
            self.es_client = Elasticsearch(
                hosts=[es_config['host']],
                verify_certs=False,
                timeout=30
            )
            # Test connection
            info = self.es_client.info()
            logger.info(f"Connected to Elasticsearch: {info['version']['number']}")
            return True
        except Exception as e:
            logger.error(f"Failed to connect to Elasticsearch: {e}")
            self.es_client = None
            return False

    def collect_traffic_data(self) -> Dict:
        """Collect traffic data from Elasticsearch (synchronous).

        Aggregates the last ``window_hours`` of flows into top talkers,
        protocol, VLAN and hourly-traffic buckets.

        Returns:
            The ES aggregations dict, or mock data when ES is unavailable
            or the query fails.
        """
        if not self.es_client:
            logger.warning("Elasticsearch not connected, using mock data")
            return self.generate_mock_data()
        try:
            window_hours = self.config['analysis']['window_hours']
            # size: 0 — we only need aggregations, not individual hits.
            query = {
                "query": {
                    "range": {
                        "@timestamp": {
                            "gte": f"now-{window_hours}h",
                            "lte": "now"
                        }
                    }
                },
                "size": 0,
                "aggs": {
                    "top_talkers": {
                        "terms": {
                            "field": "source_ip",
                            "size": 20
                        },
                        "aggs": {
                            "bytes": {"sum": {"field": "bytes"}},
                            "packets": {"sum": {"field": "packets"}}
                        }
                    },
                    "protocols": {
                        "terms": {
                            "field": "protocol",
                            "size": 10
                        }
                    },
                    "vlans": {
                        "terms": {
                            "field": "vlan_id",
                            "size": 20
                        },
                        "aggs": {
                            "bytes": {"sum": {"field": "bytes"}}
                        }
                    },
                    "hourly_traffic": {
                        "date_histogram": {
                            "field": "@timestamp",
                            "calendar_interval": "hour"
                        },
                        "aggs": {
                            "bytes": {"sum": {"field": "bytes"}}
                        }
                    }
                }
            }
            result = self.es_client.search(
                index=self.config['elasticsearch']['index'],
                body=query
            )
            total_hits = result['hits']['total']
            # Handle both ES 6.x and 7.x response formats
            if isinstance(total_hits, dict):
                total_count = total_hits['value']
            else:
                total_count = total_hits
            logger.info(f"Collected traffic data: {total_count} flows")
            return result['aggregations']
        except Exception as e:
            logger.error(f"Error collecting traffic data: {e}")
            return self.generate_mock_data()

    def generate_mock_data(self) -> Dict:
        """Generate mock traffic data for testing.

        Returns:
            A dict shaped like ES aggregations (top_talkers/protocols/
            vlans buckets) so downstream code works without a live ES.
        """
        return {
            "top_talkers": {
                "buckets": [
                    {"key": "INTERNAL_IP", "doc_count": 15000,
                     "bytes": {"value": 5000000}, "packets": {"value": 10000}},
                    {"key": "INTERNAL_IP", "doc_count": 12000,
                     "bytes": {"value": 4000000}, "packets": {"value": 8000}},
                    {"key": "INTERNAL_IP", "doc_count": 8000,
                     "bytes": {"value": 2000000}, "packets": {"value": 5000}},
                    {"key": "10.0.0.5", "doc_count": 6000,
                     "bytes": {"value": 1500000}, "packets": {"value": 3000}}
                ]
            },
            "protocols": {
                "buckets": [
                    {"key": "tcp", "doc_count": 25000},
                    {"key": "udp", "doc_count": 15000},
                    {"key": "icmp", "doc_count": 2000}
                ]
            },
            "vlans": {
                "buckets": [
                    {"key": 100, "doc_count": 20000, "bytes": {"value": 8000000}},
                    {"key": 200, "doc_count": 15000, "bytes": {"value": 6000000}},
                    {"key": 300, "doc_count": 5000, "bytes": {"value": 2000000}}
                ]
            }
        }

    def request_ai_analysis(self, traffic_data: Dict) -> Optional[Dict]:
        """Send traffic data to AI for analysis.

        Writes a request JSON into the shared request directory and blocks
        until the AI processor drops a matching response (or timeout).

        Args:
            traffic_data: ES-style aggregations to analyze.

        Returns:
            The AI response dict, or None on error/timeout.
        """
        request_id = str(uuid.uuid4())
        request_file = self.request_dir / f"{request_id}.json"
        request_data = {
            "request_id": request_id,
            "timestamp": datetime.now().isoformat(),
            "type": "traffic_analysis",
            "data": traffic_data,
            "prompt": self.build_analysis_prompt(traffic_data)
        }
        try:
            with open(request_file, 'w') as f:
                json.dump(request_data, f, indent=2)
            logger.info(f"AI request created: {request_id}")
            # Wait for response (with timeout)
            response = self.wait_for_ai_response(request_id, timeout=120)
            return response
        except Exception as e:
            logger.error(f"Error requesting AI analysis: {e}")
            return None

    def build_analysis_prompt(self, traffic_data: Dict) -> str:
        """Build prompt for AI analysis.

        Args:
            traffic_data: ES-style aggregations; only top talkers, VLANs
                and protocols are summarized into the prompt.

        Returns:
            The formatted prompt string.
        """
        prompt = """Analyze this network traffic data and suggest optimizations for a Juniper SRX firewall:
Traffic Summary:
- Top Talkers: {}
- Active VLANs: {}
- Protocol Distribution: {}
Based on this data, please provide:
1. Security rule optimizations (as Juniper SRX configuration commands)
2. QoS improvements for high-traffic hosts
3. VLAN segmentation recommendations
4. Potential security concerns or anomalies
Format your response with specific Juniper SRX configuration commands that can be applied.
Include comments explaining each change."""
        # Extract key metrics (cap each list to keep the prompt short).
        top_ips = [b['key'] for b in traffic_data.get('top_talkers', {}).get('buckets', [])][:5]
        vlans = [str(b['key']) for b in traffic_data.get('vlans', {}).get('buckets', [])][:5]
        protocols = [b['key'] for b in traffic_data.get('protocols', {}).get('buckets', [])][:3]
        return prompt.format(
            ', '.join(top_ips) if top_ips else 'No data',
            ', '.join(vlans) if vlans else 'No VLANs',
            ', '.join(protocols) if protocols else 'No protocols'
        )

    def wait_for_ai_response(self, request_id: str, timeout: int = 120) -> Optional[Dict]:
        """Wait for AI response file.

        Polls the shared response directory every 2 seconds; on arrival,
        reads the JSON and deletes both request and response files.

        Args:
            request_id: ID whose ``<id>_response.json`` file to await.
            timeout: Max seconds to wait.

        Returns:
            The parsed response dict, or None on timeout/read error.
        """
        response_file = self.response_dir / f"{request_id}_response.json"
        start_time = time.time()
        while time.time() - start_time < timeout:
            if response_file.exists():
                try:
                    time.sleep(1)  # Give AI time to finish writing
                    with open(response_file, 'r') as f:
                        response = json.load(f)
                    logger.info(f"AI response received: {request_id}")
                    # Log a snippet of the response
                    if 'response' in response:
                        snippet = response['response'][:200] + '...' if len(response['response']) > 200 else response['response']
                        logger.info(f"AI suggestion snippet: {snippet}")
                    # Clean up files
                    response_file.unlink()
                    (self.request_dir / f"{request_id}.json").unlink(missing_ok=True)
                    return response
                except Exception as e:
                    logger.error(f"Error reading AI response: {e}")
                    return None
            time.sleep(2)
        logger.warning(f"AI response timeout for {request_id}")
        return None

    def create_gitea_pr(self, ai_response: Dict) -> bool:
        """Create pull request in Gitea with suggested changes.

        NOTE(review): the actual git clone/branch/push and PR API call are
        not implemented yet — this currently only builds and logs the PR
        payload, then returns True.

        Args:
            ai_response: AI response dict; 'suggestions' preferred,
                'response' used as fallback.

        Returns:
            True when the payload was prepared (or once implemented,
            when the PR was created); False on error/missing token.
        """
        try:
            gitea_config = self.config['gitea']
            if not gitea_config.get('token'):
                logger.error("Gitea token not configured")
                return False
            # Extract configuration from AI response
            # Use 'suggestions' field if available, fallback to 'response'
            config_changes = ai_response.get('suggestions', ai_response.get('response', 'No configuration suggested'))
            # Create a unique branch name
            branch_name = f"ai-suggestions-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
            # Format the PR body
            pr_data = {
                "title": f"AI Network Optimizations - {datetime.now().strftime('%Y-%m-%d %H:%M')}",
                "body": f"""## AI-Generated Network Optimizations
### Analysis Summary
Analysis completed at {datetime.now().isoformat()}
### Traffic Patterns Analyzed
- Analysis Window: {self.config['analysis']['window_hours']} hours
- Data Source: NetFlow/J-Flow from SRX
### Proposed Configuration Changes
```junos
{config_changes}
```
### Review Instructions
1. Review the proposed changes carefully
2. Test in lab environment if possible
3. Schedule maintenance window if approved
4. Monitor after deployment
**This PR was automatically generated by the AI Network Orchestrator**
""",
                "base": "main",
                "head": branch_name
            }
            headers = {
                "Authorization": f"token {gitea_config['token']}",
                "Content-Type": "application/json"
            }
            # For now, log what would be sent (since branch creation needs more setup)
            logger.info(f"Would create PR with title: {pr_data['title']}")
            logger.info(f"Configuration changes proposed: {len(config_changes)} characters")
            # TODO: Implement actual Git operations and PR creation
            # This requires cloning the repo, creating branch, committing changes, pushing
            return True
        except Exception as e:
            logger.error(f"Error creating Gitea PR: {e}")
            return False

    def run_analysis_cycle(self):
        """Run a complete analysis cycle.

        Collect traffic data, request AI analysis, persist run state, and
        (only when the schedule allows) create a PR from the suggestions.
        All failures are logged and swallowed so the main loop survives.
        """
        logger.info("="*60)
        logger.info("Starting traffic analysis cycle")
        logger.info("="*60)
        try:
            # Always collect traffic data
            logger.info("Step 1: Collecting traffic data from Elasticsearch...")
            traffic_data = self.collect_traffic_data()
            if not traffic_data:
                logger.warning("No traffic data available, skipping analysis")
                return
            # Log summary of collected data
            top_talkers = traffic_data.get('top_talkers', {}).get('buckets', [])
            if top_talkers:
                logger.info(f"Found {len(top_talkers)} top talkers")
                logger.info(f"Top IP: {top_talkers[0]['key']} with {top_talkers[0]['doc_count']} flows")
            # Always request AI analysis
            logger.info("Step 2: Requesting AI analysis...")
            ai_response = self.request_ai_analysis(traffic_data)
            # Save state for analysis
            self.save_state({
                'last_analysis_run': datetime.now().isoformat(),
                'last_analysis_data': {
                    'top_talkers_count': len(top_talkers),
                    'response_received': bool(ai_response)
                }
            })
            # Check if we should create PR
            if self.should_create_pr():
                if ai_response and (ai_response.get('response') or ai_response.get('suggestions')):
                    logger.info("Step 3: Creating PR with AI suggestions...")
                    if self.create_gitea_pr(ai_response):
                        logger.info("✓ PR created successfully")
                        # pending_pr blocks further PRs until this one is
                        # resolved (see should_create_pr).
                        self.save_state({
                            'last_pr_created': datetime.now().isoformat(),
                            'pending_pr': True
                        })
                    else:
                        logger.warning("Failed to create Gitea PR")
                else:
                    logger.info("No actionable suggestions from AI analysis")
            else:
                logger.info("Not time for PR creation - analysis data saved for future use")
        except Exception as e:
            logger.error(f"Error in analysis cycle: {e}")
        logger.info("="*60)

    def main_loop(self):
        """Main orchestrator loop.

        Runs one analysis cycle immediately, then repeats every
        ``interval_minutes`` until shutdown() clears ``self.running``.
        """
        logger.info("Starting Network AI Orchestrator")
        # Setup Elasticsearch connection
        if not self.setup_elasticsearch():
            logger.warning("Running without Elasticsearch connection - using mock data")
        interval = self.config['analysis'].get('interval_minutes', 60) * 60
        # Run first analysis immediately
        self.run_analysis_cycle()
        while self.running:
            try:
                logger.info(f"Next analysis scheduled in {interval/60} minutes")
                logger.info(f"Next run at: {(datetime.now() + timedelta(seconds=interval)).strftime('%H:%M:%S')}")
                time.sleep(interval)
                if self.running:  # Check again after sleep
                    self.run_analysis_cycle()
            except KeyboardInterrupt:
                logger.info("Received keyboard interrupt")
                break
            except Exception as e:
                logger.error(f"Error in main loop: {e}")
                time.sleep(60)  # Wait before retry
        logger.info("Orchestrator shutdown complete")

    def shutdown(self, signum=None, frame=None):
        """Graceful shutdown handler.

        Registered for SIGTERM/SIGINT; flips the run flag and closes the
        Elasticsearch transport.

        Args:
            signum: Signal number when invoked as a signal handler.
            frame: Current stack frame (unused; signal-handler signature).
        """
        if signum:
            logger.info(f"Received signal {signum}, initiating shutdown...")
        else:
            logger.info("Initiating shutdown...")
        self.running = False
        if self.es_client:
            try:
                # Close Elasticsearch connection
                self.es_client.transport.close()
            except:
                # NOTE(review): bare except — intentionally best-effort on
                # shutdown, but narrowing to Exception would be safer.
                pass
def main():
    """Main entry point.

    Builds the orchestrator, wires SIGTERM/SIGINT to its graceful
    shutdown, runs the main loop, and exits 1 on any fatal error.
    """
    orchestrator = NetworkOrchestrator()
    # Set up signal handlers (systemd stop / Ctrl+C both shut down cleanly)
    signal.signal(signal.SIGTERM, orchestrator.shutdown)
    signal.signal(signal.SIGINT, orchestrator.shutdown)
    try:
        orchestrator.main_loop()
    except Exception as e:
        logger.error(f"Fatal error: {e}", exc_info=True)
        sys.exit(1)
    sys.exit(0)
# Script entry point: start the orchestrator service loop.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,621 @@
#!/usr/bin/env python3
"""
Enhanced Network AI Orchestrator - Production Version with Gitea Integration
Compatible with Elasticsearch 7.x
"""
import os
import sys
import json
import time
import logging
import signal
import hashlib
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional
import yaml
import uuid
import threading
from elasticsearch import Elasticsearch # Using sync version for ES 7.x
from git import Repo
import requests
# ADD THIS IMPORT - New for Phase 3
from gitea_integration import GiteaIntegration
# Configure production logging
def setup_logging():
    """Configure comprehensive logging for production.

    Logs to both a per-day file under the orchestrator log directory and
    stdout (so systemd/journald also captures output).

    Returns:
        The module logger used for all subsequent log calls.
    """
    log_dir = Path("/home/netops/orchestrator/logs")
    # parents=True so a fresh host without the orchestrator directory tree
    # does not crash with FileNotFoundError on first start.
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / f"orchestrator_{datetime.now().strftime('%Y%m%d')}.log"
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(log_file),
            logging.StreamHandler(sys.stdout)
        ]
    )
    return logging.getLogger(__name__)
logger = setup_logging()
class NetworkOrchestrator:
def __init__(self, config_path: str = "/home/netops/orchestrator/config.yaml"):
"""Initialize the orchestrator with configuration"""
self.config = self.load_config(config_path)
self.es_client = None
self.git_repo = None
self.running = True
self.shared_dir = Path("/shared/ai-gitops")
self.request_dir = self.shared_dir / "requests"
self.response_dir = self.shared_dir / "responses"
# ADD THIS - Initialize state for Phase 3
self.state = self.load_state()
# Ensure directories exist
self.request_dir.mkdir(parents=True, exist_ok=True)
self.response_dir.mkdir(parents=True, exist_ok=True)
logger.info("Orchestrator initialized")
def should_create_pr(self):
"""Check if we should create a PR based on schedule and state"""
if not self.config.get('pr_creation', {}).get('enabled', True):
return False
# Load state
state = self.load_state()
# Check if pending PR exists
if self.config['pr_creation'].get('skip_if_pending', True):
if state.get('pending_pr', False):
logger.info("Skipping PR - existing PR is pending")
return False
# Check frequency
frequency = self.config['pr_creation'].get('frequency', 'weekly')
if frequency == 'weekly':
# Check if it's the right day and hour
now = datetime.now()
target_day = self.config['pr_creation'].get('day_of_week', 'saturday')
target_hour = self.config['pr_creation'].get('hour_of_day', 5)
# Convert day name to number
days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
target_day_num = days.index(target_day.lower())
# Check if it's the right day and hour
if now.weekday() != target_day_num or now.hour != target_hour:
return False
# Check minimum days between PRs
if state.get('last_pr_created'):
last_pr_date = datetime.fromisoformat(state['last_pr_created'])
days_since = (datetime.now() - last_pr_date).days
min_days = self.config['pr_creation'].get('min_days_between', 7)
if days_since < min_days:
logger.info(f"Skipping PR - only {days_since} days since last PR")
return False
return True
def load_state(self):
"""Load orchestrator state"""
state_file = self.config.get('state_tracking', {}).get('state_file', '/var/lib/orchestrator/state.json')
if Path(state_file).exists():
with open(state_file, 'r') as f:
return json.load(f)
return {}
def save_state(self, updates):
"""Save orchestrator state"""
state_file = self.config.get('state_tracking', {}).get('state_file', '/var/lib/orchestrator/state.json')
state = self.load_state()
state.update(updates)
state['last_updated'] = datetime.now().isoformat()
with open(state_file, 'w') as f:
json.dump(state, f, indent=2)
    def load_config(self, config_path: str) -> Dict:
        """Load configuration from YAML file"""
        try:
            with open(config_path, 'r') as f:
                config = yaml.safe_load(f)
            logger.info(f"Configuration loaded from {config_path}")
            # Replace environment variables
            # Only the literal placeholder '${GITEA_TOKEN}' is substituted;
            # any other ${...} values are left untouched.
            if 'gitea' in config and 'token' in config['gitea']:
                if config['gitea']['token'] == '${GITEA_TOKEN}':
                    config['gitea']['token'] = os.environ.get('GITEA_TOKEN', '')
            return config
        except Exception as e:
            logger.error(f"Failed to load config: {e}")
            # Use defaults if config fails
            # NOTE(review): 'INTERNAL_IP' below is a sanitized placeholder,
            # not a usable host -- confirm before relying on these defaults.
            return {
                'elasticsearch': {
                    'host': 'INTERNAL_IP:9200',
                    'index': 'netflow-*'
                },
                'analysis': {
                    'interval_minutes': 60,
                    'window_hours': 24
                },
                'gitea': {
                    'url': 'https://git.salmutt.dev',
                    'repo': 'sal/srx-config',
                    'token': os.environ.get('GITEA_TOKEN', '')
                }
            }
    def setup_elasticsearch(self):
        """Initialize Elasticsearch connection (synchronous for ES 7.x)

        Returns True on success. On failure it logs the error, clears
        self.es_client and returns False so callers fall back to mock data.
        """
        try:
            es_config = self.config['elasticsearch']
            self.es_client = Elasticsearch(
                hosts=[es_config['host']],
                verify_certs=False,  # internal cluster; TLS not verified
                timeout=30
            )
            # Test connection
            info = self.es_client.info()
            logger.info(f"Connected to Elasticsearch: {info['version']['number']}")
            return True
        except Exception as e:
            logger.error(f"Failed to connect to Elasticsearch: {e}")
            self.es_client = None
            return False
    def collect_traffic_data(self) -> Dict:
        """Collect traffic data from Elasticsearch (synchronous)

        Runs one aggregation-only query over the configured netflow index
        covering the analysis window, returning the aggregations dict
        (top_talkers, protocols, vlans, hourly_traffic). Falls back to
        generate_mock_data() when ES is unavailable or the query fails.
        """
        if not self.es_client:
            logger.warning("Elasticsearch not connected, using mock data")
            return self.generate_mock_data()
        try:
            window_hours = self.config['analysis']['window_hours']
            # size: 0 -- aggregations only; no individual flow documents.
            query = {
                "query": {
                    "range": {
                        "@timestamp": {
                            "gte": f"now-{window_hours}h",
                            "lte": "now"
                        }
                    }
                },
                "size": 0,
                "aggs": {
                    "top_talkers": {
                        "terms": {
                            "field": "source_ip",
                            "size": 20
                        },
                        "aggs": {
                            "bytes": {"sum": {"field": "bytes"}},
                            "packets": {"sum": {"field": "packets"}}
                        }
                    },
                    "protocols": {
                        "terms": {
                            "field": "protocol",
                            "size": 10
                        }
                    },
                    "vlans": {
                        "terms": {
                            "field": "vlan_id",
                            "size": 20
                        },
                        "aggs": {
                            "bytes": {"sum": {"field": "bytes"}}
                        }
                    },
                    "hourly_traffic": {
                        "date_histogram": {
                            "field": "@timestamp",
                            "calendar_interval": "hour"
                        },
                        "aggs": {
                            "bytes": {"sum": {"field": "bytes"}}
                        }
                    }
                }
            }
            result = self.es_client.search(
                index=self.config['elasticsearch']['index'],
                body=query
            )
            total_hits = result['hits']['total']
            # Handle both ES 6.x and 7.x response formats
            # (7.x returns {"value": N, "relation": ...}; 6.x a bare int).
            if isinstance(total_hits, dict):
                total_count = total_hits['value']
            else:
                total_count = total_hits
            logger.info(f"Collected traffic data: {total_count} flows")
            return result['aggregations']
        except Exception as e:
            logger.error(f"Error collecting traffic data: {e}")
            return self.generate_mock_data()
def generate_mock_data(self) -> Dict:
"""Generate mock traffic data for testing"""
return {
"top_talkers": {
"buckets": [
{"key": "INTERNAL_IP", "doc_count": 15000,
"bytes": {"value": 5000000}, "packets": {"value": 10000}},
{"key": "INTERNAL_IP", "doc_count": 12000,
"bytes": {"value": 4000000}, "packets": {"value": 8000}},
{"key": "INTERNAL_IP", "doc_count": 8000,
"bytes": {"value": 2000000}, "packets": {"value": 5000}},
{"key": "10.0.0.5", "doc_count": 6000,
"bytes": {"value": 1500000}, "packets": {"value": 3000}}
]
},
"protocols": {
"buckets": [
{"key": "tcp", "doc_count": 25000},
{"key": "udp", "doc_count": 15000},
{"key": "icmp", "doc_count": 2000}
]
},
"vlans": {
"buckets": [
{"key": 100, "doc_count": 20000, "bytes": {"value": 8000000}},
{"key": 200, "doc_count": 15000, "bytes": {"value": 6000000}},
{"key": 300, "doc_count": 5000, "bytes": {"value": 2000000}}
]
}
}
    def request_ai_analysis(self, traffic_data: Dict) -> Optional[Dict]:
        """Send traffic data to AI for analysis

        Drops a JSON request file into the shared requests directory and
        blocks until the AI processor VM writes the matching response
        (or the 120 s timeout elapses). Returns the parsed response dict,
        or None on timeout/error.
        """
        request_id = str(uuid.uuid4())
        request_file = self.request_dir / f"{request_id}.json"
        request_data = {
            "request_id": request_id,
            "timestamp": datetime.now().isoformat(),
            "type": "traffic_analysis",
            "data": traffic_data,
            "prompt": self.build_analysis_prompt(traffic_data)
        }
        try:
            with open(request_file, 'w') as f:
                json.dump(request_data, f, indent=2)
            logger.info(f"AI request created: {request_id}")
            # Wait for response (with timeout)
            response = self.wait_for_ai_response(request_id, timeout=120)
            return response
        except Exception as e:
            logger.error(f"Error requesting AI analysis: {e}")
            return None
def build_analysis_prompt(self, traffic_data: Dict) -> str:
"""Build prompt for AI analysis"""
prompt = """Analyze this network traffic data and suggest optimizations for a Juniper SRX firewall:
Traffic Summary:
- Top Talkers: {}
- Active VLANs: {}
- Protocol Distribution: {}
Based on this data, please provide:
1. Security rule optimizations (as Juniper SRX configuration commands)
2. QoS improvements for high-traffic hosts
3. VLAN segmentation recommendations
4. Potential security concerns or anomalies
Format your response with specific Juniper SRX configuration commands that can be applied.
Include comments explaining each change."""
# Extract key metrics
top_ips = [b['key'] for b in traffic_data.get('top_talkers', {}).get('buckets', [])][:5]
vlans = [str(b['key']) for b in traffic_data.get('vlans', {}).get('buckets', [])][:5]
protocols = [b['key'] for b in traffic_data.get('protocols', {}).get('buckets', [])][:3]
return prompt.format(
', '.join(top_ips) if top_ips else 'No data',
', '.join(vlans) if vlans else 'No VLANs',
', '.join(protocols) if protocols else 'No protocols'
)
    def wait_for_ai_response(self, request_id: str, timeout: int = 120) -> Optional[Dict]:
        """Wait for AI response file

        Polls the shared responses directory every 2 s until
        `<request_id>_response.json` appears or *timeout* seconds pass.
        Deletes both request and response files after a successful read.
        Returns the parsed response dict, or None on timeout/read error.
        """
        response_file = self.response_dir / f"{request_id}_response.json"
        start_time = time.time()
        while time.time() - start_time < timeout:
            if response_file.exists():
                try:
                    time.sleep(1)  # Give AI time to finish writing
                    with open(response_file, 'r') as f:
                        response = json.load(f)
                    logger.info(f"AI response received: {request_id}")
                    # Log a snippet of the response
                    if 'response' in response:
                        snippet = response['response'][:200] + '...' if len(response['response']) > 200 else response['response']
                        logger.info(f"AI suggestion snippet: {snippet}")
                    # Clean up files
                    response_file.unlink()
                    (self.request_dir / f"{request_id}.json").unlink(missing_ok=True)
                    return response
                except Exception as e:
                    logger.error(f"Error reading AI response: {e}")
                    return None
            time.sleep(2)
        logger.warning(f"AI response timeout for {request_id}")
        return None
# REPLACE THE EXISTING create_gitea_pr METHOD WITH THIS ENHANCED VERSION
def create_gitea_pr(self, ai_response: Dict = None) -> bool:
"""Create pull request in Gitea with suggested changes"""
try:
# If no AI response provided, get the latest one
if not ai_response:
latest_suggestion = self._get_latest_ai_suggestion()
if not latest_suggestion:
logger.warning("No AI suggestions found to create PR")
return False
# Read the suggestion file
with open(latest_suggestion['path'], 'r') as f:
ai_response = json.load(f)
# Check if we should create a PR
if not self.should_create_pr():
logger.info("Skipping PR creation - conditions not met")
return False
# Check for existing pending PR
if self.state.get('pending_pr'):
logger.info(f"Skipping PR creation - pending PR exists: {self.state['pending_pr']}")
return False
logger.info("Creating Gitea pull request with AI suggestions...")
# Initialize Gitea integration
gitea = GiteaIntegration(self.config['gitea'])
# Extract the SRX configuration
srx_config = ai_response.get('suggestions', ai_response.get('response', ''))
if not srx_config or srx_config.strip() == '':
logger.warning("Empty or invalid suggestions, skipping PR creation")
return False
# Create the PR
pr_info = gitea.create_pr_with_config(
srx_config=srx_config,
title=f"AI Network Configuration Suggestions - {datetime.now().strftime('%B %d, %Y')}",
description=None # Will auto-generate
)
if pr_info:
# Update state with PR information
self.state['pending_pr'] = pr_info['number']
self.state['last_pr_created'] = datetime.now().isoformat()
self.state['pr_url'] = pr_info['url']
self.save_state(self.state)
logger.info(f"Successfully created PR #{pr_info['number']}: {pr_info['url']}")
# Log to a separate file for notifications/monitoring
with open('/var/log/orchestrator/pr_created.log', 'a') as f:
f.write(f"{datetime.now().isoformat()} - Created PR #{pr_info['number']} - {pr_info['url']}\n")
return True
else:
logger.error("Failed to create PR in Gitea")
return False
except Exception as e:
logger.error(f"Error creating Gitea PR: {e}", exc_info=True)
return False
# ADD THIS NEW METHOD
def _get_latest_ai_suggestion(self) -> Optional[Dict]:
"""Get the most recent AI suggestion file"""
response_dir = '/shared/ai-gitops/responses'
try:
# List all response files
response_files = []
for filename in os.listdir(response_dir):
if filename.startswith('response_') and filename.endswith('.json'):
filepath = os.path.join(response_dir, filename)
# Get file modification time
mtime = os.path.getmtime(filepath)
response_files.append({
'path': filepath,
'filename': filename,
'mtime': mtime
})
if not response_files:
return None
# Sort by modification time and get the latest
response_files.sort(key=lambda x: x['mtime'], reverse=True)
return response_files[0]
except Exception as e:
logger.error(f"Error finding latest AI suggestion: {e}")
return None
    # Invoked periodically from main_loop while a PR is pending.
    def check_pr_status(self):
        """Check the status of pending pull requests

        Polls Gitea for the PR recorded in state['pending_pr']; clears the
        flag when the PR is closed and marks merged PRs for deployment.
        """
        if not self.state.get('pending_pr'):
            return
        try:
            gitea = GiteaIntegration(self.config['gitea'])
            pr_status = gitea.get_pr_status(self.state['pending_pr'])
            if pr_status:
                logger.info(f"PR #{pr_status['number']} status: {pr_status['state']} (merged: {pr_status['merged']})")
                # If PR is closed or merged, clear the pending_pr flag
                # (Gitea reports merged PRs with state 'closed').
                if pr_status['state'] == 'closed':
                    logger.info(f"PR #{pr_status['number']} has been closed")
                    self.state['pending_pr'] = None
                    self.state['last_pr_status'] = 'closed'
                    self.state['last_pr_closed'] = datetime.now().isoformat()
                    if pr_status['merged']:
                        self.state['last_pr_status'] = 'merged'
                        logger.info(f"PR #{pr_status['number']} was merged!")
                        # Mark for deployment
                        self.state['pending_deployment'] = True
                        self.state['deployment_pr'] = pr_status['number']
                    self.save_state(self.state)
        except Exception as e:
            logger.error(f"Error checking PR status: {e}")
    def run_analysis_cycle(self):
        """Run a complete analysis cycle

        One pass of: collect traffic aggregations -> request AI analysis ->
        persist run metadata -> (when the schedule allows) open a Gitea PR
        with the AI's suggestions. Never raises; failures are logged.
        """
        logger.info("="*60)
        logger.info("Starting traffic analysis cycle")
        logger.info("="*60)
        try:
            # Always collect traffic data
            logger.info("Step 1: Collecting traffic data from Elasticsearch...")
            traffic_data = self.collect_traffic_data()
            if not traffic_data:
                logger.warning("No traffic data available, skipping analysis")
                return
            # Log summary of collected data
            top_talkers = traffic_data.get('top_talkers', {}).get('buckets', [])
            if top_talkers:
                logger.info(f"Found {len(top_talkers)} top talkers")
                logger.info(f"Top IP: {top_talkers[0]['key']} with {top_talkers[0]['doc_count']} flows")
            # Always request AI analysis
            logger.info("Step 2: Requesting AI analysis...")
            ai_response = self.request_ai_analysis(traffic_data)
            # Save state for analysis
            self.save_state({
                'last_analysis_run': datetime.now().isoformat(),
                'last_analysis_data': {
                    'top_talkers_count': len(top_talkers),
                    'response_received': bool(ai_response)
                }
            })
            # Check if we should create PR (weekly window, rate limits, etc.)
            if self.should_create_pr():
                if ai_response and (ai_response.get('response') or ai_response.get('suggestions')):
                    logger.info("Step 3: Creating PR with AI suggestions...")
                    if self.create_gitea_pr(ai_response):
                        logger.info("✓ PR created successfully")
                    else:
                        logger.warning("Failed to create Gitea PR")
                else:
                    logger.info("No actionable suggestions from AI analysis")
            else:
                logger.info("Not time for PR creation - analysis data saved for future use")
        except Exception as e:
            logger.error(f"Error in analysis cycle: {e}")
        logger.info("="*60)
    def main_loop(self):
        """Main orchestrator loop

        Runs an analysis cycle immediately, then repeats on the configured
        interval. The wait sleeps in one-minute slices so shutdown() is
        honored quickly; pending-PR status is checked every 15 minutes.
        """
        logger.info("Starting Network AI Orchestrator")
        # Setup Elasticsearch connection
        if not self.setup_elasticsearch():
            logger.warning("Running without Elasticsearch connection - using mock data")
        # Interval between cycles, in seconds.
        interval = self.config['analysis'].get('interval_minutes', 60) * 60
        # Run first analysis immediately
        self.run_analysis_cycle()
        while self.running:
            try:
                logger.info(f"Next analysis scheduled in {interval/60} minutes")
                logger.info(f"Next run at: {(datetime.now() + timedelta(seconds=interval)).strftime('%H:%M:%S')}")
                # MODIFIED: Check PR status every 15 minutes during the wait
                for i in range(int(interval / 60)):  # Check every minute
                    if not self.running:
                        break
                    time.sleep(60)
                    # Check PR status every 15 minutes (minutes 15, 30, ...)
                    if i % 15 == 14 and self.state.get('pending_pr'):
                        logger.info("Checking PR status...")
                        self.check_pr_status()
                if self.running:  # Check again after sleep
                    self.run_analysis_cycle()
            except KeyboardInterrupt:
                logger.info("Received keyboard interrupt")
                break
            except Exception as e:
                logger.error(f"Error in main loop: {e}")
                time.sleep(60)  # Wait before retry
        logger.info("Orchestrator shutdown complete")
def shutdown(self, signum=None, frame=None):
"""Graceful shutdown handler"""
if signum:
logger.info(f"Received signal {signum}, initiating shutdown...")
else:
logger.info("Initiating shutdown...")
self.running = False
if self.es_client:
try:
# Close Elasticsearch connection
self.es_client.transport.close()
except:
pass
def main():
    """Main entry point"""
    orchestrator = NetworkOrchestrator()
    # Set up signal handlers so systemd stop / Ctrl-C shut down cleanly.
    signal.signal(signal.SIGTERM, orchestrator.shutdown)
    signal.signal(signal.SIGINT, orchestrator.shutdown)
    try:
        orchestrator.main_loop()
    except Exception as e:
        logger.error(f"Fatal error: {e}", exc_info=True)
        sys.exit(1)
    sys.exit(0)
# Script entry point.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,204 @@
#!/usr/bin/env python3
"""
SRX AI GitOps Pipeline Status Monitor - Final Version
Shows the complete status of the automation pipeline
"""
import os
import json
import yaml
from datetime import datetime, timedelta
from tabulate import tabulate
import requests
import subprocess
class PipelineMonitor:
    """Read-only status reporter for the SRX AI GitOps pipeline.

    Aggregates systemd service state, shared-directory activity and the
    orchestrator's persisted state file into a console report.
    All bare `except:` clauses were narrowed to `except Exception` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    def __init__(self):
        # Load configuration
        with open('/home/netops/orchestrator/config.yaml', 'r') as f:
            self.config = yaml.safe_load(f)
        # Load persisted orchestrator state; treat unreadable/corrupt state
        # the same as no state at all.
        state_file = self.config.get('state_tracking', {}).get('state_file', '/var/lib/orchestrator/state.json')
        if os.path.exists(state_file):
            try:
                with open(state_file, 'r') as f:
                    self.state = json.load(f)
            except Exception:
                self.state = {}
        else:
            self.state = {}
    def check_services(self):
        """Return [name, status] rows for the pipeline's services."""
        status = []
        # Check local orchestrator unit via systemd.
        try:
            result = subprocess.run(
                ['systemctl', 'is-active', 'orchestrator.service'],
                capture_output=True,
                text=True
            )
            is_active = result.stdout.strip() == 'active'
            status.append(['Orchestrator (Local)', '✅ Active' if is_active else '❌ Inactive'])
        except Exception:
            status.append(['Orchestrator (Local)', '❓ Unknown'])
        # AI processor runs on another VM; infer liveness from shared files.
        ai_status = self.check_ai_processor_activity()
        status.append(['AI Processor (INTERNAL_IP)', ai_status])
        # Check deployment timer unit.
        try:
            result = subprocess.run(
                ['systemctl', 'is-active', 'srx-deployment.timer'],
                capture_output=True,
                text=True
            )
            is_active = result.stdout.strip() == 'active'
            status.append(['Deployment Timer', '✅ Active' if is_active else '❌ Not configured'])
        except Exception:
            status.append(['Deployment Timer', '❓ Unknown'])
        return status
    def check_ai_processor_activity(self):
        """Infer AI processor liveness from state timestamps and shared dirs."""
        # A confirmed response within the last two hours counts as active.
        if self.state.get('last_analysis_data', {}).get('response_received'):
            last_analysis = self.state.get('last_analysis_run', '')
            if last_analysis:
                try:
                    last_time = datetime.fromisoformat(last_analysis)
                    if datetime.now() - last_time < timedelta(hours=2):
                        return '✅ Active (Recent activity)'
                except Exception:
                    pass
        # Response files on disk imply the processor has worked recently.
        response_dir = '/shared/ai-gitops/responses'
        if os.path.exists(response_dir):
            files = os.listdir(response_dir)
            if len(files) > 0:
                return '✅ Active (Has responses)'
        # Outstanding requests imply it is (or should be) working now.
        request_dir = '/shared/ai-gitops/requests'
        if os.path.exists(request_dir):
            files = os.listdir(request_dir)
            if len(files) > 0:
                return '⏳ Processing requests'
        return '💤 Idle'
    def check_pr_status(self):
        """Describe the currently pending PR, if any."""
        if self.state.get('pending_pr'):
            pr_num = self.state['pending_pr']
            return f"PR #{pr_num} - Pending Review"
        else:
            return "No pending PR"
    def get_next_events(self):
        """Calculate next scheduled events (analysis, PR creation, deployment)."""
        now = datetime.now()
        # Next analysis (hourly, on the hour)
        next_analysis = now.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
        # Next PR creation (Saturday 5 AM; weekday() Saturday == 5)
        days_until_saturday = (5 - now.weekday()) % 7
        if days_until_saturday == 0 and now.hour >= 5:
            days_until_saturday = 7
        next_pr = now.replace(hour=5, minute=0, second=0, microsecond=0)
        next_pr += timedelta(days=days_until_saturday)
        # Next deployment check (Daily 5 AM)
        next_deploy = now.replace(hour=5, minute=0, second=0, microsecond=0)
        if now.hour >= 5:
            next_deploy += timedelta(days=1)
        return [
            ['Next Analysis', next_analysis.strftime('%Y-%m-%d %H:%M')],
            ['Next PR Creation', next_pr.strftime('%Y-%m-%d %H:%M')],
            ['Next Deployment Check', next_deploy.strftime('%Y-%m-%d %H:%M')]
        ]
    def get_recent_activity(self):
        """Return [event, details] rows derived from the persisted state."""
        activity = []
        # Last analysis (and whether the AI answered it)
        if self.state.get('last_analysis_run'):
            try:
                last_analysis = datetime.fromisoformat(self.state['last_analysis_run'])
                activity.append(['Last Analysis', last_analysis.strftime('%Y-%m-%d %H:%M')])
                if self.state.get('last_analysis_data', {}).get('response_received'):
                    activity.append(['AI Response', '✅ Received'])
                else:
                    activity.append(['AI Response', '❌ Not received'])
            except Exception:
                pass
        # Last PR created
        if self.state.get('last_pr_created'):
            try:
                last_pr = datetime.fromisoformat(self.state['last_pr_created'])
                activity.append(['Last PR Created', last_pr.strftime('%Y-%m-%d %H:%M')])
            except Exception:
                pass
        # Last deployment
        if self.state.get('last_successful_deployment'):
            try:
                last_deploy = datetime.fromisoformat(self.state['last_successful_deployment'])
                activity.append(['Last Deployment', last_deploy.strftime('%Y-%m-%d %H:%M')])
            except Exception:
                pass
        return activity if activity else [['Status', 'No recent activity']]
    def display_status(self):
        """Print the complete pipeline status report to stdout."""
        print("\n" + "="*60)
        print("🚀 SRX AI GitOps Pipeline Status")
        print("="*60)
        print(f"Current Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print("\n📊 Service Status:")
        print(tabulate(self.check_services(), headers=['Service', 'Status']))
        print("\n🔄 Current State:")
        print(f"PR Status: {self.check_pr_status()}")
        print(f"Pending Deployment: {'Yes' if self.state.get('pending_deployment') else 'No'}")
        print("\n📅 Scheduled Events:")
        print(tabulate(self.get_next_events(), headers=['Event', 'Time']))
        print("\n📜 Recent Activity:")
        print(tabulate(self.get_recent_activity(), headers=['Event', 'Details']))
        print("\n💾 Data Locations:")
        print("Requests: /shared/ai-gitops/requests/")
        print("Responses: /shared/ai-gitops/responses/")
        print("Approved: /shared/ai-gitops/approved/")
        print("Deployed: /shared/ai-gitops/deployed/")
        print("\n🏗️ Architecture:")
        print("Orchestrator VM: INTERNAL_IP (this VM)")
        print("AI Processor VM: INTERNAL_IP")
        print("Elasticsearch VM: INTERNAL_IP")
        print("Gitea Server: git.salmutt.dev")
        print("\n📋 Pipeline Flow:")
        print("1. Every 60 min → Analyze traffic → Generate suggestions")
        print("2. Saturday 5 AM → Create PR if suggestions exist")
        print("3. Manual → Review and approve/reject PR")
        print("4. Daily 5 AM → Deploy approved configurations")
        print("="*60 + "\n")
# Script entry point: render the status report once and exit.
if __name__ == "__main__":
    monitor = PipelineMonitor()
    monitor.display_status()

View File

@@ -0,0 +1,204 @@
#!/usr/bin/env python3
"""
SRX AI GitOps Pipeline Status Monitor - Final Version
Shows the complete status of the automation pipeline
"""
import os
import json
import yaml
from datetime import datetime, timedelta
from tabulate import tabulate
import requests
import subprocess
class PipelineMonitor:
    """Read-only status reporter for the SRX AI GitOps pipeline.

    Aggregates systemd service state, shared-directory activity and the
    orchestrator's persisted state file into a console report.
    All bare `except:` clauses were narrowed to `except Exception` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    def __init__(self):
        # Load configuration
        with open('/home/netops/orchestrator/config.yaml', 'r') as f:
            self.config = yaml.safe_load(f)
        # Load persisted orchestrator state; treat unreadable/corrupt state
        # the same as no state at all.
        state_file = self.config.get('state_tracking', {}).get('state_file', '/var/lib/orchestrator/state.json')
        if os.path.exists(state_file):
            try:
                with open(state_file, 'r') as f:
                    self.state = json.load(f)
            except Exception:
                self.state = {}
        else:
            self.state = {}
    def check_services(self):
        """Return [name, status] rows for the pipeline's services."""
        status = []
        # Check local orchestrator unit via systemd.
        try:
            result = subprocess.run(
                ['systemctl', 'is-active', 'orchestrator.service'],
                capture_output=True,
                text=True
            )
            is_active = result.stdout.strip() == 'active'
            status.append(['Orchestrator (Local)', '✅ Active' if is_active else '❌ Inactive'])
        except Exception:
            status.append(['Orchestrator (Local)', '❓ Unknown'])
        # AI processor runs on another VM; infer liveness from shared files.
        ai_status = self.check_ai_processor_activity()
        status.append(['AI Processor (INTERNAL_IP)', ai_status])
        # Check deployment timer unit.
        try:
            result = subprocess.run(
                ['systemctl', 'is-active', 'srx-deployment.timer'],
                capture_output=True,
                text=True
            )
            is_active = result.stdout.strip() == 'active'
            status.append(['Deployment Timer', '✅ Active' if is_active else '❌ Not configured'])
        except Exception:
            status.append(['Deployment Timer', '❓ Unknown'])
        return status
    def check_ai_processor_activity(self):
        """Infer AI processor liveness from state timestamps and shared dirs."""
        # A confirmed response within the last two hours counts as active.
        if self.state.get('last_analysis_data', {}).get('response_received'):
            last_analysis = self.state.get('last_analysis_run', '')
            if last_analysis:
                try:
                    last_time = datetime.fromisoformat(last_analysis)
                    if datetime.now() - last_time < timedelta(hours=2):
                        return '✅ Active (Recent activity)'
                except Exception:
                    pass
        # Response files on disk imply the processor has worked recently.
        response_dir = '/shared/ai-gitops/responses'
        if os.path.exists(response_dir):
            files = os.listdir(response_dir)
            if len(files) > 0:
                return '✅ Active (Has responses)'
        # Outstanding requests imply it is (or should be) working now.
        request_dir = '/shared/ai-gitops/requests'
        if os.path.exists(request_dir):
            files = os.listdir(request_dir)
            if len(files) > 0:
                return '⏳ Processing requests'
        return '💤 Idle'
    def check_pr_status(self):
        """Describe the currently pending PR, if any."""
        if self.state.get('pending_pr'):
            pr_num = self.state['pending_pr']
            return f"PR #{pr_num} - Pending Review"
        else:
            return "No pending PR"
    def get_next_events(self):
        """Calculate next scheduled events (analysis, PR creation, deployment)."""
        now = datetime.now()
        # Next analysis (hourly, on the hour)
        next_analysis = now.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
        # Next PR creation (Saturday 5 AM; weekday() Saturday == 5)
        days_until_saturday = (5 - now.weekday()) % 7
        if days_until_saturday == 0 and now.hour >= 5:
            days_until_saturday = 7
        next_pr = now.replace(hour=5, minute=0, second=0, microsecond=0)
        next_pr += timedelta(days=days_until_saturday)
        # Next deployment check (Daily 5 AM)
        next_deploy = now.replace(hour=5, minute=0, second=0, microsecond=0)
        if now.hour >= 5:
            next_deploy += timedelta(days=1)
        return [
            ['Next Analysis', next_analysis.strftime('%Y-%m-%d %H:%M')],
            ['Next PR Creation', next_pr.strftime('%Y-%m-%d %H:%M')],
            ['Next Deployment Check', next_deploy.strftime('%Y-%m-%d %H:%M')]
        ]
    def get_recent_activity(self):
        """Return [event, details] rows derived from the persisted state."""
        activity = []
        # Last analysis (and whether the AI answered it)
        if self.state.get('last_analysis_run'):
            try:
                last_analysis = datetime.fromisoformat(self.state['last_analysis_run'])
                activity.append(['Last Analysis', last_analysis.strftime('%Y-%m-%d %H:%M')])
                if self.state.get('last_analysis_data', {}).get('response_received'):
                    activity.append(['AI Response', '✅ Received'])
                else:
                    activity.append(['AI Response', '❌ Not received'])
            except Exception:
                pass
        # Last PR created
        if self.state.get('last_pr_created'):
            try:
                last_pr = datetime.fromisoformat(self.state['last_pr_created'])
                activity.append(['Last PR Created', last_pr.strftime('%Y-%m-%d %H:%M')])
            except Exception:
                pass
        # Last deployment
        if self.state.get('last_successful_deployment'):
            try:
                last_deploy = datetime.fromisoformat(self.state['last_successful_deployment'])
                activity.append(['Last Deployment', last_deploy.strftime('%Y-%m-%d %H:%M')])
            except Exception:
                pass
        return activity if activity else [['Status', 'No recent activity']]
    def display_status(self):
        """Print the complete pipeline status report to stdout."""
        print("\n" + "="*60)
        print("🚀 SRX AI GitOps Pipeline Status")
        print("="*60)
        print(f"Current Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print("\n📊 Service Status:")
        print(tabulate(self.check_services(), headers=['Service', 'Status']))
        print("\n🔄 Current State:")
        print(f"PR Status: {self.check_pr_status()}")
        print(f"Pending Deployment: {'Yes' if self.state.get('pending_deployment') else 'No'}")
        print("\n📅 Scheduled Events:")
        print(tabulate(self.get_next_events(), headers=['Event', 'Time']))
        print("\n📜 Recent Activity:")
        print(tabulate(self.get_recent_activity(), headers=['Event', 'Details']))
        print("\n💾 Data Locations:")
        print("Requests: /shared/ai-gitops/requests/")
        print("Responses: /shared/ai-gitops/responses/")
        print("Approved: /shared/ai-gitops/approved/")
        print("Deployed: /shared/ai-gitops/deployed/")
        print("\n🏗️ Architecture:")
        print("Orchestrator VM: INTERNAL_IP (this VM)")
        print("AI Processor VM: INTERNAL_IP")
        print("Elasticsearch VM: INTERNAL_IP")
        print("Gitea Server: git.salmutt.dev")
        print("\n📋 Pipeline Flow:")
        print("1. Every 60 min → Analyze traffic → Generate suggestions")
        print("2. Saturday 5 AM → Create PR if suggestions exist")
        print("3. Manual → Review and approve/reject PR")
        print("4. Daily 5 AM → Deploy approved configurations")
        print("="*60 + "\n")
# Script entry point: render the status report once and exit.
if __name__ == "__main__":
    monitor = PipelineMonitor()
    monitor.display_status()

View File

@@ -0,0 +1,239 @@
#!/usr/bin/env python3
"""
PR Feedback System - Helps AI learn from rejected configurations
Records why PRs were rejected and uses this to improve future suggestions
"""
import os
import json
import yaml
from datetime import datetime
from typing import Dict, List, Optional
import requests
class PRFeedbackSystem:
    """Records reviewer feedback on AI-generated PRs and mines it for
    patterns so future prompts can steer the model away from past mistakes.

    Feedback lives in /shared/ai-gitops/feedback as one JSON history file
    plus one file per PR.
    """
    def __init__(self):
        # Load configuration
        with open('/home/netops/orchestrator/config.yaml', 'r') as f:
            self.config = yaml.safe_load(f)
        # Feedback storage location (shared with the AI processor VM)
        self.feedback_dir = '/shared/ai-gitops/feedback'
        self.feedback_file = os.path.join(self.feedback_dir, 'pr_feedback_history.json')
        # Create feedback directory
        os.makedirs(self.feedback_dir, exist_ok=True)
        # Load existing feedback
        self.feedback_history = self.load_feedback_history()
    def load_feedback_history(self) -> List[Dict]:
        """Load the accumulated feedback entries; [] when missing/corrupt."""
        if os.path.exists(self.feedback_file):
            try:
                with open(self.feedback_file, 'r') as f:
                    return json.load(f)
            except Exception:
                return []
        return []
    def save_feedback_history(self):
        """Persist the in-memory feedback history to the shared file."""
        with open(self.feedback_file, 'w') as f:
            json.dump(self.feedback_history, f, indent=2)
    def record_pr_feedback(self, pr_number: int, feedback_type: str, details: Dict):
        """Append one feedback entry for a PR and persist it.

        *feedback_type* is one of 'rejected', 'modified', 'approved'.
        Returns the stored entry.
        """
        feedback_entry = {
            'pr_number': pr_number,
            'timestamp': datetime.now().isoformat(),
            'feedback_type': feedback_type,  # 'rejected', 'modified', 'approved'
            'details': details,
            # BUG FIX: this was hard-coded to [], so the issues collected in
            # `details` were never visible to analyze_feedback_patterns().
            'configuration_issues': details.get('configuration_issues', [])
        }
        # Get the PR content from Gitea (placeholder; currently returns None)
        pr_config = self.get_pr_configuration(pr_number)
        if pr_config:
            feedback_entry['original_config'] = pr_config
        self.feedback_history.append(feedback_entry)
        self.save_feedback_history()
        # Also save individual feedback file for this PR
        pr_feedback_file = os.path.join(self.feedback_dir, f'pr_{pr_number}_feedback.json')
        with open(pr_feedback_file, 'w') as f:
            json.dump(feedback_entry, f, indent=2)
        print(f"✅ Feedback recorded for PR #{pr_number}")
        return feedback_entry
    def get_pr_configuration(self, pr_number: int) -> Optional[str]:
        """Fetch the configuration from a PR.

        Placeholder: would call the Gitea API; currently returns None.
        """
        return None
    def analyze_feedback_patterns(self) -> Dict:
        """Aggregate counts of feedback outcomes and issue categories."""
        patterns = {
            'total_prs': len(self.feedback_history),
            'rejected': 0,
            'approved': 0,
            'modified': 0,
            'common_issues': {},
            'security_concerns': 0,
            'performance_issues': 0,
            'incorrect_syntax': 0
        }
        for feedback in self.feedback_history:
            # BUG FIX: an unexpected/missing feedback_type previously raised
            # KeyError and aborted the whole analysis.
            ftype = feedback.get('feedback_type')
            if ftype in ('rejected', 'approved', 'modified'):
                patterns[ftype] += 1
            # Count specific issues by category keyword.
            for issue in feedback.get('configuration_issues', []):
                issue_type = issue.get('type', 'other')
                patterns['common_issues'][issue_type] = patterns['common_issues'].get(issue_type, 0) + 1
                if 'security' in issue_type.lower():
                    patterns['security_concerns'] += 1
                elif 'performance' in issue_type.lower():
                    patterns['performance_issues'] += 1
                elif 'syntax' in issue_type.lower():
                    patterns['incorrect_syntax'] += 1
        return patterns
    def generate_learning_prompt(self) -> str:
        """Render the feedback statistics as a prompt preamble for the AI."""
        patterns = self.analyze_feedback_patterns()
        prompt = "\n# IMPORTANT LEARNING FROM PAST FEEDBACK:\n"
        prompt += f"# Total PRs analyzed: {patterns['total_prs']}\n"
        prompt += f"# Rejected: {patterns['rejected']}, Approved: {patterns['approved']}\n\n"
        if patterns['security_concerns'] > 0:
            prompt += "# ⚠️ SECURITY ISSUES FOUND IN PAST SUGGESTIONS:\n"
            prompt += "# - Avoid any/any/any permit rules\n"
            prompt += "# - Be specific with source/destination addresses\n"
            prompt += "# - Limit applications to necessary services only\n\n"
        if patterns['common_issues']:
            prompt += "# 📊 COMMON ISSUES TO AVOID:\n"
            for issue, count in patterns['common_issues'].items():
                prompt += f"# - {issue}: {count} occurrences\n"
            prompt += "\n"
        # Add specific examples from the three most recent rejections.
        recent_rejections = [f for f in self.feedback_history if f.get('feedback_type') == 'rejected'][-3:]
        if recent_rejections:
            prompt += "# 🚫 RECENT REJECTED CONFIGURATIONS:\n"
            for rejection in recent_rejections:
                prompt += f"# PR #{rejection['pr_number']}: {rejection['details'].get('reason', 'No reason provided')}\n"
        return prompt
def provide_feedback_interactive():
    """Interactively collect reviewer feedback for one PR and record it.

    Prompts on stdin for the PR number, outcome and issue categories,
    records the feedback exactly once, then prints the updated learning
    analysis and the generated AI prompt.
    """
    feedback_system = PRFeedbackSystem()
    print("\n📝 PR FEEDBACK SYSTEM")
    print("="*50)
    # Get PR number
    pr_number = input("Enter PR number to provide feedback for: ").strip()
    if not pr_number.isdigit():
        print("❌ Invalid PR number")
        return
    pr_number = int(pr_number)
    # Get feedback type
    print("\nFeedback type:")
    print("1. Rejected - Configuration was not suitable")
    print("2. Modified - Configuration needed changes")
    print("3. Approved - Configuration was good")
    feedback_type_choice = input("Select (1-3): ").strip()
    feedback_types = {
        '1': 'rejected',
        '2': 'modified',
        '3': 'approved'
    }
    feedback_type = feedback_types.get(feedback_type_choice, 'rejected')
    # Collect specific issues
    print("\nWhat issues did you find? (select all that apply)")
    print("1. Security - Too permissive rules")
    print("2. Security - Missing security policies")
    print("3. Performance - Inefficient rules")
    print("4. Syntax - Incorrect SRX syntax")
    print("5. Network - Wrong IP addresses/VLANs")
    print("6. Interface - Wrong interface names")
    print("7. Other")
    issues_input = input("Enter issue numbers (comma-separated, e.g., 1,3,5): ").strip()
    issue_types = {
        '1': {'type': 'security_permissive', 'description': 'Rules too permissive (any/any/any)'},
        '2': {'type': 'security_missing', 'description': 'Missing essential security policies'},
        '3': {'type': 'performance', 'description': 'Inefficient rule ordering or design'},
        '4': {'type': 'syntax', 'description': 'Incorrect SRX syntax'},
        '5': {'type': 'network', 'description': 'Wrong IP addresses or VLANs'},
        '6': {'type': 'interface', 'description': 'Wrong interface names'},
        '7': {'type': 'other', 'description': 'Other issues'}
    }
    configuration_issues = []
    if issues_input:
        for issue_num in issues_input.split(','):
            issue_num = issue_num.strip()
            if issue_num in issue_types:
                configuration_issues.append(issue_types[issue_num])
    # Get detailed feedback
    print("\nProvide additional details (optional):")
    detailed_reason = input("Reason for feedback: ").strip()
    # Specific examples of problems
    if feedback_type in ['rejected', 'modified']:
        print("\nProvide specific examples of problematic configurations:")
        print("Example: 'The any/any/any permit rule is too open'")
        specific_issues = input("Specific issues: ").strip()
    else:
        specific_issues = ""
    # Record the feedback.
    # BUG FIX: record_pr_feedback was previously called twice (with an
    # unused intermediate dict in between), duplicating every entry in the
    # feedback history and the per-PR file.
    details = {
        'reason': detailed_reason,
        'specific_issues': specific_issues,
        'configuration_issues': configuration_issues
    }
    feedback_system.record_pr_feedback(pr_number, feedback_type, details)
    # Show learning analysis
    print("\n📊 CURRENT LEARNING PATTERNS:")
    patterns = feedback_system.analyze_feedback_patterns()
    print(f"Total PRs with feedback: {patterns['total_prs']}")
    print(f"Rejected: {patterns['rejected']}")
    print(f"Security concerns found: {patterns['security_concerns']}")
    # Generate and show learning prompt
    print("\n🧠 AI LEARNING PROMPT GENERATED:")
    print("-"*50)
    print(feedback_system.generate_learning_prompt())
    print("\n✅ Feedback recorded! The AI will use this to improve future suggestions.")
# Script entry point: run the interactive feedback session.
if __name__ == "__main__":
    provide_feedback_interactive()

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
Prepare PR from AI response
Converts response format to PR format
"""
import json
from pathlib import Path
from datetime import datetime
import sys
def convert_response_to_pr():
    """Translate the newest AI response file into a pending-PR JSON file.

    Returns True when a PR file was written, False when no response file or
    no usable configuration could be found.
    """
    responses = list(Path('/shared/ai-gitops/responses').glob('*_response.json'))
    if not responses:
        print("No response files found")
        return False
    newest = max(responses, key=lambda p: p.stat().st_mtime)
    print(f"Converting response: {newest.name}")
    with open(newest, 'r') as fh:
        payload = json.load(fh)
    # Gather every per-suggestion config snippet into one blob.
    snippets = [s['config'] for s in payload.get('suggestions', []) if 'config' in s]
    if not snippets:
        print("No configuration in response")
        return False
    out_dir = Path('/shared/ai-gitops/pending_prs')
    out_dir.mkdir(parents=True, exist_ok=True)
    out_file = out_dir / f"pr_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
    record = {
        "title": f"AI Network Optimization - {payload.get('focus_area', 'general').title()}",
        "suggestions": '\n'.join(snippets),
        "model": "llama2:13b",
        "feedback_aware": payload.get('feedback_aware', True),
        "feedback_count": 6,
        "timestamp": datetime.now().isoformat(),
        "focus_area": payload.get('focus_area', 'security')
    }
    with open(out_file, 'w') as fh:
        json.dump(record, fh, indent=2)
    print(f"✅ Created PR file: {out_file.name}")
    return True
if __name__ == "__main__":
    # Exit code 0 on success so shell pipelines can chain on the result.
    sys.exit(0 if convert_response_to_pr() else 1)

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env python3
"""SRX Rollback Manager - Production Ready"""
import json
import os
import subprocess
from datetime import datetime
from pathlib import Path
import sys
class SRXRollbackManager:
    """Tracks SRX configuration backups and rollback bookkeeping under /shared."""

    def __init__(self):
        self.base = Path("/shared/ai-gitops")
        self.backup_dir = self.base / "configs" / "backups"
        self.state_file = self.base / "rollback" / "state.json"
        # Guarantee both target directories exist before any operation runs.
        self.backup_dir.mkdir(parents=True, exist_ok=True)
        self.state_file.parent.mkdir(parents=True, exist_ok=True)

    def backup_current(self):
        """Pull the running SRX config over SSH into a timestamped file.

        Returns the backup file path as a string, or None on any failure.
        """
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        target = self.backup_dir / f"srx_backup_{stamp}.conf"
        print(f"📸 Creating backup: {target.name}")
        ssh_cmd = [
            "ssh", "-o", "StrictHostKeyChecking=no",
            "netops@INTERNAL_IP",
            "show configuration | display set | no-more"
        ]
        try:
            proc = subprocess.run(ssh_cmd, capture_output=True, text=True, timeout=30)
            if proc.returncode != 0:
                print(f"❌ Backup failed: {proc.stderr}")
                return None
            target.write_text(proc.stdout)
            print(f"✅ Backup saved: {target.name}")
            # Persist counters so status() can report backup history.
            state = self._load_state()
            state['last_backup'] = stamp
            state['total_backups'] = state.get('total_backups', 0) + 1
            self._save_state(state)
            return str(target)
        except Exception as e:
            print(f"❌ Error: {e}")
            return None

    def list_backups(self):
        """Print the ten most recent backup files with their sizes."""
        found = sorted(self.backup_dir.glob("srx_backup_*.conf"))
        if not found:
            print("No backups found")
            return
        print("📋 Available backups:")
        for idx, item in enumerate(found[-10:], 1):
            kb = item.stat().st_size / 1024
            print(f"  {idx}. {item.name} ({kb:.1f} KB)")

    def status(self):
        """Summarise backup/rollback counters plus any orchestrator pending PR."""
        state = self._load_state()
        print("🔄 SRX Rollback Manager Status")
        print("-" * 40)
        print(f"Last backup: {state.get('last_backup', 'Never')}")
        print(f"Total backups: {state.get('total_backups', 0)}")
        print(f"Rollback count: {state.get('rollback_count', 0)}")
        # Surface a pending PR recorded by the orchestrator, if one exists.
        orch_file = self.base / "state" / "orchestrator_state.json"
        if orch_file.exists():
            with open(orch_file) as fh:
                orch = json.load(fh)
            if orch.get('pending_pr'):
                print(f"⚠️ Pending PR: #{orch['pending_pr']}")

    def _load_state(self):
        # A missing state file simply means a fresh install.
        if not self.state_file.exists():
            return {}
        with open(self.state_file) as fh:
            return json.load(fh)

    def _save_state(self, state):
        with open(self.state_file, 'w') as fh:
            json.dump(state, fh, indent=2)
if __name__ == "__main__":
    mgr = SRXRollbackManager()
    if len(sys.argv) < 2:
        print("Usage: rollback_manager.py [status|backup|list]")
        sys.exit(1)
    # Dispatch table keeps the command handling flat.
    actions = {
        "status": mgr.status,
        "backup": mgr.backup_current,
        "list": mgr.list_backups,
    }
    cmd = sys.argv[1]
    handler = actions.get(cmd)
    if handler is not None:
        handler()
    else:
        print(f"Unknown command: {cmd}")

View File

@@ -0,0 +1,330 @@
#!/usr/bin/env python3
"""
Enhanced pipeline runner with context support for split architecture
Works with AI processor running on separate VM (INTERNAL_IP)
"""
import argparse
import json
import subprocess
import sys
import time
from pathlib import Path
from datetime import datetime
import uuid
def load_feedback_history():
    """Return the recorded PR feedback list, or [] when no history exists yet."""
    history_file = Path('/shared/ai-gitops/feedback/pr_feedback_history.json')
    if not history_file.exists():
        return []
    with open(history_file, 'r') as fh:
        return json.load(fh)
def load_existing_config():
    """Return the cached SRX config text, or "" when none has been collected."""
    cfg_file = Path('/shared/ai-gitops/configs/current_srx_config.conf')
    if not cfg_file.exists():
        return ""
    with open(cfg_file, 'r') as fh:
        return fh.read()
def build_ai_context(args):
    """Assemble the analysis context handed to the remote AI processor.

    Combines the requested focus area, features already present in the
    current SRX config, and constraints learned from past PR feedback.
    """
    ctx = {
        "timestamp": datetime.now().isoformat(),
        "focus_area": args.context,
        "skip_basic": True,  # never ask for basic connectivity suggestions
        "existing_features": [],
        "priority_features": [],
        "constraints": []
    }
    # Flag features the running config already has so the AI skips them.
    running_cfg = load_existing_config()
    if "security-zone" in running_cfg:
        ctx["existing_features"].append("zones_configured")
    if "port-forwarding" in running_cfg:
        ctx["existing_features"].append("gaming_optimizations")
    if "wireguard" in running_cfg.lower():
        ctx["existing_features"].append("vpn_configured")
    # Per-focus-area priorities and the matching constraint sentence.
    focus_map = {
        "performance": (
            ["qos_policies", "traffic_shaping", "bandwidth_management",
             "flow_optimization"],
            "Focus on QoS and traffic optimization",
        ),
        "security": (
            ["rate_limiting", "ddos_protection", "ids_ips_rules",
             "geo_blocking", "threat_feeds"],
            "Focus on advanced security features",
        ),
        "monitoring": (
            ["syslog_enhancements", "snmp_traps", "flow_analytics",
             "performance_metrics"],
            "Focus on visibility and monitoring",
        ),
        "automation": (
            ["event_scripts", "automated_responses", "dynamic_policies",
             "api_integrations"],
            "Focus on automation capabilities",
        ),
    }
    if args.context in focus_map:
        features, constraint = focus_map[args.context]
        ctx["priority_features"] = features
        ctx["constraints"].append(constraint)
    # Fold in lessons from previously rejected PRs.
    history = load_feedback_history()
    if history:
        rejected = [
            entry.get("reason", "")
            for entry in history
            if entry.get("status") == "rejected" or entry.get("feedback_type") == "rejected"
        ]
        if rejected:
            ctx["constraints"].append("Avoid patterns that were previously rejected")
            ctx["rejected_patterns"] = rejected[-5:]  # keep only the latest five
    ctx["instructions"] = [
        "DO NOT suggest basic connectivity policies - all zones are properly configured",
        "DO NOT suggest any/any/any rules - this has been rejected multiple times",
        "FOCUS on advanced features that enhance the existing configuration",
        "CHECK if feature already exists before suggesting",
        f"PRIORITY: {args.context} optimizations and enhancements"
    ]
    return ctx
def run_collection():
    """Invoke the SRX config collection script; True on success."""
    print("📊 Collecting current SRX configuration...")
    proc = subprocess.run(
        ["python3", "/home/netops/orchestrator/collect_srx_config.py"],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        print(f"❌ Collection failed: {proc.stderr}")
        return False
    print("✅ Configuration collected successfully")
    return True
def create_ai_request(context):
    """Drop a request file (plus context) into the shared queue for the AI VM.

    Returns the generated request id.
    """
    print(f"🤖 Creating AI analysis request with context: {context['focus_area']}...")
    # Timestamp plus random suffix keeps concurrent runs collision-free.
    req_id = f"pipeline_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4().hex[:8]}"
    payload = {
        "request_id": req_id,
        "timestamp": datetime.now().isoformat(),
        "type": "analyze_config",
        "context": context,
        "data": {
            "config_file": "/shared/ai-gitops/configs/srx_config_latest.txt",
            "analysis_file": "/shared/ai-gitops/configs/srx_config_analysis_latest.json",
            # Empty aggregations: this is a context-driven analysis run.
            "top_talkers": {"buckets": []},
            "vlans": {"buckets": []},
            "protocols": {"buckets": []}
        }
    }
    # The context is written separately so the processor can reload it.
    context_file = Path('/shared/ai-gitops/context/current_context.json')
    context_file.parent.mkdir(parents=True, exist_ok=True)
    with open(context_file, 'w') as fh:
        json.dump(context, fh, indent=2)
    # Writing the request file is what triggers the remote processor.
    request_file = Path('/shared/ai-gitops/requests') / f"{req_id}.json"
    request_file.parent.mkdir(parents=True, exist_ok=True)
    with open(request_file, 'w') as fh:
        json.dump(payload, fh, indent=2)
    print(f"✅ Request created: {req_id}")
    return req_id
def wait_for_ai_response(request_id, timeout=60):
    """Poll the shared responses directory until the AI answers or we time out.

    Returns True when a response file appeared, False on timeout.
    """
    print(f"⏳ Waiting for AI processor response (timeout: {timeout}s)...")
    response_file = Path('/shared/ai-gitops/responses') / f"{request_id}_response.json"
    for elapsed in range(timeout):
        if response_file.exists():
            print("✅ AI analysis completed")
            # Surface the headline fields so the operator sees what ran.
            with open(response_file, 'r') as fh:
                answer = json.load(fh)
            if 'focus_area' in answer:
                print(f"   Focus area: {answer['focus_area']}")
            if 'feedback_aware' in answer:
                print(f"   Feedback aware: {answer['feedback_aware']}")
            return True
        # Progress heartbeat every five seconds.
        if elapsed % 5 == 0 and elapsed > 0:
            print(f"   Still waiting... ({elapsed}/{timeout}s)")
        time.sleep(1)
    print(f"❌ Timeout waiting for AI response after {timeout} seconds")
    print("   Check AI processor logs: ssh netops@INTERNAL_IP 'sudo tail /var/log/ai-processor/ai-processor.log'")
    return False
def create_pr():
    """Run the Gitea PR creation helper; returns True when the PR was made."""
    print("📝 Creating pull request...")
    script = Path('/home/netops/orchestrator/create_ai_pr.py')
    if not script.exists():
        # Soft-fail: the analysis results are still usable without a PR.
        print("❌ create_ai_pr.py not found - using placeholder")
        print("   To create PRs, ensure create_ai_pr.py is available")
        return False
    proc = subprocess.run(
        ["python3", str(script)],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        print(f"❌ PR creation failed: {proc.stderr}")
        return False
    print("✅ Pull request created")
    return True
def check_ai_processor_status():
    """Best-effort probe of the remote ai-processor service.

    Deliberately returns True even when unverifiable: the pipeline should
    not be blocked merely because the probe itself failed.
    """
    print("🔍 Checking AI processor status...")
    # systemctl can report unit state without sudo.
    probe = subprocess.run(
        ["ssh", "netops@INTERNAL_IP", "systemctl is-active ai-processor"],
        capture_output=True,
        text=True
    )
    if probe.stdout.strip() == "active":
        print("✅ AI processor service is running")
        return True
    # Fall back to looking for the process directly.
    probe = subprocess.run(
        ["ssh", "netops@INTERNAL_IP", "ps aux | grep -v grep | grep ai_processor"],
        capture_output=True,
        text=True
    )
    if "ai_processor.py" in probe.stdout:
        print("✅ AI processor is running (detected via process)")
        return True
    print("⚠️ Cannot verify AI processor status (but it may still be running)")
    print("   Continuing anyway...")
    return True  # known to be running even when SSH probes fail
def main():
    """CLI entry point: drive the full collect → analyze → PR pipeline.

    Exits non-zero when a mandatory step (collection, AI response) fails;
    PR creation failure is reported but does not abort, since the analysis
    results remain usable on disk.
    """
    parser = argparse.ArgumentParser(
        description='Run AI-driven network optimization pipeline with context'
    )
    parser.add_argument(
        '--context',
        choices=['performance', 'security', 'monitoring', 'automation'],
        default='security',
        help='Focus area for AI analysis (default: security)'
    )
    parser.add_argument(
        '--skip-collection',
        action='store_true',
        help='Skip config collection (use existing)'
    )
    parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Run analysis but do not create PR'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='Enable verbose output'
    )
    parser.add_argument(
        '--timeout',
        type=int,
        default=60,
        help='Timeout waiting for AI response (default: 60s)'
    )
    args = parser.parse_args()
    print(f"🚀 Starting pipeline with context: {args.context}")
    print("=" * 50)
    # Step 0: Check AI processor is running
    if not check_ai_processor_status():
        print("\n⚠️ Please start the AI processor service first")
        sys.exit(1)
    # Step 1: Collect current config (unless skipped)
    if not args.skip_collection:
        if not run_collection():
            sys.exit(1)
    # Step 2: Build context for AI
    context = build_ai_context(args)
    if args.verbose:
        print("\n📋 AI Context:")
        print(json.dumps(context, indent=2))
    # Step 3: Create AI request (this triggers the remote AI processor)
    request_id = create_ai_request(context)
    # Step 4: Wait for AI processor to complete
    if not wait_for_ai_response(request_id, args.timeout):
        print("\n⚠️ AI processor may be busy or not running properly")
        print("   Check status: ssh netops@INTERNAL_IP 'sudo systemctl status ai-processor'")
        sys.exit(1)
    # Step 5: Create PR (unless dry-run)
    if not args.dry_run:
        if not create_pr():
            # Non-fatal: the response JSON is already on shared storage.
            print("⚠️ PR creation failed but analysis is complete")
            print(f"   View results: cat /shared/ai-gitops/responses/{request_id}_response.json")
    else:
        print("⚡ Dry run - skipping PR creation")
        print(f"   View analysis: cat /shared/ai-gitops/responses/{request_id}_response.json | jq .suggestions")
    print("\n✨ Pipeline completed successfully!")
    print(f"Focus area: {args.context}")
    if not args.dry_run:
        print("Next steps: Review the PR in Gitea")
    else:
        print(f"Next steps: Review the suggestions and run without --dry-run to create PR")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,309 @@
#!/usr/bin/env python3
"""
SRX Configuration Manager
Handles all interactions with the Juniper SRX device
"""
import subprocess
import logging
import json
from datetime import datetime
from typing import Dict, Optional, List
import re
logger = logging.getLogger(__name__)
class SRXManager:
    """Manages SRX configuration retrieval and deployment.

    All device interaction goes over SSH with key-based auth; this class
    only reads and analyzes configuration, it never commits changes.
    """
    def __init__(self, host: str, user: str, ssh_key: str):
        """
        Initialize SRX Manager
        Args:
            host: SRX IP address
            user: SSH username
            ssh_key: Path to SSH private key
        """
        self.host = host
        self.user = user
        self.ssh_key = ssh_key
    def _execute_ssh_command(self, command: str) -> tuple[bool, str]:
        """
        Execute command on SRX via SSH
        Args:
            command: Junos CLI command to run on the device
        Returns:
            (success, output) tuple; output carries stderr / error text on failure
        """
        ssh_cmd = [
            'ssh',
            '-i', self.ssh_key,
            '-o', 'StrictHostKeyChecking=no',
            '-o', 'ConnectTimeout=10',
            f'{self.user}@{self.host}',
            command
        ]
        try:
            result = subprocess.run(
                ssh_cmd,
                capture_output=True,
                text=True,
                timeout=30
            )
            if result.returncode == 0:
                logger.info(f"Successfully executed: {command[:50]}...")
                return True, result.stdout
            else:
                logger.error(f"Command failed: {result.stderr}")
                return False, result.stderr
        except subprocess.TimeoutExpired:
            logger.error("SSH command timed out")
            return False, "Command timed out"
        except Exception as e:
            logger.error(f"SSH execution error: {e}")
            return False, str(e)
    def get_current_config(self, format: str = "set") -> Optional[str]:
        """
        Retrieve current SRX configuration
        Args:
            format: Configuration format ('set', 'json', 'xml')
        Returns:
            Configuration string or None if failed
        """
        format_map = {
            "set": "display set",
            "json": "display json",
            "xml": "display xml"
        }
        # Unknown formats silently fall back to 'display set'.
        display_format = format_map.get(format, "display set")
        command = f"show configuration | {display_format} | no-more"
        logger.info(f"Pulling SRX configuration in {format} format")
        success, output = self._execute_ssh_command(command)
        if success:
            logger.info(f"Retrieved {len(output)} characters of configuration")
            return output
        else:
            logger.error("Failed to retrieve configuration")
            return None
    def get_config_section(self, section: str) -> Optional[str]:
        """
        Get specific configuration section
        Args:
            section: Config section (e.g., 'security policies', 'interfaces')
        Returns:
            Configuration section in 'display set' form, or None on failure
        """
        command = f"show configuration {section} | display set | no-more"
        success, output = self._execute_ssh_command(command)
        if success:
            return output
        return None
    def parse_security_policies(self, config: str) -> Dict:
        """
        Parse security policies from configuration
        Args:
            config: SRX configuration in 'display set' format
        Returns:
            Dictionary of policies organized by zones; 'total_policies'
            counts unique (zone-pair, policy-name) combinations
        """
        policies = {
            "zone_pairs": {},
            "total_policies": 0,
            "applications": set(),
            "addresses": set()
        }
        # Regex patterns for parsing
        policy_pattern = r'set security policies from-zone (\S+) to-zone (\S+) policy (\S+)'
        app_pattern = r'set security policies .* application (\S+)'
        addr_pattern = r'set security policies .* (source|destination)-address (\S+)'
        for line in config.split('\n'):
            # Parse policy definitions
            policy_match = re.match(policy_pattern, line)
            if policy_match:
                from_zone, to_zone, policy_name = policy_match.groups()
                zone_pair = f"{from_zone}->{to_zone}"
                if zone_pair not in policies["zone_pairs"]:
                    policies["zone_pairs"][zone_pair] = []
                if policy_name not in policies["zone_pairs"][zone_pair]:
                    policies["zone_pairs"][zone_pair].append(policy_name)
                    # BUG FIX: count each policy once. Every 'set ... policy X ...'
                    # line matches the prefix pattern, so incrementing outside this
                    # guard counted one policy per config line, not per policy.
                    policies["total_policies"] += 1
            # Parse applications
            app_match = re.search(app_pattern, line)
            if app_match:
                policies["applications"].add(app_match.group(1))
            # Parse addresses (group(1) is source/destination, group(2) the name)
            addr_match = re.search(addr_pattern, line)
            if addr_match:
                policies["addresses"].add(addr_match.group(2))
        # Convert sets to lists for JSON serialization
        policies["applications"] = list(policies["applications"])
        policies["addresses"] = list(policies["addresses"])
        return policies
    def validate_config_syntax(self, config_lines: List[str]) -> tuple[bool, List[str]]:
        """
        Validate SRX configuration syntax
        Args:
            config_lines: List of configuration commands
        Returns:
            (valid, errors) tuple; errors reference 1-based line numbers
        """
        errors = []
        # Whitelist of command stems this pipeline is allowed to emit.
        valid_commands = [
            'set security policies',
            'set security zones',
            'set security address-book',
            'set applications application',
            'set firewall policer',
            'set firewall filter',
            'set class-of-service',
            'set interfaces',
            'set routing-options'
        ]
        for i, line in enumerate(config_lines, 1):
            line = line.strip()
            # Skip comments and empty lines
            if not line or line.startswith('#'):
                continue
            # Check if line starts with valid command
            if not any(line.startswith(cmd) for cmd in valid_commands):
                errors.append(f"Line {i}: Invalid command prefix: {line[:50]}")
            # Check for required keywords in policies
            if 'security policies' in line and 'policy' in line:
                if not any(keyword in line for keyword in ['match', 'then', 'from-zone', 'to-zone']):
                    errors.append(f"Line {i}: Policy missing required keywords: {line[:50]}")
        return len(errors) == 0, errors
    def test_connectivity(self) -> bool:
        """
        Test SSH connectivity to SRX
        Returns:
            True if connected successfully
        """
        logger.info(f"Testing connectivity to {self.host}")
        success, output = self._execute_ssh_command("show version | match Junos:")
        if success and "Junos:" in output:
            version = output.strip()
            logger.info(f"Connected successfully: {version}")
            return True
        else:
            logger.error("Connectivity test failed")
            return False
    def get_traffic_statistics(self) -> Optional[Dict]:
        """
        Get interface traffic statistics
        Returns:
            Dictionary of traffic stats or None on SSH or JSON parse failure
        """
        command = "show interfaces statistics | display json"
        success, output = self._execute_ssh_command(command)
        if success:
            try:
                # Parse JSON output
                stats = json.loads(output)
                return stats
            except json.JSONDecodeError:
                logger.error("Failed to parse traffic statistics JSON")
                return None
        return None
    def create_config_diff(self, current_config: str, proposed_config: List[str]) -> Dict:
        """
        Create a diff between current and proposed configurations
        Args:
            current_config: Current SRX configuration
            proposed_config: List of proposed configuration lines
        Returns:
            Dictionary with additions and analysis; comments/blank lines are
            ignored, duplicates within proposed_config are collapsed
        """
        current_lines = set(current_config.split('\n'))
        proposed_set = set(proposed_config)
        # Find truly new configurations
        new_configs = []
        duplicate_configs = []
        for config in proposed_set:
            if config.strip() and not config.startswith('#'):
                if config not in current_lines:
                    new_configs.append(config)
                else:
                    duplicate_configs.append(config)
        return {
            "new_configurations": new_configs,
            "duplicate_configurations": duplicate_configs,
            "total_proposed": len(proposed_config),
            "total_new": len(new_configs),
            "total_duplicates": len(duplicate_configs)
        }
# Manual smoke test: run this module directly to exercise the manager.
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    srx = SRXManager(
        host="INTERNAL_IP",
        user="netops",
        ssh_key="/home/netops/.ssh/srx_key"
    )
    if not srx.test_connectivity():
        print("❌ Connectivity test failed")
    else:
        print("✅ Connectivity test passed")
        cfg = srx.get_current_config()
        if not cfg:
            print("❌ Failed to retrieve configuration")
        else:
            print(f"✅ Retrieved {len(cfg)} characters of configuration")
            parsed = srx.parse_security_policies(cfg)
            print(f"📊 Found {parsed['total_policies']} security policies")
            print(f"📊 Zone pairs: {list(parsed['zone_pairs'].keys())}")

View File

@@ -0,0 +1,189 @@
#!/usr/bin/env python3
"""
Strengthen the feedback learning by adding more specific examples
Run this to add stronger feedback about destination-address any
"""
import json
from pathlib import Path
from datetime import datetime
def add_strong_feedback():
    """Append a critical training entry about destination-address any.

    Returns the new total number of feedback entries. A missing history
    file or entries without a 'feedback_type' key are tolerated instead of
    crashing (matching load_feedback_history's tolerant behavior elsewhere).
    """
    feedback_dir = Path('/shared/ai-gitops/feedback')
    # Robustness fix: make sure the directory exists before writing.
    feedback_dir.mkdir(parents=True, exist_ok=True)
    feedback_file = feedback_dir / 'pr_feedback_history.json'
    # Robustness fix: start a fresh history when no file exists yet,
    # instead of dying with FileNotFoundError.
    if feedback_file.exists():
        with open(feedback_file, 'r') as f:
            history = json.load(f)
    else:
        history = []
    print(f"Current feedback entries: {len(history)}")
    # .get(): older entries may not carry a 'feedback_type' key.
    print(f"Rejected: {len([h for h in history if h.get('feedback_type') == 'rejected'])}")
    # Add VERY specific feedback about destination-address
    new_feedback = {
        'pr_number': 'TRAINING-002',
        'timestamp': datetime.now().isoformat(),
        'feedback_type': 'rejected',
        'reviewer': 'security_architect',
        'details': {
            'reason': 'CRITICAL: destination-address any is NOT acceptable',
            'specific_issues': 'NEVER use destination-address any - it allows access to ANY destination which is a security risk',
            'configuration_issues': [
                {
                    'line': 'match destination-address any',
                    'issue': 'destination-address MUST be specific - use actual destination IPs or address-sets',
                    'type': 'security',
                    'severity': 'critical',
                    'correct_example': 'match destination-address SPECIFIC-SERVERS'
                },
                {
                    'line': 'to-zone untrust policy X match destination-address any',
                    'issue': 'For untrust zone, specify exact external services needed',
                    'type': 'security',
                    'severity': 'critical',
                    'correct_example': 'match destination-address WEB-SERVICES-ONLY'
                }
            ],
            'mandatory_rules': [
                'NEVER use destination-address any',
                'ALWAYS specify exact destination addresses or address-sets',
                'For untrust zone, create address-sets for allowed external services',
                'For internal zones, specify exact internal server groups'
            ]
        }
    }
    history.append(new_feedback)
    # Save updated history
    with open(feedback_file, 'w') as f:
        json.dump(history, f, indent=2)
    print(f"\n✅ Added strong feedback about destination-address any")
    print(f"📊 Total feedback entries now: {len(history)}")
    # Show the learning that should happen
    print("\n🎯 Expected AI behavior after this feedback:")
    print("   ❌ NEVER: match destination-address any")
    print("   ✅ ALWAYS: match destination-address SPECIFIC-SERVERS")
    print("   ✅ ALWAYS: match destination-address WEB-SERVICES")
    print("   ✅ ALWAYS: Define destination address-sets first")
    return len(history)
def create_test_request_for_destination():
    """Queue a request probing whether the AI now picks specific destinations.

    Returns the generated request id.
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    request = {
        'request_id': f'destination_test_{stamp}',
        'timestamp': datetime.now().isoformat(),
        'type': 'destination_test',
        'data': {
            'message': 'Need policies for HOME zone to access internet services',
            'specific_requirements': [
                'HOME zone needs web browsing',
                'HOME zone needs DNS access',
                'Must be secure with specific destinations'
            ]
        },
        'context': {
            'test_destination_learning': True,
            'expect_specific_destinations': True
        }
    }
    queue_dir = Path('/shared/ai-gitops/requests')
    queue_dir.mkdir(parents=True, exist_ok=True)
    with open(queue_dir / f"{request['request_id']}.json", 'w') as fh:
        json.dump(request, fh, indent=2)
    print(f"\n✅ Created test request: {request['request_id']}")
    print("   This specifically tests if AI uses specific destinations")
    return request['request_id']
def verify_improvements():
    """Inspect the newest AI response for 'destination-address any' usage.

    Returns True when the response is clean, False when issues remain or
    there is nothing to check.
    """
    responses_dir = Path('/shared/ai-gitops/responses')
    latest_responses = sorted(responses_dir.glob('*.json'),
                             key=lambda x: x.stat().st_mtime, reverse=True)
    if not latest_responses:
        print("No responses found")
        # Bug fix: was a bare `return` (None) while every other path
        # returns a bool; callers treat the result as a boolean.
        return False
    latest = latest_responses[0]
    print(f"\n📋 Checking latest response: {latest.name}")
    with open(latest, 'r') as f:
        data = json.load(f)
    suggestions = data.get('suggestions', '')
    # Robustness fix: some responses carry suggestions as a list of entries
    # rather than one string (see convert_response_to_pr); normalize before
    # splitting so we don't crash with AttributeError.
    if isinstance(suggestions, list):
        suggestions = '\n'.join(str(s) for s in suggestions)
    # Check for issues
    issues = []
    improvements = []
    for line in suggestions.split('\n'):
        if 'destination-address any' in line:
            issues.append(f"❌ Still using: {line.strip()}")
        elif 'destination-address' in line and 'any' not in line:
            improvements.append(f"✅ Good: {line.strip()}")
        elif 'then log' in line:
            improvements.append(f"✅ Logging: {line.strip()}")
    print(f"\nFeedback aware: {data.get('feedback_aware')}")
    print(f"Model: {data.get('model')}")
    if issues:
        print("\n⚠️ Issues found:")
        for issue in issues[:3]:
            print(f"   {issue}")
    if improvements:
        print("\n✅ Improvements found:")
        for improvement in improvements[:5]:
            print(f"   {improvement}")
    if not issues:
        print("\n🎉 SUCCESS! No destination-address any found!")
        return True
    print(f"\n⚠️ Still needs work - {len(issues)} instances of destination-address any")
    return False
def main():
    """Run the feedback-strengthening workflow from the command line."""
    def banner(title, lead=""):
        # The three-line section banner repeats; factor it out.
        print(lead + "="*60)
        print(title)
        print("="*60)
    banner(" STRENGTHENING FEEDBACK LEARNING")
    # Step 1: record the stronger feedback entry.
    add_strong_feedback()
    # Step 2: queue a request that exercises the new rule.
    request_id = create_test_request_for_destination()
    banner(" NEXT STEPS", "\n")
    print("\n1. Wait for AI to process the test request (2-3 minutes)")
    print("2. Check the response:")
    print(f"   cat /shared/ai-gitops/responses/{request_id}_response.json | grep destination-address")
    print("\n3. Or run this script again with --verify flag")
    print("\n4. Run full pipeline to see improvements:")
    print("   python3 run_pipeline.py --skip-netflow")
    # Optional verification pass when invoked with --verify.
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == '--verify':
        banner(" VERIFICATION", "\n")
        verify_improvements()
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,193 @@
#!/usr/bin/env python3
"""
Test script to verify context-aware AI processing
Run this before full deployment to ensure everything works
"""
import json
import time
from pathlib import Path
from datetime import datetime
def create_test_context(focus_area="security"):
    """Write a test context file for *focus_area* and return its path."""
    # Priority lists per focus area; unknown areas (e.g. "automation")
    # get no priorities, matching the original if/elif chain.
    priorities = {
        "security": ["rate_limiting", "ddos_protection", "ids_ips_rules"],
        "performance": ["qos_policies", "traffic_shaping", "bandwidth_management"],
        "monitoring": ["syslog_enhancements", "snmp_traps", "flow_analytics"],
    }
    context = {
        "timestamp": datetime.now().isoformat(),
        "focus_area": focus_area,
        "skip_basic": True,
        "existing_features": [
            "zones_configured",
            "gaming_optimizations",
            "vpn_configured"
        ],
        "priority_features": priorities.get(focus_area, []),
        "instructions": [
            "DO NOT suggest basic connectivity policies",
            "DO NOT suggest any/any/any rules",
            f"FOCUS on {focus_area} optimizations"
        ]
    }
    target_dir = Path('/shared/ai-gitops/context')
    target_dir.mkdir(parents=True, exist_ok=True)
    context_file = target_dir / 'current_context.json'
    with open(context_file, 'w') as fh:
        json.dump(context, fh, indent=2)
    print(f"✅ Created context file for {focus_area}")
    return context_file
def create_test_request():
    """Queue a minimal analysis request; returns the generated request id."""
    req_id = f"test_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    request = {
        "request_id": req_id,
        "timestamp": datetime.now().isoformat(),
        "data": {
            "top_talkers": {
                "buckets": [
                    {"key": "INTERNAL_IP", "doc_count": 1000},
                    {"key": "INTERNAL_IP", "doc_count": 800}
                ]
            },
            "vlans": {"buckets": []},
            "protocols": {"buckets": []}
        }
    }
    queue_dir = Path('/shared/ai-gitops/requests')
    queue_dir.mkdir(parents=True, exist_ok=True)
    with open(queue_dir / f"{req_id}.json", 'w') as fh:
        json.dump(request, fh, indent=2)
    print(f"✅ Created test request: {req_id}")
    return req_id
def check_response(request_id, focus_area):
    """Poll up to 30s for the response and verify the context was applied.

    Returns True when a response appeared, False on timeout.
    """
    response_file = Path('/shared/ai-gitops/responses') / f"{request_id}_response.json"
    for attempt in range(30):
        if response_file.exists():
            with open(response_file, 'r') as fh:
                response = json.load(fh)
            print(f"\n✅ Response generated!")
            print(f"   Focus area: {response.get('focus_area', 'unknown')}")
            print(f"   Feedback aware: {response.get('feedback_aware', False)}")
            # Confirm the requested focus area made it into the response.
            if response.get('focus_area') == focus_area:
                print(f"   ✅ Context correctly applied: {focus_area}")
            else:
                print(f"   ❌ Context mismatch! Expected: {focus_area}, Got: {response.get('focus_area')}")
            print(f"\n   Sample suggestions:")
            for line in response.get('suggestions', '').split('\n')[:5]:
                if line.strip():
                    print(f"      {line}")
            return True
        time.sleep(1)
        print(f"   Waiting for response... ({attempt+1}/30)")
    print(f"❌ No response generated after 30 seconds")
    return False
def run_test(focus_area="security"):
    """Execute one end-to-end context test; returns True on success."""
    print(f"\n{'='*60}")
    print(f"Testing Context System - Focus: {focus_area.upper()}")
    print(f"{'='*60}")
    # Stage the context and the request, then wait for the processor.
    create_test_context(focus_area)
    request_id = create_test_request()
    print("\n⏳ Waiting for AI processor to pick up request...")
    print("   (Make sure ai_processor.py is running)")
    passed = check_response(request_id, focus_area)
    if passed:
        print(f"\n🎉 Test PASSED for {focus_area} context!")
        return passed
    print(f"\n❌ Test FAILED for {focus_area} context")
    print("\nTroubleshooting:")
    print("1. Is ai_processor.py running?")
    print("2. Check logs: tail -f /var/log/ai-processor/ai-processor.log")
    print("3. Verify Ollama is running: curl http://localhost:11434/api/tags")
    return passed
def main():
    """CLI entry point for the context-awareness test suite."""
    print("🧪 AI Context System Test Suite")
    print("================================")
    import argparse
    parser = argparse.ArgumentParser(description='Test context-aware AI processing')
    parser.add_argument('--focus',
                        choices=['security', 'performance', 'monitoring', 'automation'],
                        default='security',
                        help='Focus area to test')
    parser.add_argument('--all',
                        action='store_true',
                        help='Test all focus areas')
    args = parser.parse_args()
    if not args.all:
        # Single focus area requested.
        run_test(args.focus)
        return
    # Exercise every focus area, pausing between runs so requests
    # don't collide in the shared queue.
    outcomes = {}
    for area in ('security', 'performance', 'monitoring', 'automation'):
        outcomes[area] = run_test(area)
        time.sleep(5)
    print(f"\n{'='*60}")
    print("TEST SUMMARY")
    print(f"{'='*60}")
    for area, ok in outcomes.items():
        status = "✅ PASSED" if ok else "❌ FAILED"
        print(f"{area.capitalize():15} {status}")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,364 @@
#!/usr/bin/env python3
"""
Complete Feedback Loop Test
Tests creating a PR, rejecting it with feedback, and AI learning from it
"""
import os
import sys
import json
import time
import subprocess
from datetime import datetime
from pathlib import Path
import requests
class FeedbackLoopTester:
    """End-to-end test of the PR feedback loop.

    Drives five stages over the /shared/ai-gitops mailbox directories:
    create a deliberately bad PR, simulate its rejection, persist the
    feedback, issue a fresh AI request, and verify the new suggestions
    avoid the rejected patterns.
    """

    def __init__(self):
        # Mailbox layout shared with the AI VM.
        self.shared_dir = Path('/shared/ai-gitops')
        self.feedback_dir = self.shared_dir / 'feedback'
        self.pending_prs_dir = self.shared_dir / 'pending_prs'
        self.responses_dir = self.shared_dir / 'responses'
        # Ensure feedback directory exists
        self.feedback_dir.mkdir(parents=True, exist_ok=True)
        # Load config for Gitea (if exists)
        self.config_file = Path('/home/netops/orchestrator/config.yaml')
        if self.config_file.exists():
            import yaml
            with open(self.config_file, 'r') as f:
                self.config = yaml.safe_load(f)
        else:
            self.config = {}

    def step1_create_test_pr_data(self):
        """Create a test PR with intentionally problematic config"""
        print("\n" + "="*60)
        print("STEP 1: Creating Test PR with Problematic Config")
        print("="*60)
        # Create intentionally bad config for testing
        bad_suggestions = """# TEST: Intentionally problematic config for feedback testing
# This should be rejected for security reasons
# ❌ BAD: Any/Any/Any rule (security risk)
set security policies from-zone trust to-zone untrust policy ALLOW-ALL match source-address any
set security policies from-zone trust to-zone untrust policy ALLOW-ALL match destination-address any
set security policies from-zone trust to-zone untrust policy ALLOW-ALL match application any
set security policies from-zone trust to-zone untrust policy ALLOW-ALL then permit
# ❌ BAD: No logging enabled
set security policies from-zone dmz to-zone untrust policy DMZ-OUT match source-address any
set security policies from-zone dmz to-zone untrust policy DMZ-OUT match destination-address any
set security policies from-zone dmz to-zone untrust policy DMZ-OUT then permit
# ❌ BAD: Overly permissive IoT access
set security policies from-zone IOT to-zone HOME policy IOT-ACCESS match source-address any
set security policies from-zone IOT to-zone HOME policy IOT-ACCESS match destination-address any
set security policies from-zone IOT to-zone HOME policy IOT-ACCESS match application any
set security policies from-zone IOT to-zone HOME policy IOT-ACCESS then permit"""
        pr_data = {
            'pr_number': f'TEST-{datetime.now().strftime("%Y%m%d-%H%M%S")}',
            'title': 'TEST: AI Network Optimization for Feedback Testing',
            'description': 'This is a test PR with intentionally problematic config to test feedback learning',
            'suggestions': bad_suggestions,
            'timestamp': datetime.now().isoformat(),
            'test_pr': True,
            'expected_rejection_reasons': [
                'Any/any/any rule detected',
                'No logging enabled',
                'IoT to HOME unrestricted access'
            ]
        }
        # Save test PR
        pr_file = self.pending_prs_dir / f"test_pr_{pr_data['pr_number']}.json"
        self.pending_prs_dir.mkdir(parents=True, exist_ok=True)
        with open(pr_file, 'w') as f:
            json.dump(pr_data, f, indent=2)
        print(f"✅ Created test PR: {pr_file.name}")
        print("\n📋 Problematic configurations included:")
        for reason in pr_data['expected_rejection_reasons']:
            print(f"{reason}")
        return pr_data

    def step2_simulate_pr_rejection(self, pr_data):
        """Simulate rejecting the PR with specific feedback"""
        print("\n" + "="*60)
        print("STEP 2: Simulating PR Rejection with Feedback")
        print("="*60)
        # Feedback shaped like what the webhook listener records for real PRs.
        rejection_feedback = {
            'pr_number': pr_data['pr_number'],
            'timestamp': datetime.now().isoformat(),
            'feedback_type': 'rejected',
            'reviewer': 'security_team',
            'details': {
                'reason': 'Security policy violations detected',
                'specific_issues': 'Multiple any/any/any rules found which violate zero-trust principles',
                'configuration_issues': [
                    {
                        'line': 'policy ALLOW-ALL match source-address any',
                        'issue': 'Never use source-address any in permit rules',
                        'type': 'security',
                        'severity': 'critical'
                    },
                    {
                        'line': 'policy DMZ-OUT then permit',
                        'issue': 'No logging enabled for DMZ traffic',
                        'type': 'security',
                        'severity': 'high'
                    },
                    {
                        'line': 'from-zone IOT to-zone HOME',
                        'issue': 'IoT devices should never have unrestricted access to HOME zone',
                        'type': 'security',
                        'severity': 'critical'
                    }
                ],
                'recommendations': [
                    'Use specific address-sets instead of any',
                    'Always enable logging with "then log session-init"',
                    'IoT devices should only access specific services, not entire zones',
                    'Implement proper zone segmentation'
                ]
            }
        }
        print(f"📝 PR Number: {rejection_feedback['pr_number']}")
        print(f"❌ Status: REJECTED")
        print(f"👤 Reviewer: {rejection_feedback['reviewer']}")
        print(f"\n📋 Issues identified:")
        for issue in rejection_feedback['details']['configuration_issues']:
            print(f"{issue['issue']}")
            print(f" Severity: {issue['severity'].upper()}")
        return rejection_feedback

    def step3_save_feedback(self, feedback):
        """Save feedback to the feedback history file"""
        print("\n" + "="*60)
        print("STEP 3: Saving Feedback to History")
        print("="*60)
        feedback_file = self.feedback_dir / 'pr_feedback_history.json'
        # Load existing feedback if exists
        if feedback_file.exists():
            with open(feedback_file, 'r') as f:
                feedback_history = json.load(f)
            print(f"📂 Loaded existing feedback history ({len(feedback_history)} entries)")
        else:
            feedback_history = []
            print("📂 Creating new feedback history")
        # Add new feedback
        feedback_history.append(feedback)
        # Save updated history
        with open(feedback_file, 'w') as f:
            json.dump(feedback_history, f, indent=2)
        print(f"✅ Saved feedback to: {feedback_file}")
        print(f"📊 Total feedback entries: {len(feedback_history)}")
        # Count types
        rejected = len([f for f in feedback_history if f.get('feedback_type') == 'rejected'])
        approved = len([f for f in feedback_history if f.get('feedback_type') == 'approved'])
        print(f" • Rejected: {rejected}")
        print(f" • Approved: {approved}")
        return feedback_file

    def step4_trigger_new_ai_request(self):
        """Create a new AI request to test if it learned from feedback"""
        print("\n" + "="*60)
        print("STEP 4: Creating New AI Request to Test Learning")
        print("="*60)
        # Create a new request that should avoid the rejected patterns
        test_request = {
            'request_id': f'feedback_test_{datetime.now().strftime("%Y%m%d_%H%M%S")}',
            'timestamp': datetime.now().isoformat(),
            'type': 'feedback_test',
            'data': {
                'message': 'Testing if AI learned from rejection feedback',
                'zones_to_configure': ['IOT', 'HOME', 'DMZ'],
                'requirements': [
                    'Configure IoT to HOME access',
                    'Configure DMZ outbound rules',
                    'Ensure security best practices'
                ]
            },
            'context': {
                'test_feedback_learning': True,
                'previous_rejection': True
            }
        }
        request_file = self.shared_dir / 'requests' / f"{test_request['request_id']}.json"
        request_file.parent.mkdir(parents=True, exist_ok=True)
        with open(request_file, 'w') as f:
            json.dump(test_request, f, indent=2)
        print(f"✅ Created test request: {request_file.name}")
        print(f" Request ID: {test_request['request_id']}")
        print("\n🎯 This request specifically asks for:")
        for req in test_request['data']['requirements']:
            print(f"{req}")
        print("\n⏳ AI should now avoid the mistakes from the rejection...")
        return test_request['request_id']

    def step5_wait_and_verify_learning(self, request_id, timeout=150):
        """Wait for AI response and verify it learned from feedback"""
        print("\n" + "="*60)
        print("STEP 5: Waiting for AI Response and Verifying Learning")
        print("="*60)
        # NOTE(review): this expects '<id>_response.json'; other test scripts
        # in this dump expect '<id>.json' — confirm which convention
        # ai_processor.py actually writes.
        response_file = self.responses_dir / f"{request_id}_response.json"
        start_time = time.time()
        print(f"⏳ Waiting for AI response (timeout: {timeout}s)...")
        # Wait for response (while/else: the else branch runs only on timeout).
        while time.time() - start_time < timeout:
            if response_file.exists():
                print(f"✅ Response received after {int(time.time() - start_time)} seconds")
                break
            if int(time.time() - start_time) % 20 == 0 and time.time() - start_time > 0:
                print(f" ... still waiting ({int(time.time() - start_time)}s elapsed)")
            time.sleep(2)
        else:
            print(f"❌ Timeout waiting for response")
            return False
        # Analyze response
        with open(response_file, 'r') as f:
            response = json.load(f)
        print(f"\n📋 AI Response Analysis:")
        print(f" Model: {response.get('model')}")
        print(f" Feedback aware: {response.get('feedback_aware')}")
        suggestions = response.get('suggestions', '')
        # Check if AI avoided the mistakes
        print("\n🔍 Checking if AI learned from feedback:")
        learned_correctly = True
        # (pattern, description, should_exist): patterns the suggestions
        # must contain (True) or must not contain (False). All patterns are
        # lowercase, matching the .lower() comparison below.
        checks = [
            ('source-address any', 'Still using "any" in source-address', False),
            ('destination-address any', 'Still using "any" in destination-address', False),
            ('application any', 'Still using "any" in application', False),
            ('then log', 'Now includes logging', True),
            ('address-set', 'Uses address-sets', True),
            ('specific', 'Uses specific addresses/applications', True)
        ]
        for pattern, description, should_exist in checks:
            if should_exist:
                if pattern in suggestions.lower():
                    print(f" ✅ LEARNED: {description}")
                else:
                    print(f" ❌ NOT LEARNED: {description}")
                    learned_correctly = False
            else:
                if pattern not in suggestions.lower():
                    print(f" ✅ AVOIDED: Not {description}")
                else:
                    print(f" ❌ MISTAKE: {description}")
                    learned_correctly = False
        # Show sample of new suggestions
        print("\n📝 Sample of new AI suggestions:")
        print("-" * 50)
        for line in suggestions.split('\n')[:10]:
            if line.strip():
                print(f" {line}")
        print("-" * 50)
        return learned_correctly

    def run_complete_test(self):
        """Run the complete feedback loop test"""
        print("\n" + "="*70)
        print(" 🔄 COMPLETE FEEDBACK LOOP TEST")
        print("="*70)
        print("\nThis test will:")
        print("1. Create a PR with intentionally bad config")
        print("2. Simulate rejection with specific feedback")
        print("3. Save feedback for AI learning")
        print("4. Create new request to test learning")
        print("5. Verify AI avoided previous mistakes")
        input("\nPress Enter to start the test...")
        # Run all steps
        pr_data = self.step1_create_test_pr_data()
        feedback = self.step2_simulate_pr_rejection(pr_data)
        self.step3_save_feedback(feedback)
        request_id = self.step4_trigger_new_ai_request()
        learned = self.step5_wait_and_verify_learning(request_id)
        # Final summary
        print("\n" + "="*70)
        print(" 📊 FEEDBACK LOOP TEST RESULTS")
        print("="*70)
        if learned:
            print("\n🎉 SUCCESS! The AI learned from the rejection feedback!")
            print("\nThe AI now:")
            print(" ✅ Avoids any/any/any rules")
            print(" ✅ Includes logging in policies")
            print(" ✅ Uses specific address-sets")
            print(" ✅ Implements proper zone segmentation")
        else:
            print("\n⚠️ PARTIAL SUCCESS - AI needs more training")
            print("\nRecommendations:")
            print(" • Add more rejected examples")
            print(" • Adjust the prompt in ai_processor.py")
            print(" • Consider using a larger model")
        print("\n📁 Files created during test:")
        print(f" • Test PR: {self.pending_prs_dir}/test_pr_*.json")
        print(f" • Feedback: {self.feedback_dir}/pr_feedback_history.json")
        print(f" • AI Response: {self.responses_dir}/{request_id}_response.json")
        return learned
def main():
    """Entry point: warn if the AI processor looks down, then run the test."""
    tester = FeedbackLoopTester()
    # An empty pgrep match means ai_processor.py is probably not running.
    probe = subprocess.run(['pgrep', '-f', 'ai_processor.py'],
                           capture_output=True, text=True)
    if not probe.stdout:
        print("⚠️ AI processor not running on AI VM")
        print(" Start it with: ssh netops@INTERNAL_IP")
        print(" Then: sudo systemctl start ai-processor")
        response = input("\nContinue anyway? (y/n): ")
        if response.lower() != 'y':
            return
    # Run the test
    if tester.run_complete_test():
        print("\n✅ Your feedback learning system is working correctly!")
    else:
        print("\n⚠️ Review the feedback and adjust as needed")
# Entry point when executed as a script.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,90 @@
#!/usr/bin/env python3
"""Test Git authentication for Gitea"""
import subprocess
import logging
# Basic console logging; logger is kept for future use by this script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Your Gitea configuration
# SECURITY NOTE(review): this is a hard-coded personal access token committed
# to the repository — rotate it and load it from an environment variable or a
# secrets store instead of keeping it in source.
GITEA_URL = "https://git.salmutt.dev"
GITEA_TOKEN = "da3be18aad877edb94e896e6c1e7c449581444420"
REPO = "sal/srx-config"
def test_auth_methods():
    """Try several git-over-HTTPS credential formats against the Gitea repo.

    Attempts, in order: ``oauth2:<token>@``, bare ``<token>@`` and
    ``git:<token>@`` URL forms, running ``git ls-remote`` for each.
    Returns True on the first format that succeeds, False if all fail.

    The three previously copy-pasted attempts are now data-driven; the
    unused ``git_url`` local has been removed.
    """
    attempts = [
        ("1. Testing oauth2 authentication format...",
         f"https://oauth2:{GITEA_TOKEN}@git.salmutt.dev/{REPO}.git",
         "OAuth2"),
        ("2. Testing direct token authentication...",
         f"https://{GITEA_TOKEN}@git.salmutt.dev/{REPO}.git",
         "Direct token"),
        ("3. Testing username:token format...",
         f"https://git:{GITEA_TOKEN}@git.salmutt.dev/{REPO}.git",
         "Username:token"),
    ]
    for header, auth_url, label in attempts:
        print(f"\n{header}")
        try:
            result = subprocess.run(
                ['git', 'ls-remote', auth_url, 'HEAD'],
                capture_output=True,
                text=True
            )
            if result.returncode == 0:
                print(f"✅ {label} authentication successful!")
                print(f" HEAD: {result.stdout.strip()}")
                return True
            # Non-zero exit: report and fall through to the next format.
            print(f"❌ {label} authentication failed")
            print(f" Error: {result.stderr}")
        except Exception as e:
            # e.g. git binary missing; try the next format anyway.
            print(f"❌ {label} test error: {e}")
    return False
# Script entry point: run all auth attempts and summarize.
if __name__ == "__main__":
    print("Testing Gitea authentication methods...")
    print(f"Repository: {REPO}")
    # Prints the first 10 characters of the token for identification;
    # the remainder is masked with asterisks.
    print(f"Token: {GITEA_TOKEN[:10]}..." + "*" * (len(GITEA_TOKEN) - 10))
    if test_auth_methods():
        print("\n✅ At least one authentication method works!")
    else:
        print("\n❌ All authentication methods failed")
        print("\nPlease verify:")
        print("1. The token is correct and has appropriate permissions")
        print("2. The repository exists and is accessible")
        print("3. Network connectivity to Gitea is working")

View File

@@ -0,0 +1,26 @@
"""Smoke-test Gitea PR creation using the orchestrator's live configuration."""
import yaml
from gitea_integration import GiteaIntegration

# Load your config
with open('/home/netops/orchestrator/config.yaml', 'r') as f:
    settings = yaml.safe_load(f)

# Test PR creation against the configured Gitea instance.
client = GiteaIntegration(settings['gitea'])

# Test with sample config
sample_config = """# Test configuration
set security zones security-zone DMZ address 192.168.50.0/24
set applications application TEST-APP destination-port 8080"""

created = client.create_pr_with_config(
    srx_config=sample_config,
    title="Test PR - Please Close",
    description="This is a test PR to verify Gitea integration"
)

if created:
    print(f"Success! Created PR #{created['number']}")
    print(f"URL: {created['url']}")
else:
    print("Failed to create PR")

View File

@@ -0,0 +1,11 @@
#!/usr/bin/env python3
"""Print the orchestrator's PR-scheduling state for quick debugging."""
import sys
sys.path.append('/home/netops/orchestrator')
from orchestrator_main import NetworkOrchestrator
from datetime import datetime

orchestrator = NetworkOrchestrator()
# Show the wall clock the scheduler sees, then its decision and raw state.
print(f"Current time: {datetime.now()}")
print(f"Current day: {datetime.now().strftime('%A')}")
print(f"Should create PR: {orchestrator.should_create_pr()}")
print(f"State: {orchestrator.load_state()}")

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
"""
Create a simple test request for the AI processor
Run this on the orchestrator VM to create a test request
"""
import json
from datetime import datetime
from pathlib import Path

print("Creating test request for AI processor...")

# Build the synthetic request payload the AI processor should pick up.
payload = {
    'request_id': f'test_{datetime.now().strftime("%Y%m%d_%H%M%S")}',
    'timestamp': datetime.now().isoformat(),
    'type': 'test_request',
    'data': {
        'top_talkers': {
            'buckets': [
                {'key': 'INTERNAL_IP', 'doc_count': 1000},
                {'key': '192.168.10.100', 'doc_count': 500},
                {'key': '10.0.1.25', 'doc_count': 250}
            ]
        },
        'message': 'This is a test request to verify AI processor is working'
    },
    'context': {
        'config_available': True,
        'analysis_available': True,
        'test': True
    }
}

# Drop the request into the shared mailbox directory.
mailbox = Path('/shared/ai-gitops/requests')
mailbox.mkdir(parents=True, exist_ok=True)
target = mailbox / f"{payload['request_id']}.json"
target.write_text(json.dumps(payload, indent=2))

print(f"✅ Test request created: {target}")
print(f" Request ID: {payload['request_id']}")
print("\nNow check if AI processor picks it up:")
print(" 1. Wait 10-20 seconds")
print(" 2. Check for response: ls -la /shared/ai-gitops/responses/")
print(f" 3. Look for: {payload['request_id']}_response.json")

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
"""Verify the Gitea token can push by creating a throwaway branch."""
import subprocess
import tempfile
import os

# SECURITY NOTE(review): hard-coded access token committed to the repo —
# rotate it and load from an environment variable or secrets store.
TOKEN = "da3be18aad877edb94e896e6c1e7c449581444420"
REPO_URL = f"https://oauth2:{TOKEN}@git.salmutt.dev/sal/srx-config.git"

print("Testing simple git push...")

with tempfile.TemporaryDirectory() as workdir:
    print(f"Working in: {workdir}")

    def git(*args, **kwargs):
        """Run a git subcommand inside the temporary working directory."""
        return subprocess.run(['git', *args], cwd=workdir, **kwargs)

    # Clone
    print("1. Cloning...")
    cloned = git('clone', '--depth', '1', REPO_URL, '.',
                 capture_output=True, text=True)
    if cloned.returncode != 0:
        print(f"Clone failed: {cloned.stderr}")
        exit(1)
    print("✅ Clone successful")

    # Configure git identity for the throwaway commit.
    git('config', 'user.email', 'test@example.com')
    git('config', 'user.name', 'Test User')

    # Set push URL explicitly
    print("2. Setting push URL...")
    git('remote', 'set-url', 'origin', REPO_URL)

    # Create test branch
    branch = "test-push-permissions"
    print(f"3. Creating branch {branch}...")
    git('checkout', '-b', branch)

    # Write a marker file, commit it, and attempt the push.
    with open(os.path.join(workdir, 'test-permissions.txt'), 'w') as marker:
        marker.write("Testing push permissions\n")
    git('add', '.')
    git('commit', '-m', 'Test push permissions')

    print("4. Attempting push...")
    pushed = git('push', '-u', 'origin', branch,
                 capture_output=True, text=True)
    if pushed.returncode == 0:
        print("✅ Push successful! Token has write permissions.")
        print(" You may want to delete the test branch from Gitea")
    else:
        print(f"❌ Push failed: {pushed.stderr}")

View File

@@ -0,0 +1,118 @@
#!/usr/bin/env python3
"""
Simple test script to verify the split architecture communication
Run this on Orchestrator VM to test AI processing
"""
import json
import time
from pathlib import Path
from datetime import datetime
# Shared-filesystem mailboxes used to exchange JSON messages with the AI VM.
REQUEST_DIR = Path("/shared/ai-gitops/requests")
RESPONSE_DIR = Path("/shared/ai-gitops/responses")
def test_ai_communication():
    """Test basic communication with AI VM.

    Runs two round-trips over the shared request/response directories:
    a trivial echo message, then a simulated traffic-analysis request.
    Returns True only if both responses arrive within their timeouts.

    NOTE(review): this script polls for '<request_id>.json' in the
    responses directory, while other scripts in this repo expect
    '<request_id>_response.json' — confirm which name ai_processor.py
    actually writes.
    """
    print("Testing Split Architecture Communication")
    print("=" * 50)
    # Test 1: Simple message
    print("\nTest 1: Simple message exchange")
    request_id = f"test_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    request_file = REQUEST_DIR / f"{request_id}.json"
    request_data = {
        "type": "test",
        "message": "Hello AI, please confirm you received this",
        "timestamp": datetime.now().isoformat()
    }
    print(f"Sending request: {request_id}")
    with open(request_file, 'w') as f:
        json.dump(request_data, f, indent=2)
    # Wait for response; for/else — the else branch fires only on timeout.
    response_file = RESPONSE_DIR / f"{request_id}.json"
    print("Waiting for response...", end="")
    for i in range(30):  # Wait up to 30 seconds
        if response_file.exists():
            print(" Received!")
            with open(response_file, 'r') as f:
                response = json.load(f)
            print(f"Response: {json.dumps(response, indent=2)}")
            # Consume the response so a later run doesn't see stale data.
            response_file.unlink()
            break
        print(".", end="", flush=True)
        time.sleep(1)
    else:
        print(" Timeout!")
        return False
    # Test 2: Traffic analysis request
    print("\nTest 2: Simulated traffic analysis")
    request_id = f"analyze_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    request_file = REQUEST_DIR / f"{request_id}.json"
    request_data = {
        "type": "analyze_traffic",
        "traffic_data": {
            "top_sources": [
                {"ip": "INTERNAL_IP", "bytes": 1000000},
                {"ip": "INTERNAL_IP", "bytes": 500000}
            ],
            "top_ports": [443, 80, 22, 3389]
        },
        "srx_context": {
            "zones": ["trust", "untrust", "dmz"],
            "rule_count": 15
        }
    }
    print(f"Sending analysis request: {request_id}")
    with open(request_file, 'w') as f:
        json.dump(request_data, f, indent=2)
    # Wait for response — analysis gets a longer window than the echo test.
    response_file = RESPONSE_DIR / f"{request_id}.json"
    print("Waiting for AI analysis...", end="")
    for i in range(60):  # Wait up to 60 seconds for analysis
        if response_file.exists():
            print(" Received!")
            with open(response_file, 'r') as f:
                response = json.load(f)
            if response.get("status") == "success":
                print("AI Analysis completed successfully!")
                print(f"Analysis preview: {response.get('analysis', '')[:200]}...")
            else:
                print(f"Analysis failed: {response}")
            response_file.unlink()
            break
        print(".", end="", flush=True)
        time.sleep(1)
    else:
        print(" Timeout!")
        return False
    print("\n" + "=" * 50)
    print("All tests completed successfully!")
    return True
# Script entry: require operator confirmation before writing to the mailbox.
if __name__ == "__main__":
    # Make sure AI processor is running on AI VM first
    print("Make sure ai_processor.py is running on the AI VM!")
    print("Press Ctrl+C to cancel, or Enter to continue...")
    try:
        input()
    except KeyboardInterrupt:
        print("\nCancelled")
        exit(1)
    if test_ai_communication():
        print("\n✅ Split architecture is working correctly!")
    else:
        print("\n❌ Communication test failed")

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
"""Inspect the newest pipeline response and grade its security hygiene."""
import json
from pathlib import Path

# Get latest response (newest mtime first).
responses = sorted(Path('/shared/ai-gitops/responses').glob('pipeline_*.json'),
                   key=lambda x: x.stat().st_mtime, reverse=True)
if responses:
    with open(responses[0], 'r') as f:
        data = json.load(f)
    print(f"Checking: {responses[0].name}")
    print(f"Feedback aware: {data.get('feedback_aware')}")
    suggestions = data.get('suggestions', '')
    # Count issues
    dest_any_count = suggestions.count('destination-address any')
    src_any_count = suggestions.count('source-address any')
    app_any_count = suggestions.count('application any')
    log_count = suggestions.count('then log')
    print(f"\n📊 Analysis:")
    # BUG FIX: the status markers were garbled ("'' if x else ''" printed
    # nothing either way). Restored meaningful markers: any-matches are bad
    # (❌ when present), logging statements are good (✅ when present).
    print(f" source-address any: {src_any_count} {'❌' if src_any_count else '✅'}")
    print(f" destination-address any: {dest_any_count} {'❌' if dest_any_count else '✅'}")
    print(f" application any: {app_any_count} {'❌' if app_any_count else '✅'}")
    print(f" logging statements: {log_count} {'✅' if log_count else '❌'}")
    if dest_any_count > 0:
        print(f"\n⚠️ AI still needs to learn about destination-address!")
        print(" Add more feedback and update the prompt")
    else:
        print(f"\n✅ AI has learned to avoid any/any/any!")

View File

@@ -0,0 +1,165 @@
#!/usr/bin/env python3
"""
Verify that AI suggestions are based on current SRX configuration
"""
import json
from pathlib import Path
from datetime import datetime
def load_json_file(filepath):
    """Safely load a JSON file.

    Returns the parsed object, or None when the file is missing,
    unreadable, or not valid JSON.
    """
    try:
        with open(filepath, 'r') as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) are no longer silently swallowed.
        return None
def verify_config_usage():
    """Verify AI is using current SRX config.

    Cross-checks the collected SRX configuration against the latest AI
    response: zone overlap, network addressing, and whether the request
    actually carried the config.

    Fixes versus the original:
    - `ai_zones`, `suggestions` and `common_zones` are initialized up
      front; previously they were only bound inside conditional branches,
      so the later comparison code could raise NameError.
    - the final banner used `print("\\n="*60)`, which printed the two-char
      sequence "\\n=" sixty times instead of a separator line.
    """
    print("="*60)
    print(" 🔍 VERIFYING AI USES CURRENT SRX CONFIG")
    print("="*60)
    # Defaults so the comparison steps below never hit unbound names.
    srx_zones = set()
    ai_zones = set()
    common_zones = set()
    suggestions = ''
    # 1. Check current SRX config
    print("\n📋 STEP 1: Current SRX Configuration")
    print("-"*40)
    config_file = Path('/shared/ai-gitops/configs/current_srx_config.json')
    if not config_file.exists():
        print("❌ No current config file found")
        print(" Run: python3 collect_srx_config.py")
        return
    srx_config = load_json_file(config_file)
    if srx_config:
        srx_zones = set(zone['name'] for zone in srx_config.get('zones', []))
        srx_policies = srx_config.get('policies', [])
        srx_addresses = srx_config.get('address_book', [])
        print(f"✅ Config loaded from: {config_file}")
        print(f" Timestamp: {srx_config.get('timestamp', 'Unknown')}")
        print(f" Zones ({len(srx_zones)}): {', '.join(sorted(srx_zones))}")
        print(f" Policies: {len(srx_policies)}")
        print(f" Address entries: {len(srx_addresses)}")
        # Show some actual addresses
        if srx_addresses:
            print("\n Sample addresses from your SRX:")
            for addr in srx_addresses[:3]:
                print(f"{addr.get('name', 'Unknown')}: {addr.get('address', 'Unknown')}")
    else:
        print("❌ Could not load config file")
        return
    # 2. Check latest AI response
    print("\n🤖 STEP 2: Latest AI Suggestions")
    print("-"*40)
    response_dir = Path('/shared/ai-gitops/responses')
    responses = sorted(response_dir.glob('pipeline_*.json'),
                       key=lambda x: x.stat().st_mtime, reverse=True)
    if not responses:
        print("❌ No AI responses found")
        return
    latest_response = responses[0]
    ai_response = load_json_file(latest_response)
    if ai_response:
        print(f"✅ Latest response: {latest_response.name}")
        print(f" Timestamp: {ai_response.get('timestamp', 'Unknown')}")
        print(f" Feedback aware: {ai_response.get('feedback_aware', False)}")
        suggestions = ai_response.get('suggestions', '')
        # Collect every zone name mentioned in from-zone/to-zone clauses.
        for line in suggestions.split('\n'):
            if 'from-zone' in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part == 'from-zone' and i+1 < len(parts):
                        ai_zones.add(parts[i+1])
                    elif part == 'to-zone' and i+1 < len(parts):
                        ai_zones.add(parts[i+1])
        print(f"\n Zones referenced by AI: {', '.join(sorted(ai_zones))}")
        # Check if AI uses actual network addresses
        print("\n Addresses defined by AI:")
        for line in suggestions.split('\n'):
            if 'address trust-network' in line and '192.168' in line:
                print(f"{line.strip()}")
            elif 'address web-servers' in line:
                print(f"{line.strip()}")
    # 3. Compare and verify
    print("\n✅ STEP 3: Verification Results")
    print("-"*40)
    # Check zone overlap
    if srx_zones and ai_zones:
        common_zones = srx_zones.intersection(ai_zones)
        if common_zones:
            print(f"✅ AI correctly uses your zones: {', '.join(sorted(common_zones))}")
        else:
            print("⚠️ AI zones don't match your SRX zones")
        # Check for zones AI mentioned that don't exist
        extra_zones = ai_zones - srx_zones
        if extra_zones:
            print(f"⚠️ AI referenced non-existent zones: {', '.join(extra_zones)}")
        # Check for zones AI missed
        missed_zones = srx_zones - ai_zones
        if missed_zones:
            print(f"📝 Zones not used in suggestions: {', '.join(missed_zones)}")
    # Check if AI uses your network ranges
    print("\n📊 STEP 4: Network Address Verification")
    print("-"*40)
    if 'INTERNAL_IP/24' in suggestions:
        print("✅ AI uses your HOME network (INTERNAL_IP/24)")
    if 'trust-network' in suggestions and '192.168' in suggestions:
        print("✅ AI defines trust-network matching your subnet")
    # Check the actual request that was sent
    print("\n📤 STEP 5: Checking AI Request Content")
    print("-"*40)
    request_file = Path('/shared/ai-gitops/requests') / f"{latest_response.stem.replace('_response', '')}.json"
    if request_file.exists():
        request_data = load_json_file(request_file)
        if request_data and 'srx_config' in request_data:
            print("✅ SRX config WAS included in AI request")
            req_config = request_data['srx_config']
            if isinstance(req_config, dict):
                print(f" Config zones: {len(req_config.get('zones', []))}")
                print(f" Config policies: {len(req_config.get('policies', []))}")
        else:
            print("⚠️ SRX config might not be in request")
    # Final assessment
    print("\n" + "="*60)  # was `print("\n="*60)` — printed "\n=" 60 times
    print(" 📊 FINAL ASSESSMENT")
    print("="*60)
    if common_zones and '192.168' in suggestions:
        print("✅ CONFIRMED: AI is using your current SRX configuration!")
        print(" - References your actual zones")
        print(" - Uses your network addressing scheme")
        print(" - Builds upon existing policies")
    else:
        print("⚠️ PARTIAL: AI may not be fully using your config")
        print(" Check the pipeline to ensure config is being passed")
    print("\n💡 To ensure config is always used:")
    print(" 1. Always run: python3 collect_srx_config.py first")
    print(" 2. Verify: /shared/ai-gitops/configs/current_srx_config.json exists")
    print(" 3. Check: AI request includes 'srx_config' field")
# Entry point when executed as a script.
if __name__ == "__main__":
    verify_config_usage()

View File

@@ -0,0 +1,316 @@
#!/usr/bin/env python3
"""
Gitea Webhook Listener - Automatically captures PR approvals/rejections
Runs on orchestrator VM to capture feedback in real-time
"""
from flask import Flask, request, jsonify
import json
import logging
import subprocess
from datetime import datetime
from pathlib import Path
from dotenv import load_dotenv
import os
import hmac
import hashlib
# Load environment variables from home directory
env_path = Path.home() / '.env'
load_dotenv(env_path)
# NOTE(review): duplicate import — Flask, request and jsonify are already
# imported at the top of this file; this line is harmless but should be removed.
from flask import Flask, request, jsonify
# This loads from .env file
# Shared secret used to verify X-Gitea-Signature on incoming webhooks;
# empty string disables verification (see handle_webhook).
WEBHOOK_SECRET = os.environ.get('WEBHOOK_SECRET', '')
app = Flask(__name__)
# Configure logging
# Logs go both to a file and to stdout (journald when run as a service).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('/var/log/webhook-listener.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
# Configuration
# Persistent stores on the shared filesystem read by the AI processor.
FEEDBACK_FILE = "/shared/ai-gitops/feedback/pr_feedback_history.json"
LEARNING_FILE = "/shared/ai-gitops/learning/patterns.json"
def ensure_directories():
    """Create the feedback, learning and deployment-log directories if absent."""
    for target in (Path(FEEDBACK_FILE).parent,
                   Path(LEARNING_FILE).parent,
                   Path('/var/log/orchestrator')):
        target.mkdir(parents=True, exist_ok=True)
def load_feedback_history():
    """Load existing feedback history.

    Returns the list stored in FEEDBACK_FILE, or an empty list when the
    file is absent, unreadable, or corrupt.
    """
    if Path(FEEDBACK_FILE).exists():
        try:
            with open(FEEDBACK_FILE, 'r') as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            # Narrowed from a bare `except:`; a corrupt history file
            # degrades to an empty history instead of masking other errors.
            return []
    return []
def save_feedback_history(feedback):
    """Write the full feedback history back to disk as pretty-printed JSON."""
    serialized = json.dumps(feedback, indent=2)
    with open(FEEDBACK_FILE, 'w') as out:
        out.write(serialized)
    logger.info(f"Saved feedback history with {len(feedback)} entries")
def load_learning_patterns():
    """Load learning patterns.

    Returns the persisted pattern dict, or a fresh default structure when
    the file is missing, unreadable, or corrupt.
    """
    if Path(LEARNING_FILE).exists():
        try:
            with open(LEARNING_FILE, 'r') as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            # Narrowed from a bare `except: pass`; fall through to defaults.
            pass
    return {"avoid_patterns": [], "successful_patterns": []}
def save_learning_patterns(patterns):
    """Persist the learning-pattern dict and note the update in the log."""
    serialized = json.dumps(patterns, indent=2)
    with open(LEARNING_FILE, 'w') as out:
        out.write(serialized)
    logger.info("Updated learning patterns")
def extract_config_changes(pr_body):
    """Extract SRX ``set ...`` config commands from a PR body.

    Any line whose stripped form starts with ``set `` is collected,
    whether or not it sits inside a ``` fenced code block. The previous
    implementation tracked an ``in_code_block`` flag but applied the
    identical condition on both sides of the toggle, so the flag had no
    effect; the dead logic has been removed.

    Returns an empty list for a None/empty body.
    """
    if not pr_body:
        return []
    stripped = (line.strip() for line in pr_body.split('\n'))
    return [line for line in stripped if line.startswith('set ')]
def update_learning(feedback_entry):
    """Update AI learning patterns based on feedback.

    Rejected PRs contribute their config lines plus coarse reason tags to
    ``avoid_patterns``; approved PRs feed ``successful_patterns``. All
    appends are membership-checked — previously the reason tags (e.g.
    "any-any-any-pattern") were appended unconditionally, so repeated
    rejections grew unbounded duplicate entries.
    """
    patterns = load_learning_patterns()
    if feedback_entry["status"] == "rejected":
        # Add rejected patterns
        for config in feedback_entry.get("config_changes", []):
            if config not in patterns["avoid_patterns"]:
                patterns["avoid_patterns"].append(config)
        # Mark common rejection reasons (deduplicated).
        reason = feedback_entry.get("reason", "").lower()
        markers = []
        if "any any any" in reason or "any/any/any" in reason:
            markers.append("any-any-any-pattern")
        if "redundant" in reason or "already configured" in reason:
            markers.append("redundant-config")
        if "too broad" in reason or "overly permissive" in reason:
            markers.append("overly-permissive")
        for marker in markers:
            if marker not in patterns["avoid_patterns"]:
                patterns["avoid_patterns"].append(marker)
    elif feedback_entry["status"] == "approved":
        # Track successful patterns
        for config in feedback_entry.get("config_changes", []):
            if config not in patterns["successful_patterns"]:
                patterns["successful_patterns"].append(config)
    save_learning_patterns(patterns)
    logger.info(f"Learning updated: {len(patterns['avoid_patterns'])} patterns to avoid")
@app.route('/webhook', methods=['POST'])
def handle_webhook():
    """Main webhook handler for Gitea PR events.

    Flow: verify the HMAC signature (when WEBHOOK_SECRET is set), filter for
    AI-generated pull_request events, and on PR close record an approval/
    rejection feedback entry, update the learning patterns, and — for merged
    PRs — trigger the deployment script as a subprocess.

    Returns a JSON (body, status) tuple: 200 for processed/ignored events,
    403 for signature failures, 500 on unexpected errors.
    """
    try:
        # Verify webhook signature for security (skipped entirely when no
        # WEBHOOK_SECRET is configured).
        if WEBHOOK_SECRET:
            signature = request.headers.get('X-Gitea-Signature', '')
            if not signature:
                logger.warning("No signature provided in webhook request")
                return jsonify({"error": "No signature"}), 403
            # Calculate expected HMAC-SHA256 over the raw request body.
            expected = 'sha256=' + hmac.new(
                WEBHOOK_SECRET.encode(),
                request.data,
                hashlib.sha256
            ).hexdigest()
            # Constant-time comparison to avoid timing side channels.
            if not hmac.compare_digest(signature, expected):
                logger.warning(f"Invalid signature from {request.remote_addr}")
                return jsonify({"error": "Invalid signature"}), 403
            logger.debug("Webhook signature verified successfully")
        # Get event data. NOTE(review): request.json may be None if the
        # payload is not JSON; data.get() would then raise — confirm Gitea
        # always posts application/json here.
        data = request.json
        event = request.headers.get('X-Gitea-Event', '')
        logger.info(f"Received event: {event}")
        if event != "pull_request":
            return jsonify({"status": "ignored", "reason": "Not a PR event"}), 200
        action = data.get('action', '')
        pr = data.get('pull_request', {})
        # Check if this is an AI-generated PR (identified by title markers).
        pr_title = pr.get('title', '')
        if 'AI-Generated' not in pr_title and 'Network Configuration Update' not in pr_title:
            logger.info(f"Ignoring non-AI PR: {pr_title}")
            return jsonify({"status": "ignored", "reason": "Not AI-generated"}), 200
        # Process closed PRs (either merged = approved, or closed unmerged
        # = rejected). All other actions fall through to the 200 at the end.
        if action == "closed":
            pr_number = pr.get('number', 0)
            pr_body = pr.get('body', '')
            merged = pr.get('merged', False)
            # Extract 'set ...' config commands from the PR body.
            config_changes = extract_config_changes(pr_body)
            # Create the feedback entry that gets persisted and fed into
            # the learning-pattern update below.
            feedback_entry = {
                "timestamp": datetime.now().isoformat(),
                "pr_number": pr_number,
                "pr_title": pr_title,
                "status": "approved" if merged else "rejected",
                "config_changes": config_changes,
                "merged": merged
            }
            # For rejected PRs, heuristically derive a rejection reason.
            # NOTE(review): the 'any' substring test below also matches
            # words like "company" — confirm this coarse check is intended.
            if not merged:
                feedback_entry["feedback_type"] = "rejected"  # For compatibility
                # Look for common rejection patterns in title or last comment
                if "any" in str(config_changes).lower():
                    feedback_entry["reason"] = "Contains any/any/any patterns"
                else:
                    feedback_entry["reason"] = "Changes not needed or incorrect"
                logger.info(f"❌ PR #{pr_number} REJECTED - {pr_title}")
            else:
                feedback_entry["feedback_type"] = "approved"  # For compatibility
                logger.info(f"✅ PR #{pr_number} APPROVED - {pr_title}")
            # Save feedback (append to the full history file).
            feedback = load_feedback_history()
            feedback.append(feedback_entry)
            save_feedback_history(feedback)
            # Update learning patterns from this entry.
            update_learning(feedback_entry)
            # AUTO-DEPLOYMENT CODE - If PR was merged, trigger deployment
            # by running the deploy script synchronously (blocks this
            # request for up to 300 s).
            if merged:
                logger.info(f"PR #{pr_number} was merged - triggering auto-deployment")
                try:
                    result = subprocess.run(
                        [
                            '/home/netops/orchestrator/venv/bin/python',
                            '/home/netops/orchestrator/deploy_approved.py'
                        ],
                        capture_output=True,
                        text=True,
                        timeout=300
                    )
                    if result.returncode == 0:
                        logger.info(f"✅ Successfully auto-deployed PR #{pr_number}")
                        # Log deployment to the audit trail.
                        with open('/var/log/orchestrator/deployments.log', 'a') as f:
                            f.write(f"{datetime.now().isoformat()} - Auto-deployed PR #{pr_number}\n")
                    else:
                        logger.error(f"❌ Auto-deployment failed: {result.stderr}")
                except subprocess.TimeoutExpired:
                    logger.error("Deployment timed out after 5 minutes")
                except Exception as e:
                    logger.error(f"Deployment error: {e}")
            return jsonify({
                "status": "recorded",
                "pr_number": pr_number,
                "decision": feedback_entry["status"],
                "configs_captured": len(config_changes),
                "deployed": merged  # Indicate if deployment was triggered
            }), 200
        return jsonify({"status": "ignored", "reason": f"Action {action} not processed"}), 200
    except Exception as e:
        # Top-level boundary: report the failure back to Gitea as a 500.
        logger.error(f"Error processing webhook: {e}")
        return jsonify({"error": str(e)}), 500
@app.route('/health', methods=['GET'])
def health_check():
    """Health check endpoint: reports service liveness and whether the
    feedback/learning store files exist on disk."""
    payload = {
        "status": "healthy",
        "service": "webhook-listener",
        # Existence flags are serialized as "True"/"False" strings,
        # matching the original response shape.
        "feedback_file": str(Path(FEEDBACK_FILE).exists()),
        "learning_file": str(Path(LEARNING_FILE).exists()),
    }
    return jsonify(payload), 200
@app.route('/stats', methods=['GET'])
def get_stats():
    """Return aggregate feedback statistics (counts, approval rate,
    learning-pattern sizes) as JSON; 500 with an error body on failure."""
    try:
        feedback = load_feedback_history()
        patterns = load_learning_patterns()
        approved = sum(1 for entry in feedback if entry.get("status") == "approved")
        rejected = sum(1 for entry in feedback if entry.get("status") == "rejected")
        if feedback:
            approval_rate = f"{(approved / len(feedback) * 100):.1f}%"
            last_seen = feedback[-1]["timestamp"]
        else:
            approval_rate = "0%"
            last_seen = None
        stats = {
            "total_prs": len(feedback),
            "approved": approved,
            "rejected": rejected,
            "approval_rate": approval_rate,
            "patterns_to_avoid": len(patterns.get("avoid_patterns", [])),
            "successful_patterns": len(patterns.get("successful_patterns", [])),
            "last_feedback": last_seen,
        }
        return jsonify(stats), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
@app.route('/feedback/recent', methods=['GET'])
def recent_feedback():
    """Return up to the five most recent feedback entries, newest first."""
    try:
        history = load_feedback_history()
        latest_first = list(reversed(history[-5:]))
        return jsonify(latest_first), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
@app.route('/learning/patterns', methods=['GET'])
def get_patterns():
    """Expose the current learning patterns (avoid/successful) as JSON."""
    try:
        return jsonify(load_learning_patterns()), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
    # Ensure the feedback/learning directories exist before serving.
    ensure_directories()
    logger.info("Starting Gitea webhook listener...")
    logger.info(f"Feedback file: {FEEDBACK_FILE}")
    logger.info(f"Learning file: {LEARNING_FILE}")
    # Run Flask app on all interfaces, port 5000.
    # NOTE(review): this is Flask's built-in dev server; confirm a WSGI
    # server fronts this in production.
    app.run(host='0.0.0.0', port=5000, debug=False)