feat(engineering,ra-qm): add secrets-vault-manager, sql-database-assistant, gcp-cloud-architect, soc2-compliance
secrets-vault-manager (403-line SKILL.md, 3 scripts, 3 references): - HashiCorp Vault, AWS SM, Azure KV, GCP SM integration - Secret rotation, dynamic secrets, audit logging, emergency procedures sql-database-assistant (457-line SKILL.md, 3 scripts, 3 references): - Query optimization, migration generation, schema exploration - Multi-DB support (PostgreSQL, MySQL, SQLite, SQL Server) - ORM patterns (Prisma, Drizzle, TypeORM, SQLAlchemy) gcp-cloud-architect (418-line SKILL.md, 3 scripts, 3 references): - 6-step workflow mirroring aws-solution-architect for GCP - Cloud Run, GKE, BigQuery, Cloud Functions, cost optimization - Completes cloud trifecta (AWS + Azure + GCP) soc2-compliance (417-line SKILL.md, 3 scripts, 3 references): - SOC 2 Type I & II preparation, Trust Service Criteria mapping - Control matrix generation, evidence tracking, gap analysis - First SOC 2 skill in ra-qm-team (joins GDPR, ISO 27001, ISO 13485) All 12 scripts pass --help. Docs generated, mkdocs.yml nav updated. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
330
engineering/secrets-vault-manager/scripts/audit_log_analyzer.py
Normal file
330
engineering/secrets-vault-manager/scripts/audit_log_analyzer.py
Normal file
@@ -0,0 +1,330 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Analyze Vault or cloud secret manager audit logs for anomalies.
|
||||
|
||||
Reads JSON-lines or JSON-array audit log files and flags unusual access
|
||||
patterns including volume spikes, off-hours access, new source IPs,
|
||||
and failed authentication attempts.
|
||||
|
||||
Usage:
|
||||
python audit_log_analyzer.py --log-file vault-audit.log --threshold 5
|
||||
python audit_log_analyzer.py --log-file audit.json --threshold 3 --json
|
||||
|
||||
Expected log entry format (JSON lines or JSON array):
|
||||
{
|
||||
"timestamp": "2026-03-20T14:32:00Z",
|
||||
"type": "request",
|
||||
"auth": {"accessor": "token-abc123", "entity_id": "eid-001", "display_name": "approle-payment-svc"},
|
||||
"request": {"path": "secret/data/production/payment/api-keys", "operation": "read"},
|
||||
"response": {"status_code": 200},
|
||||
"remote_address": "10.0.1.15"
|
||||
}
|
||||
|
||||
Fields are optional — the analyzer works with whatever is available.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import textwrap
|
||||
from collections import defaultdict
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
def load_logs(path):
    """Read audit log entries from *path*.

    Accepts either one JSON array or newline-delimited JSON objects.
    Exits with status 1 if the file is missing; malformed JSON lines are
    skipped with a warning on stderr.

    Args:
        path: Filesystem path to the audit log.

    Returns:
        List of raw entry dicts (possibly empty).
    """
    try:
        with open(path, "r") as fh:
            raw = fh.read().strip()
    except FileNotFoundError:
        print(f"ERROR: Log file not found: {path}", file=sys.stderr)
        sys.exit(1)

    if not raw:
        return []

    # A leading "[" suggests a single JSON array holding every entry.
    if raw.startswith("["):
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            pass  # fall through and retry the content as JSON lines

    # Otherwise treat each non-empty line as its own JSON object.
    records = []
    for lineno, text in enumerate(raw.split("\n"), 1):
        text = text.strip()
        if not text:
            continue
        try:
            records.append(json.loads(text))
        except json.JSONDecodeError:
            print(f"WARNING: Skipping malformed line {lineno}", file=sys.stderr)

    return records
|
||||
|
||||
|
||||
def _parse_timestamp(raw):
    """Parse a timestamp string into a datetime, or return None.

    Handles ISO 8601 forms ("...Z", "...+05:00", fractional seconds) and
    the space-separated "YYYY-MM-DD HH:MM:SS" form.
    """
    if not raw or not isinstance(raw, str):
        return None
    # fromisoformat covers all documented ISO variants, but does not accept
    # a literal "Z" suffix before Python 3.11 — map it to an explicit UTC
    # offset first. (The previous format-string loop appended "Z" to inputs
    # even when they already carried a numeric offset, so its %z branch
    # could never match.)
    try:
        return datetime.fromisoformat(raw.replace("Z", "+00:00"))
    except ValueError:
        pass
    # Fallback: space-separated timestamp without a timezone.
    try:
        return datetime.strptime(raw, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        return None


def extract_fields(entry):
    """Extract normalized fields from a log entry.

    Args:
        entry: Raw audit log entry dict; every field is optional.

    Returns:
        Dict with keys: timestamp (datetime or None), hour (int or None),
        identity, path, operation, status_code, remote_address, entry_type.
        Missing string fields fall back to "unknown"; status_code to None.
    """
    ts = _parse_timestamp(entry.get("timestamp", entry.get("time", "")))

    auth = entry.get("auth", {})
    request = entry.get("request", {})
    response = entry.get("response", {})

    return {
        "timestamp": ts,
        "hour": ts.hour if ts else None,
        "identity": auth.get("display_name", auth.get("entity_id", "unknown")),
        "path": request.get("path", entry.get("path", "unknown")),
        "operation": request.get("operation", entry.get("operation", "unknown")),
        "status_code": response.get("status_code", entry.get("status_code")),
        "remote_address": entry.get("remote_address", entry.get("source_address", "unknown")),
        "entry_type": entry.get("type", "unknown"),
    }
|
||||
|
||||
|
||||
def analyze(entries, threshold):
    """Run anomaly detection across all log entries.

    Args:
        entries: Raw audit log entry dicts (see module docstring for shape).
        threshold: Sensitivity knob — lower values flag more anomalies.

    Returns:
        Dict with "summary" stats, "anomalies" (sorted most severe first),
        "top_accessed_paths" (up to 10), and "hourly_distribution".
    """
    parsed = [extract_fields(e) for e in entries]

    # Aggregation counters
    access_by_identity = defaultdict(int)
    access_by_path = defaultdict(int)
    access_by_ip = defaultdict(set)  # identity -> set of IPs
    ip_to_identities = defaultdict(set)  # IP -> set of identities
    failed_by_source = defaultdict(int)  # "identity@ip" -> failure count
    off_hours_access = []
    path_by_identity = defaultdict(set)  # identity -> set of paths
    hourly_distribution = defaultdict(int)

    for p in parsed:
        identity = p["identity"]
        path = p["path"]
        ip = p["remote_address"]
        status = p["status_code"]
        hour = p["hour"]

        access_by_identity[identity] += 1
        access_by_path[path] += 1
        access_by_ip[identity].add(ip)
        ip_to_identities[ip].add(identity)
        path_by_identity[identity].add(path)

        if hour is not None:
            hourly_distribution[hour] += 1

        # Failed access: any 4xx/5xx status, or an explicit status of 0.
        # FIX: was `if status and (...)` — 0 is falsy, which made the
        # `status == 0` branch unreachable; compare against None instead.
        if status is not None and (status >= 400 or status == 0):
            failed_by_source[f"{identity}@{ip}"] += 1

        # Off-hours: before 6 AM or after 10 PM
        if hour is not None and (hour < 6 or hour >= 22):
            off_hours_access.append(p)

    anomalies = []

    # 1. Volume spikes — identities accessing secrets more than threshold * average
    if access_by_identity:
        avg_access = sum(access_by_identity.values()) / len(access_by_identity)
        # Floor at `threshold` so tiny logs don't flag everything.
        spike_threshold = max(threshold * avg_access, threshold)
        for identity, count in access_by_identity.items():
            if count >= spike_threshold:
                anomalies.append({
                    "type": "volume_spike",
                    "severity": "HIGH",
                    "identity": identity,
                    "access_count": count,
                    "threshold": round(spike_threshold, 1),
                    "description": f"Identity '{identity}' made {count} accesses (threshold: {round(spike_threshold, 1)})",
                })

    # 2. Multi-IP access — single identity from many IPs
    for identity, ips in access_by_ip.items():
        if len(ips) >= threshold:
            anomalies.append({
                "type": "multi_ip_access",
                "severity": "MEDIUM",
                "identity": identity,
                "ip_count": len(ips),
                "ips": sorted(ips),
                "description": f"Identity '{identity}' accessed from {len(ips)} different IPs",
            })

    # 3. Failed access attempts
    for source, count in failed_by_source.items():
        if count >= threshold:
            anomalies.append({
                "type": "failed_access",
                "severity": "HIGH",
                "source": source,
                "failure_count": count,
                "description": f"Source '{source}' had {count} failed access attempts",
            })

    # 4. Off-hours access (floor of 2 so threshold=1 doesn't flag one-offs)
    if off_hours_access:
        off_hours_identities = defaultdict(int)
        for p in off_hours_access:
            off_hours_identities[p["identity"]] += 1

        for identity, count in off_hours_identities.items():
            if count >= max(threshold, 2):
                anomalies.append({
                    "type": "off_hours_access",
                    "severity": "MEDIUM",
                    "identity": identity,
                    "access_count": count,
                    "description": f"Identity '{identity}' made {count} accesses outside business hours (before 6 AM / after 10 PM)",
                })

    # 5. Broad path access — single identity touching many paths
    for identity, paths in path_by_identity.items():
        if len(paths) >= threshold * 2:
            anomalies.append({
                "type": "broad_access",
                "severity": "MEDIUM",
                "identity": identity,
                "path_count": len(paths),
                "paths": sorted(paths)[:10],  # cap the listing at 10 paths
                "description": f"Identity '{identity}' accessed {len(paths)} distinct secret paths",
            })

    # Stable sort keeps detection order within each severity class.
    severity_order = {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3}
    anomalies.sort(key=lambda x: severity_order.get(x["severity"], 4))

    summary = {
        "total_entries": len(entries),
        "parsed_entries": len(parsed),
        "unique_identities": len(access_by_identity),
        "unique_paths": len(access_by_path),
        "unique_source_ips": len(ip_to_identities),
        "total_failures": sum(failed_by_source.values()),
        "off_hours_events": len(off_hours_access),
        "anomalies_found": len(anomalies),
    }

    # Top accessed paths (negated count keeps ties in insertion order)
    top_paths = sorted(access_by_path.items(), key=lambda x: -x[1])[:10]

    return {
        "summary": summary,
        "anomalies": anomalies,
        "top_accessed_paths": [{"path": p, "count": c} for p, c in top_paths],
        "hourly_distribution": dict(sorted(hourly_distribution.items())),
    }
|
||||
|
||||
|
||||
def print_human(result, threshold):
    """Print human-readable analysis report.

    Sections: header, summary counts, anomalies (or a "none" notice),
    top accessed paths, and a 24-hour access histogram with off-hours
    markers. *result* is the dict returned by analyze().
    """
    summary = result["summary"]
    anomalies = result["anomalies"]

    print("=== Audit Log Analysis Report ===")
    print(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}")
    print(f"Anomaly threshold: {threshold}")
    print()

    print("--- Summary ---")
    print(f" Total log entries: {summary['total_entries']}")
    print(f" Unique identities: {summary['unique_identities']}")
    print(f" Unique secret paths: {summary['unique_paths']}")
    print(f" Unique source IPs: {summary['unique_source_ips']}")
    print(f" Total failures: {summary['total_failures']}")
    print(f" Off-hours events: {summary['off_hours_events']}")
    print(f" Anomalies detected: {summary['anomalies_found']}")
    print()

    if anomalies:
        print("--- Anomalies ---")
        # NOTE(review): the 1-based index i is never used — candidate for removal.
        for i, a in enumerate(anomalies, 1):
            print(f" [{a['severity']}] {a['type']}: {a['description']}")
        print()
    else:
        print("--- No anomalies detected ---")
        print()

    if result["top_accessed_paths"]:
        print("--- Top Accessed Paths ---")
        for item in result["top_accessed_paths"]:
            print(f" {item['count']:5d} {item['path']}")
        print()

    if result["hourly_distribution"]:
        print("--- Hourly Distribution ---")
        # The inline else-guard is redundant here (dict is non-empty inside
        # this branch) but harmless.
        max_count = max(result["hourly_distribution"].values()) if result["hourly_distribution"] else 1
        for hour in range(24):
            count = result["hourly_distribution"].get(hour, 0)
            # Scale bars to 40 columns relative to the busiest hour.
            bar_len = int((count / max_count) * 40) if max_count > 0 else 0
            # Mark the same off-hours window used by analyze().
            marker = " *" if (hour < 6 or hour >= 22) else ""
            print(f" {hour:02d}:00 {'#' * bar_len:40s} {count}{marker}")
        print(" (* = off-hours)")
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, load the log, analyze, and report.

    Exits 1 when the log file is missing or contains no entries.
    """
    parser = argparse.ArgumentParser(
        description="Analyze Vault/cloud secret manager audit logs for anomalies.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent("""\
            The analyzer detects:
              - Volume spikes (identity accessing secrets above threshold * average)
              - Multi-IP access (single identity from many source IPs)
              - Failed access attempts (repeated auth/access failures)
              - Off-hours access (before 6 AM or after 10 PM)
              - Broad path access (single identity accessing many distinct paths)

            Log format: JSON lines or JSON array. Each entry should include
            timestamp, auth info, request path/operation, response status,
            and remote address. Missing fields are handled gracefully.

            Examples:
              %(prog)s --log-file vault-audit.log --threshold 5
              %(prog)s --log-file audit.json --threshold 3 --json
            """),
    )
    parser.add_argument("--log-file", required=True, help="Path to audit log file (JSON lines or JSON array)")
    parser.add_argument(
        "--threshold",
        type=int,
        default=5,
        help="Anomaly sensitivity threshold — lower = more sensitive (default: 5)",
    )
    parser.add_argument("--json", action="store_true", dest="json_output", help="Output as JSON")

    args = parser.parse_args()

    entries = load_logs(args.log_file)
    if not entries:
        print("No log entries found in file.", file=sys.stderr)
        sys.exit(1)

    result = analyze(entries, args.threshold)
    # Attach run metadata so JSON consumers can trace the report's origin.
    result["log_file"] = args.log_file
    result["threshold"] = args.threshold
    result["analyzed_at"] = datetime.now().isoformat()

    if args.json_output:
        print(json.dumps(result, indent=2))
    else:
        print_human(result, args.threshold)


if __name__ == "__main__":
    main()
|
||||
280
engineering/secrets-vault-manager/scripts/rotation_planner.py
Normal file
280
engineering/secrets-vault-manager/scripts/rotation_planner.py
Normal file
@@ -0,0 +1,280 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Create a rotation schedule from a secret inventory file.
|
||||
|
||||
Reads a JSON inventory of secrets and produces a rotation plan based on
|
||||
the selected policy (30d, 60d, 90d) with urgency classification.
|
||||
|
||||
Usage:
|
||||
python rotation_planner.py --inventory secrets.json --policy 30d
|
||||
python rotation_planner.py --inventory secrets.json --policy 90d --json
|
||||
|
||||
Inventory file format (JSON):
|
||||
[
|
||||
{
|
||||
"name": "prod-db-password",
|
||||
"type": "database",
|
||||
"store": "vault",
|
||||
"last_rotated": "2026-01-15",
|
||||
"owner": "platform-team",
|
||||
"environment": "production"
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import textwrap
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
|
||||
# Rotation policy name -> maximum rotation interval in days.
POLICY_DAYS = {
    "30d": 30,
    "60d": 60,
    "90d": 90,
}

# Default rotation period by secret type if not overridden by policy
# (the effective interval is min(policy, type default) — see compute_schedule).
TYPE_DEFAULTS = {
    "database": 30,
    "api-key": 90,
    "tls-certificate": 60,
    "ssh-key": 90,
    "service-token": 1,
    "encryption-key": 90,
    "oauth-secret": 90,
    "password": 30,
}

# Urgency buckets: a secret is placed in the first bucket whose
# days-until-due upper bound it does not exceed.
URGENCY_THRESHOLDS = {
    "critical": 0,  # Already overdue
    "high": 7,  # Due within 7 days
    "medium": 14,  # Due within 14 days
    "low": 30,  # Due within 30 days
}
|
||||
|
||||
|
||||
def load_inventory(path):
    """Load and sanity-check the secret inventory JSON file.

    Exits with status 1 on a missing file, invalid JSON, or a non-array
    top level. Non-object entries are skipped with a warning; entries with
    a missing or unparseable last_rotated date are kept with last_rotated
    set to None so the scheduler treats them as overdue.
    """
    try:
        with open(path, "r") as fh:
            raw = json.load(fh)
    except FileNotFoundError:
        print(f"ERROR: Inventory file not found: {path}", file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError as e:
        print(f"ERROR: Invalid JSON in {path}: {e}", file=sys.stderr)
        sys.exit(1)

    if not isinstance(raw, list):
        print("ERROR: Inventory must be a JSON array of secret objects", file=sys.stderr)
        sys.exit(1)

    cleaned = []
    for idx, record in enumerate(raw):
        if not isinstance(record, dict):
            print(f"WARNING: Skipping entry {idx} — not an object", file=sys.stderr)
            continue

        name = record.get("name", f"unnamed-{idx}")
        rotated_raw = record.get("last_rotated")

        rotated_at = None
        if not rotated_raw:
            print(f"WARNING: '{name}' has no last_rotated date — marking as overdue", file=sys.stderr)
        else:
            try:
                rotated_at = datetime.strptime(rotated_raw, "%Y-%m-%d")
            except ValueError:
                print(f"WARNING: '{name}' has invalid date '{rotated_raw}' — marking as overdue", file=sys.stderr)

        cleaned.append({
            "name": name,
            "type": record.get("type", "unknown"),
            "store": record.get("store", "unknown"),
            "last_rotated": rotated_at,
            "owner": record.get("owner", "unassigned"),
            "environment": record.get("environment", "unknown"),
        })

    return cleaned
|
||||
|
||||
|
||||
def compute_schedule(inventory, policy_days):
    """Build a per-secret rotation schedule, most urgent first.

    Args:
        inventory: Normalized entries from load_inventory().
        policy_days: Maximum rotation interval from the selected policy.

    Returns:
        List of schedule dicts sorted by urgency, then days until due.
    """
    now = datetime.now()
    plan = []

    for item in inventory:
        # Effective interval is the stricter of policy and type default.
        interval = min(policy_days, TYPE_DEFAULTS.get(item["type"], 90))

        last = item["last_rotated"]
        if last is None:
            # Unknown rotation history — treat as maximally overdue.
            days_since = 999
            due_date = now  # Immediate
            days_until = -999
        else:
            days_since = (now - last).days
            due_date = last + timedelta(days=interval)
            days_until = (due_date - now).days

        # Bucket into an urgency class by days remaining.
        if days_until <= URGENCY_THRESHOLDS["critical"]:
            urgency = "CRITICAL"
        elif days_until <= URGENCY_THRESHOLDS["high"]:
            urgency = "HIGH"
        elif days_until <= URGENCY_THRESHOLDS["medium"]:
            urgency = "MEDIUM"
        else:
            urgency = "LOW"

        plan.append({
            "name": item["name"],
            "type": item["type"],
            "store": item["store"],
            "owner": item["owner"],
            "environment": item["environment"],
            "last_rotated": last.strftime("%Y-%m-%d") if last else "NEVER",
            "rotation_interval_days": interval,
            "next_rotation": due_date.strftime("%Y-%m-%d"),
            "days_until_due": days_until,
            "days_since_rotation": days_since,
            "urgency": urgency,
        })

    # Sort by urgency (critical first), then by days until due.
    rank = {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3}
    plan.sort(key=lambda row: (rank.get(row["urgency"], 4), row["days_until_due"]))

    return plan
|
||||
|
||||
|
||||
def build_summary(schedule):
    """Aggregate schedule rows into counts by urgency, type, and owner.

    Returns a dict with total_secrets, the three breakdown maps, and
    convenience counts for overdue (CRITICAL) and due-within-7-days (HIGH).
    """
    by_urgency = {}
    by_type = {}
    by_owner = {}

    def _tally(bucket, key):
        # Increment bucket[key], creating the key on first sight.
        bucket[key] = bucket.get(key, 0) + 1

    for row in schedule:
        _tally(by_urgency, row["urgency"])
        _tally(by_type, row["type"])
        _tally(by_owner, row["owner"])

    return {
        "total_secrets": len(schedule),
        "by_urgency": by_urgency,
        "by_type": by_type,
        "by_owner": by_owner,
        "overdue_count": by_urgency.get("CRITICAL", 0),
        "due_within_7d": by_urgency.get("HIGH", 0),
    }
|
||||
|
||||
|
||||
def print_human(schedule, summary, policy):
    """Print human-readable rotation plan.

    Sections: header, urgency summary, a fixed-width schedule table, and
    action items (immediate rotations for CRITICAL, this-week for HIGH).
    *schedule* and *summary* come from compute_schedule()/build_summary().
    """
    print(f"=== Secret Rotation Plan (Policy: {policy}) ===")
    print(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}")
    print(f"Total secrets: {summary['total_secrets']}")
    print()

    print("--- Urgency Summary ---")
    # Only non-zero urgency classes are shown, worst first.
    for urg in ["CRITICAL", "HIGH", "MEDIUM", "LOW"]:
        count = summary["by_urgency"].get(urg, 0)
        if count > 0:
            print(f" {urg:10s} {count}")
    print()

    if not schedule:
        print("No secrets in inventory.")
        return

    print("--- Rotation Schedule ---")
    # Column widths match the row format below.
    print(f" {'Name':30s} {'Type':15s} {'Urgency':10s} {'Last Rotated':12s} {'Next Due':12s} {'Owner'}")
    print(f" {'-'*30} {'-'*15} {'-'*10} {'-'*12} {'-'*12} {'-'*15}")

    for entry in schedule:
        overdue_marker = " **OVERDUE**" if entry["urgency"] == "CRITICAL" else ""
        print(
            f" {entry['name']:30s} {entry['type']:15s} {entry['urgency']:10s} "
            f"{entry['last_rotated']:12s} {entry['next_rotation']:12s} "
            f"{entry['owner']}{overdue_marker}"
        )

    print()
    print("--- Action Items ---")
    critical = [e for e in schedule if e["urgency"] == "CRITICAL"]
    high = [e for e in schedule if e["urgency"] == "HIGH"]

    if critical:
        print(f" IMMEDIATE: Rotate {len(critical)} overdue secret(s):")
        for e in critical:
            print(f" - {e['name']} ({e['type']}, owner: {e['owner']})")
    if high:
        print(f" THIS WEEK: Rotate {len(high)} secret(s) due within 7 days:")
        for e in high:
            print(f" - {e['name']} (due: {e['next_rotation']}, owner: {e['owner']})")
    if not critical and not high:
        print(" No urgent rotations needed.")
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse args, load inventory, and emit the plan."""
    parser = argparse.ArgumentParser(
        description="Create rotation schedule from a secret inventory file.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent("""\
            Policies:
              30d  Aggressive — all secrets rotate within 30 days max
              60d  Standard — 60-day maximum rotation window
              90d  Relaxed — 90-day maximum rotation window

            Note: Some secret types (e.g., database passwords) have shorter
            built-in defaults that override the policy maximum.

            Example inventory file (secrets.json):
              [
                {"name": "prod-db", "type": "database", "store": "vault",
                 "last_rotated": "2026-01-15", "owner": "platform-team",
                 "environment": "production"}
              ]
            """),
    )
    parser.add_argument("--inventory", required=True, help="Path to JSON inventory file")
    parser.add_argument(
        "--policy",
        required=True,
        choices=["30d", "60d", "90d"],
        help="Rotation policy (maximum rotation interval)",
    )
    parser.add_argument("--json", action="store_true", dest="json_output", help="Output as JSON")

    args = parser.parse_args()

    # choices= guarantees the key exists in POLICY_DAYS.
    policy_days = POLICY_DAYS[args.policy]
    inventory = load_inventory(args.inventory)
    schedule = compute_schedule(inventory, policy_days)
    summary = build_summary(schedule)

    result = {
        "policy": args.policy,
        "policy_days": policy_days,
        "generated_at": datetime.now().isoformat(),
        "summary": summary,
        "schedule": schedule,
    }

    if args.json_output:
        print(json.dumps(result, indent=2))
    else:
        print_human(schedule, summary, args.policy)


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,302 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Generate Vault policy and auth configuration from application requirements.
|
||||
|
||||
Produces HCL policy files and auth method setup commands for HashiCorp Vault
|
||||
based on application name, auth method, and required secret paths.
|
||||
|
||||
Usage:
|
||||
python vault_config_generator.py --app-name payment-service --auth-method approle --secrets "db-creds,api-key,tls-cert"
|
||||
python vault_config_generator.py --app-name api-gateway --auth-method kubernetes --secrets "db-creds" --namespace production --json
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import textwrap
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
# Default TTLs by auth method; keys feed directly into the generated
# `vault write` commands in generate_auth_config().
AUTH_METHOD_DEFAULTS = {
    "approle": {
        "token_ttl": "1h",
        "token_max_ttl": "4h",
        "secret_id_num_uses": 1,  # single-use SecretID
        "secret_id_ttl": "10m",
    },
    "kubernetes": {
        "token_ttl": "1h",
        "token_max_ttl": "4h",
    },
    "oidc": {
        "token_ttl": "8h",
        "token_max_ttl": "12h",
    },
}

# Secret type templates: secrets engine, path template ({app}/{env}
# placeholders are filled in by generate_policy_hcl/build_output),
# capabilities to grant, and a human-readable description.
SECRET_TYPE_MAP = {
    "db-creds": {
        "engine": "database",
        "path": "database/creds/{app}-readonly",
        "capabilities": ["read"],
        "description": "Dynamic database credentials",
    },
    "db-admin": {
        "engine": "database",
        "path": "database/creds/{app}-readwrite",
        "capabilities": ["read"],
        "description": "Dynamic database admin credentials",
    },
    "api-key": {
        "engine": "kv-v2",
        "path": "secret/data/{env}/{app}/api-keys",
        "capabilities": ["read"],
        "description": "Static API keys (KV v2)",
    },
    "tls-cert": {
        "engine": "pki",
        "path": "pki/issue/{app}-cert",
        "capabilities": ["create", "update"],
        "description": "TLS certificate issuance",
    },
    "encryption": {
        "engine": "transit",
        "path": "transit/encrypt/{app}-key",
        "capabilities": ["update"],
        "description": "Transit encryption operations",
    },
    "ssh-cert": {
        "engine": "ssh",
        "path": "ssh/sign/{app}-role",
        "capabilities": ["create", "update"],
        "description": "SSH certificate signing",
    },
    "config": {
        "engine": "kv-v2",
        "path": "secret/data/{env}/{app}/config",
        "capabilities": ["read"],
        "description": "Application configuration secrets",
    },
}
|
||||
|
||||
|
||||
def parse_secrets(secrets_str):
    """Split a comma-separated secret-type string into known and unknown.

    Args:
        secrets_str: e.g. "db-creds, api-key,tls-cert".

    Returns:
        (known, unknown): types present in SECRET_TYPE_MAP and the rest,
        each preserving input order.
    """
    requested = [token.strip() for token in secrets_str.split(",") if token.strip()]
    known = [t for t in requested if t in SECRET_TYPE_MAP]
    unknown = [t for t in requested if t not in SECRET_TYPE_MAP]
    return known, unknown
|
||||
|
||||
|
||||
def generate_policy_hcl(app_name, secrets, environment="production"):
    """Render an HCL policy document for the requested secret types.

    Each secret type contributes one `path` stanza; an explicit deny on
    "sys/*" is always appended last.
    """
    header = [
        f'# Vault policy for {app_name}',
        f'# Generated: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}',
        f'# Environment: {environment}',
        '',
    ]

    stanzas = []
    for secret_type in secrets:
        spec = SECRET_TYPE_MAP[secret_type]
        mount_path = spec["path"].format(app=app_name, env=environment)
        cap_list = ", ".join(f'"{c}"' for c in spec["capabilities"])
        stanzas.extend([
            f'# {spec["description"]}',
            f'path "{mount_path}" {{',
            f' capabilities = [{cap_list}]',
            '}',
            '',
        ])

    # Always deny admin paths regardless of requested capabilities.
    footer = [
        '# Deny admin paths',
        'path "sys/*" {',
        ' capabilities = ["deny"]',
        '}',
    ]

    return "\n".join(header + stanzas + footer)
|
||||
|
||||
|
||||
def generate_auth_config(app_name, auth_method, policy_name, namespace=None):
    """Build the `vault` CLI commands that wire up the chosen auth method.

    Args:
        app_name: Application/service name used in role names.
        auth_method: One of "approle", "kubernetes", "oidc".
        policy_name: Policy to attach to issued tokens.
        namespace: Kubernetes namespace (kubernetes method only).

    Returns:
        List of {"description", "command"} dicts; empty for an
        unrecognized auth method.
    """
    defaults = AUTH_METHOD_DEFAULTS.get(auth_method, {})
    steps = []

    if auth_method == "approle":
        role_cmd = (
            f"vault write auth/approle/role/{app_name} \\\n"
            f" token_ttl={defaults['token_ttl']} \\\n"
            f" token_max_ttl={defaults['token_max_ttl']} \\\n"
            f" secret_id_num_uses={defaults['secret_id_num_uses']} \\\n"
            f" secret_id_ttl={defaults['secret_id_ttl']} \\\n"
            f" token_policies=\"{policy_name}\""
        )
        steps.append({"description": f"Create AppRole for {app_name}", "command": role_cmd})
        steps.append({
            "description": "Fetch RoleID",
            "command": f"vault read auth/approle/role/{app_name}/role-id",
        })
        steps.append({
            "description": "Generate SecretID (single-use)",
            "command": f"vault write -f auth/approle/role/{app_name}/secret-id",
        })

    elif auth_method == "kubernetes":
        bound_ns = namespace or "default"
        k8s_cmd = (
            f"vault write auth/kubernetes/role/{app_name} \\\n"
            f" bound_service_account_names={app_name} \\\n"
            f" bound_service_account_namespaces={bound_ns} \\\n"
            f" policies={policy_name} \\\n"
            f" ttl={defaults['token_ttl']}"
        )
        steps.append({"description": f"Create Kubernetes auth role for {app_name}", "command": k8s_cmd})

    elif auth_method == "oidc":
        oidc_cmd = (
            f"vault write auth/oidc/role/{app_name} \\\n"
            f" bound_audiences=\"vault\" \\\n"
            f" allowed_redirect_uris=\"https://vault.example.com/ui/vault/auth/oidc/oidc/callback\" \\\n"
            f" user_claim=\"email\" \\\n"
            f" oidc_scopes=\"openid,profile,email\" \\\n"
            f" policies=\"{policy_name}\" \\\n"
            f" ttl={defaults['token_ttl']}"
        )
        steps.append({"description": f"Create OIDC role for {app_name}", "command": oidc_cmd})

    return steps
|
||||
|
||||
|
||||
def build_output(app_name, auth_method, secrets, environment, namespace):
    """Assemble the complete configuration payload.

    Returns either an error dict (no valid secret types requested) or a
    result dict containing the policy HCL, auth setup commands, resolved
    secret paths, and metadata. Unknown secret types become warnings.
    """
    known, unknown = parse_secrets(secrets)

    if not known:
        # Nothing usable was requested — surface the valid options instead.
        return {
            "error": "No valid secret types provided",
            "unknown": unknown,
            "available_types": list(SECRET_TYPE_MAP.keys()),
        }

    policy_name = f"{app_name}-policy"

    details = []
    for kind in known:
        spec = SECRET_TYPE_MAP[kind]
        details.append({
            "type": kind,
            "engine": spec["engine"],
            "path": spec["path"].format(app=app_name, env=environment),
            "capabilities": spec["capabilities"],
            "description": spec["description"],
        })

    result = {
        "app_name": app_name,
        "auth_method": auth_method,
        "environment": environment,
        "policy_name": policy_name,
        "policy_hcl": generate_policy_hcl(app_name, known, environment),
        "auth_commands": generate_auth_config(app_name, auth_method, policy_name, namespace),
        "secrets": details,
        "generated_at": datetime.now().isoformat(),
    }

    if unknown:
        result["warnings"] = [f"Unknown secret type '{u}' — skipped. Available: {list(SECRET_TYPE_MAP.keys())}" for u in unknown]
    if namespace:
        result["namespace"] = namespace

    return result
|
||||
|
||||
|
||||
def print_human(result):
    """Print human-readable output for a build_output() result.

    Exits with status 1 when the result is an error payload (no valid
    secret types were requested).
    """
    if "error" in result:
        print(f"ERROR: {result['error']}")
        if result.get("unknown"):
            print(f" Unknown types: {', '.join(result['unknown'])}")
        print(f" Available types: {', '.join(result['available_types'])}")
        sys.exit(1)

    print(f"=== Vault Configuration for {result['app_name']} ===")
    print(f"Auth Method: {result['auth_method']}")
    print(f"Environment: {result['environment']}")
    print(f"Policy Name: {result['policy_name']}")
    print()

    # Warnings cover unknown secret types that were skipped.
    if result.get("warnings"):
        for w in result["warnings"]:
            print(f"WARNING: {w}")
        print()

    print("--- Policy HCL ---")
    print(result["policy_hcl"])
    print()

    print(f"Write policy: vault policy write {result['policy_name']} {result['policy_name']}.hcl")
    print()

    print("--- Auth Method Setup ---")
    for cmd_info in result["auth_commands"]:
        print(f"# {cmd_info['description']}")
        print(cmd_info["command"])
        print()

    print("--- Secret Paths ---")
    for s in result["secrets"]:
        caps = ", ".join(s["capabilities"])
        print(f" {s['type']:15s} {s['path']:50s} [{caps}]")
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse args, build the configuration, and print it."""
    parser = argparse.ArgumentParser(
        description="Generate Vault policy and auth configuration from application requirements.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent("""\
            Secret types:
              db-creds     Dynamic database credentials (read-only)
              db-admin     Dynamic database credentials (read-write)
              api-key      Static API keys in KV v2
              tls-cert     TLS certificate issuance via PKI
              encryption   Transit encryption-as-a-service
              ssh-cert     SSH certificate signing
              config       Application configuration secrets

            Examples:
              %(prog)s --app-name payment-svc --auth-method approle --secrets "db-creds,api-key"
              %(prog)s --app-name api-gw --auth-method kubernetes --secrets "db-creds,config" --namespace prod --json
            """),
    )
    parser.add_argument("--app-name", required=True, help="Application or service name")
    parser.add_argument(
        "--auth-method",
        required=True,
        choices=["approle", "kubernetes", "oidc"],
        help="Vault auth method to configure",
    )
    parser.add_argument("--secrets", required=True, help="Comma-separated secret types (e.g., db-creds,api-key,tls-cert)")
    parser.add_argument("--environment", default="production", help="Target environment (default: production)")
    parser.add_argument("--namespace", help="Kubernetes namespace (for kubernetes auth method)")
    parser.add_argument("--json", action="store_true", dest="json_output", help="Output as JSON")

    args = parser.parse_args()
    result = build_output(args.app_name, args.auth_method, args.secrets, args.environment, args.namespace)

    # NOTE: in JSON mode an error payload is printed with exit code 0;
    # only print_human exits non-zero on error.
    if args.json_output:
        print(json.dumps(result, indent=2))
    else:
        print_human(result)


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user