How do I build my first AI on Windows for SEO & performance analysis
Would you like to develop your own AI-based solution that automatically reads websites, collects SEO and performance data, and turns it into concrete, actionable recommendations? In this hands-on tutorial we show you, step by step, how to build your first SEO and performance AI with Python on Windows, without any complicated setup.
How your SEO & performance AI works
Our solution uses the PageSpeed Insights API to read relevant metrics such as the performance score, the SEO score, and the Core Web Vitals (LCP, INP, CLS). The AI evaluates this data, identifies optimization potential, and produces a prioritized to-do list with actionable suggestions for faster load times and better search engine performance.
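To make this concrete, here is a minimal sketch of the raw API call the tutorial builds on and where the values mentioned above live in the response. The URL and YOUR_API_KEY are placeholders; the field paths follow the PSI v5 response format.

# sketch: one raw PageSpeed Insights request and the fields we will read later
import requests

resp = requests.get(
    "https://www.googleapis.com/pagespeedonline/v5/runPagespeed",
    params={
        "url": "https://www.example.com",    # placeholder URL
        "key": "YOUR_API_KEY",               # placeholder key (created in the Prerequisites section below)
        "strategy": "mobile",
        "category": ["PERFORMANCE", "SEO"],  # categories to include in the result
    },
    timeout=60,
)
lh = resp.json()["lighthouseResult"]
print("Performance score:", lh["categories"]["performance"]["score"])  # 0.0–1.0
print("SEO score:", lh["categories"]["seo"]["score"])
print("LCP in ms:", lh["audits"]["largest-contentful-paint"]["numericValue"])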
Prerequisites
Python installed
see the guide for Windows here
see the guide for Ubuntu here
Google API key for PageSpeed Insights
https://console.cloud.google.com/ → create a project
View all APIs
Enable the PageSpeed Insights API
"Create credentials" → create an API key (you can verify the key with the short check below)
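A quick way to confirm the new key works before writing any real code: send one test request and check the HTTP status (a minimal sketch; YOUR_API_KEY and the test URL are placeholders).

# sketch: sanity check for the freshly created API key
import requests

r = requests.get(
    "https://www.googleapis.com/pagespeedonline/v5/runPagespeed",
    params={"url": "https://www.example.com", "key": "YOUR_API_KEY"},
    timeout=60,
)
if r.ok:
    print("API key accepted, HTTP", r.status_code)
else:
    # Google APIs usually return an "error" object with a readable message
    print("Request failed:", r.status_code, r.json().get("error", {}).get("message"))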
Create the project
Change into a directory of your choice and run the following commands one after another in Windows PowerShell (an optional check follows after the command block):
mkdir webpages-insights-ki
cd webpages-insights-ki
python -m venv .venv
. .\.venv\Scripts\Activate.ps1
python -m pip install --upgrade pip
pip install requests rich pydantic streamlit
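Optional: a short import check confirms that the virtual environment is active and the four packages are available (a minimal sketch; run it with python from inside the activated environment).

# check_env.py – optional: verify that the packages installed above are importable
import requests, rich, pydantic, streamlit

print("Environment OK:", requests.__name__, rich.__name__, pydantic.__name__, streamlit.__name__)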
Scripts
Two scripts need to be created:
Place both scripts in the webpages-insights-ki directory
The logic: insights_ki.py
The export: run_report.py
insights_ki.py
# insights_ki.py
from __future__ import annotations
import requests
from typing import Dict, Any, List
from dataclasses import dataclass
from math import isfinite
PSI_ENDPOINT = "https://www.googleapis.com/pagespeedonline/v5/runPagespeed"
@dataclass
class ActionItem:
title: str
rationale: str
    impact_ms: float  # estimated savings (ms) from PSI
    effort: str       # "low", "medium", "high"
    category: str     # e.g. "Performance", "SEO", "Best Practices"
tips: List[str]
def _call_psi(url: str, api_key: str, strategy: str = "mobile") -> Dict[str, Any]:
    params = {
        "url": url,
        "key": api_key,
        "strategy": strategy,
        # request all categories we read scores for below; without this, PSI only returns "performance"
        # (documented values: PERFORMANCE, SEO, ACCESSIBILITY, BEST_PRACTICES, PWA)
        "category": ["PERFORMANCE", "SEO", "ACCESSIBILITY", "BEST_PRACTICES"],
    }
r = requests.get(PSI_ENDPOINT, params=params, timeout=60)
r.raise_for_status()
return r.json()
def _ms(val: Any) -> float:
try:
v = float(val)
return v if isfinite(v) else 0.0
except Exception:
return 0.0
def extract_summary(psi_json: Dict[str, Any]) -> Dict[str, Any]:
lh = psi_json.get("lighthouseResult", {})
cats = lh.get("categories", {})
audits = lh.get("audits", {})
core = {
"LCP_ms": _ms(audits.get("largest-contentful-paint", {}).get("numericValue")),
"CLS": audits.get("cumulative-layout-shift", {}).get("numericValue"),
"INP_ms": _ms(audits.get("interactive", {}).get("numericValue")), # Fallback, echte INP: "experimental-interaction-to-next-paint"
}
    # use the real INP audit if it is present:
if "experimental-interaction-to-next-paint" in audits:
core["INP_ms"] = _ms(audits["experimental-interaction-to-next-paint"].get("numericValue"))
scores = {
"performance": cats.get("performance", {}).get("score"),
"seo": cats.get("seo", {}).get("score"),
"accessibility": cats.get("accessibility", {}).get("score"),
"best-practices": cats.get("best-practices", {}).get("score"),
}
# Opportunities & Diagnostics
opps = []
diags = []
for k, a in audits.items():
details = a.get("details", {})
if a.get("scoreDisplayMode") == "manual":
continue
if a.get("details", {}).get("type") == "opportunity":
opps.append({
"id": k,
"title": a.get("title"),
"description": a.get("description"),
"overallSavingsMs": _ms(details.get("overallSavingsMs")),
})
elif a.get("details", {}).get("type") in ("debugdata", "table"):
# Diagnostics & Hinweise
if a.get("score") is not None and a.get("score") < 1:
diags.append({
"id": k,
"title": a.get("title"),
"description": a.get("description"),
})
return {
"core_web_vitals": core,
"scores": scores,
"opportunities": sorted(opps, key=lambda x: x["overallSavingsMs"], reverse=True),
"diagnostics": diags,
}
def _effort_from_audit_id(audit_id: str) -> str:
    # heuristic; fine-tune these sets for your own setup
low = {"uses-text-compression","uses-optimized-images","uses-responsive-images","unused-css-rules",
"render-blocking-resources","font-display","uses-rel-preload","uses-rel-preconnect",
"image-aspect-ratio","offscreen-images"}
high = {"server-response-time","efficient-animated-content","uses-long-cache-ttl"}
if audit_id in low: return "low"
if audit_id in high: return "high"
return "medium"
def _category_from_audit_id(audit_id: str) -> str:
perf = {"server-response-time","render-blocking-resources","unused-css-rules","unminified-css",
"unminified-javascript","uses-text-compression","uses-long-cache-ttl","uses-rel-preload",
"uses-rel-preconnect","largest-contentful-paint-element","third-party-facades",
"efficient-animated-content","uses-optimized-images","uses-responsive-images"}
seo = {"meta-description","hreflang","link-text","crawlable-anchors","robots-txt"}
if audit_id in seo: return "SEO"
if audit_id in perf: return "Performance"
return "Best Practices"
def _tips_for(audit_id: str) -> List[str]:
    mapping = {
        "server-response-time": [
            "PHP 8.2/8.3, enable OPcache",
            "Check database indexes & query cache",
            "Consider a CDN and edge caching (HTML micro-cache)"
        ],
        "render-blocking-resources": [
            "Extract critical-path CSS, load the rest via `media=print` + `onload`",
            "Set `defer` on JS; keep inlined critical styles < 10 KB"
        ],
        "uses-text-compression": [
            "Enable gzip/Brotli for HTML, CSS, JS",
            "Brotli level 5–7 as a starting point"
        ],
        "uses-long-cache-ttl": [
            "Cache headers for static assets: 6–12 months",
            "Use file versioning/hashes"
        ],
        "uses-optimized-images": [
            "Serve images as AVIF/WebP",
            "Exact dimensions, no oversized assets"
        ],
        "uses-responsive-images": [
            "Maintain `srcset`/`sizes` correctly",
            "Mark the LCP image with `fetchpriority=high`"
        ],
        "font-display": [
            "Set `font-display: swap`",
            "Use WOFF2, consider subsetting fonts"
        ],
        "largest-contentful-paint-element": [
            "Identify the LCP element (hero image/headline)",
            "Preload LCP resources; inline critical CSS"
        ],
        "third-party-facades": [
            "Configure consent mode/CMP correctly",
            "Use facades/delayed loading for YouTube, Maps, social widgets"
        ],
        "efficient-animated-content": [
            "Avoid heavy GIFs, switch to MP4/WebM",
            "Use CSS animations sparingly"
        ],
    }
    return mapping.get(audit_id, ["Open the audit in Lighthouse and apply its specific recommendations."])
def propose_actions(summary: Dict[str, Any]) -> List[ActionItem]:
items: List[ActionItem] = []
for o in summary["opportunities"]:
audit_id = o["id"]
items.append(ActionItem(
title=o["title"],
rationale=o["description"] or "",
impact_ms=o["overallSavingsMs"],
effort=_effort_from_audit_id(audit_id),
category=_category_from_audit_id(audit_id),
tips=_tips_for(audit_id)
))
    # additional suggestions driven by the Core Web Vitals
    cwv = summary["core_web_vitals"]
    if cwv.get("LCP_ms", 0) > 2500:
        items.append(ActionItem(
            title="Bring LCP below 2.5 s",
            rationale="A large LCP element slows the page down. Prioritize the loading path & resources.",
            impact_ms=500,
            effort="medium",
            category="Performance",
            tips=[
                "Identify the LCP image (DevTools → Performance → LCP)",
                "Serve the image as AVIF/WebP + `preload` + `fetchpriority=high`",
                "Inline critical CSS, load the rest async/defer"
            ]
        ))
    if (cwv.get("INP_ms") or 0) > 200:
        items.append(ActionItem(
            title="Improve interactivity (INP)",
            rationale="Slow responses to user input events.",
            impact_ms=200,
            effort="medium",
            category="Performance",
            tips=[
                "Split event handlers, keep long tasks < 50 ms",
                "Shrink JS bundles; defer hydration (islands)"
            ]
        ))
    if (cwv.get("CLS") or 0) > 0.1:
        items.append(ActionItem(
            title="Reduce layout shifts (CLS)",
            rationale="An unstable layout frustrates users.",
            impact_ms=100,
            effort="low",
            category="Best Practices",
            tips=[
                "Always set explicit width/height for images/embeds",
                "`font-display: swap` + tolerate FOUT",
                "Reserve placeholders for ads/embeds"
            ]
        ))
    # prioritization: highest impact first, then lower effort
effort_order = {"low": 0, "medium": 1, "high": 2}
items.sort(key=lambda x: (-x.impact_ms, effort_order.get(x.effort, 3)))
return items
def format_ms(ms: float) -> str:
return f"{int(ms)} ms" if ms >= 1 else f"{ms:.1f} ms"
def analyze_url(url: str, api_key: str) -> Dict[str, Any]:
mobile = _call_psi(url, api_key, "mobile")
desktop = _call_psi(url, api_key, "desktop")
m = extract_summary(mobile)
d = extract_summary(desktop)
    actions = propose_actions(m)  # you could also merge mobile and desktop here
return {
"url": url,
"mobile": m,
"desktop": d,
"actions": actions
}
def to_markdown(report: Dict[str, Any]) -> str:
def score_pct(s):
return f"{int(round((s or 0)*100))}%" if s is not None else "–"
m, d = report["mobile"], report["desktop"]
lines = []
lines.append(f"# Insights-Report für {report['url']}\n")
lines.append("## Core Web Vitals (Mobile)")
c = m["core_web_vitals"]
lines.append(f"- **LCP:** {format_ms(c.get('LCP_ms',0))}")
lines.append(f"- **INP:** {format_ms(c.get('INP_ms',0))}")
lines.append(f"- **CLS:** {c.get('CLS','–')}\n")
lines.append("## Scores")
lines.append(f"- Performance (M/D): {score_pct(m['scores']['performance'])} / {score_pct(d['scores']['performance'])}")
lines.append(f"- SEO (M/D): {score_pct(m['scores']['seo'])} / {score_pct(d['scores']['seo'])}")
lines.append(f"- Accessibility (M/D): {score_pct(m['scores']['accessibility'])} / {score_pct(d['scores']['accessibility'])}")
lines.append(f"- Best Practices (M/D): {score_pct(m['scores']['best-practices'])} / {score_pct(d['scores']['best-practices'])}\n")
lines.append("## Top-Chancen (Mobile, nach Einsparung)")
for o in m["opportunities"][:8]:
lines.append(f"- **{o['title']}** – Einsparung: {format_ms(o['overallSavingsMs'])}")
lines.append("\n## Handlungsvorschläge (Priorisiert)")
for i, a in enumerate(report["actions"], 1):
lines.append(f"{i}. **{a.title}** \n Kategorie: {a.category} • Aufwand: {a.effort} • Impact: {format_ms(a.impact_ms)}")
lines.append(f" – {a.rationale}")
for t in a.tips:
lines.append(f" - {t}")
return "\n".join(lines)
run_report.py
# run_report.py
import json, argparse, pathlib
from insights_ki import analyze_url, to_markdown
def main():
    p = argparse.ArgumentParser(description="Website Insights AI – PSI analysis")
    p.add_argument("--url", required=True, help="URL to analyze (including https://)")
    p.add_argument("--key", required=True, help="Google PSI API key")
    p.add_argument("--out", default="report.md", help="Output file (Markdown)")
args = p.parse_args()
report = analyze_url(args.url, args.key)
md = to_markdown(report)
out = pathlib.Path(args.out)
out.write_text(md, encoding="utf-8")
    # additionally save the raw data as compact JSON
pathlib.Path("report.json").write_text(
json.dumps(report, default=lambda o: o.__dict__, ensure_ascii=False, indent=2),
encoding="utf-8"
)
print(f"✅ Fertig. Markdown: {out} • JSON: report.json")
if __name__ == "__main__":
main()
Running the script
python .\run_report.py --url https://www.deineseite.de --key YOUR_API_KEY --out report.md
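If you want to analyze several pages in one run, a small wrapper around analyze_url and to_markdown does the job (a sketch; the URL list is only an example):

# batch_report.py – optional: write one Markdown report per URL in a single run
import pathlib
from insights_ki import analyze_url, to_markdown

API_KEY = "YOUR_API_KEY"                                       # placeholder
urls = ["https://www.example.com", "https://www.example.org"]  # example list

for url in urls:
    report = analyze_url(url, API_KEY)
    filename = url.replace("https://", "").replace("/", "_") + ".md"
    pathlib.Path(filename).write_text(to_markdown(report), encoding="utf-8")
    print("Wrote", filename)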