Public REST API exposing Wyltek's AI research findings. Zero auth for consumers — designed for agent-to-agent access.
{
"status": "ok",
"service": "wyltek-research-api",
"version": "2.0.0",
"source": "github:toastmanAu/kernel-workspace/research/findings",
"endpoints": [...]
}
| Parameter | Type | Description |
|---|---|---|
| `tag` (optional) | string | Filter by tag (case-insensitive). All results include title, date, tags and summary. |
| `q` (optional) | string | Full-text search across id, title, tags and summary fields. |
| `limit` (optional) | integer | Maximum number of results to return. Default: all. |
# All findings (includes title, tags, summary)
curl https://api.wyltekindustries.com/api/research/findings

# Filter by tag
curl "https://api.wyltekindustries.com/api/research/findings?tag=fiber&limit=10"

# Full-text search
curl "https://api.wyltekindustries.com/api/research/findings?q=payment+channel&limit=5"
// All findings
const listRes = await fetch('https://api.wyltekindustries.com/api/research/findings');
const { count, findings } = await listRes.json();

// With metadata + tag filter.
// NOTE: the second request uses a distinct binding — the original example
// declared `const res` twice in the same scope, which is a SyntaxError.
const taggedRes = await fetch(
  'https://api.wyltekindustries.com/api/research/findings?tag=ckb&limit=20'
);
const data = await taggedRes.json();
data.findings.forEach(f => console.log(f.title, f.tags));
import requests

BASE = "https://api.wyltekindustries.com"

# List findings with metadata, filtered by tag.
response = requests.get(
    f"{BASE}/api/research/findings",
    params={"tag": "fiber", "limit": 20},
)
for finding in response.json()["findings"]:
    print(finding["title"], finding["tags"])
{
"count": 188,
"findings": [
{
"id": "fiber-payment-channels",
"filename": "fiber-payment-channels.md",
"url": "https://raw.githubusercontent.com/...",
"size": 14230,
"title": "Fiber Network Payment Channels",
"date": "2026-03-18",
"tags": ["fiber", "ckb", "payment-channels"],
"summary": "Overview of Fiber Network's payment channel architecture..."
}
]
}
| Parameter | Type | Description |
|---|---|---|
| id | string | Finding ID (filename without .md extension). |
| Query Parameter | Type | Description |
|---|---|---|
| `format` (optional) | string | Set to `markdown` to return raw markdown instead of JSON. Content-Type: text/markdown. |
# JSON (metadata + content)
curl https://api.wyltekindustries.com/api/research/findings/fiber-payment-channels

# Raw markdown
curl "https://api.wyltekindustries.com/api/research/findings/fiber-payment-channels?format=markdown"
// JSON response
const res = await fetch(
  'https://api.wyltekindustries.com/api/research/findings/fiber-payment-channels'
);
const { title, date, tags, content } = await res.json();

// Raw markdown (e.g. to pass directly to an LLM)
const md = await fetch(
  'https://api.wyltekindustries.com/api/research/findings/fiber-payment-channels?format=markdown'
).then(r => r.text());
import requests

BASE = "https://api.wyltekindustries.com"
finding_id = "fiber-payment-channels"

# JSON: metadata plus full content.
data = requests.get(f"{BASE}/api/research/findings/{finding_id}").json()
print(data["title"], data["tags"])

# Raw markdown (feed directly to LLM context).
md = requests.get(
    f"{BASE}/api/research/findings/{finding_id}",
    params={"format": "markdown"},
).text
{
"id": "fiber-payment-channels",
"title": "Fiber Network Payment Channels",
"date": "2026-03-18",
"tags": ["fiber", "ckb", "payment-channels"],
"summary": "Overview of Fiber Network's payment channel architecture...",
"content": "# Research: Fiber Network Payment Channels\n\n..."
}
{ "error": "Not found", "id": "bad-id" }
{
"count": 47,
"tags": [
{ "tag": "ckb", "count": 63 },
{ "tag": "fiber", "count": 41 },
{ "tag": "esp32", "count": 28 },
...
]
}
This API is designed for agent-to-agent consumption. No API keys, no sessions, no webhooks — just plain HTTP GET. Agents can pull research context on demand and inject it into their working memory or LLM context window.
Before starting a task, query for relevant findings and inject them as context. Useful for OpenClaw agents, LangChain tools, or any agent framework.
- Fetch `fiber` findings before working on payment channel tasks
- Pull `ckb` findings to inform transaction-building decisions
- Use `?format=markdown` for full content

# In an OpenClaw skill or heartbeat task
BASE = "https://api.wyltekindustries.com"

async def get_research_context(tag, limit=5):
    r = requests.get(f"{BASE}/api/research/findings",
                     params={"tag": tag, "limit": limit})
    findings = r.json()["findings"]
    # Build context block for LLM
    context = "\n\n".join(
        f"## {f['title']}\n{f.get('summary', '')}"
        for f in findings
    )
    return context
import requests
def fetch_research_context(tag: str, limit: int = 5) -> str:
"""Pull relevant research and format for LLM injection."""
BASE = "https://api.wyltekindustries.com"
r = requests.get(
f"{BASE}/api/research/findings",
params={"tag": tag, "limit": str(limit)}
)
findings = r.json().get("findings", [])
return "\n\n---\n\n".join(
f"**{f['title']}** ({f.get('date','?')})\n{f.get('summary','')}"
for f in findings
)
# Example: inject into LLM prompt
context = fetch_research_context("fiber", limit=3)
prompt = f"Use this research context:\n\n{context}\n\nNow answer: ..."
List findings first (lightweight), then fetch full content only for the most relevant ones. Minimises token burn on context injection.
Fetch `?format=markdown` for the top N hits only.

import requests
import re
BASE = "https://api.wyltekindustries.com"

def smart_fetch(query_keywords: list[str], top_n: int = 3) -> list[str]:
    """Return raw markdown for the ``top_n`` findings best matching ``query_keywords``.

    Strategy: list findings first (cheap metadata call), score them locally
    by keyword hits in the finding ID, then fetch full markdown only for the
    best-scoring hits — minimises token burn on context injection.
    """
    # 1. Get the cheap listing (IDs only — no full content transferred).
    listing = requests.get(f"{BASE}/api/research/findings").json()
    finding_ids = [f["id"] for f in listing["findings"]]

    # 2. Score by keyword match in the ID string.
    #    Lowercase the keywords once, outside the sort key, and avoid
    #    shadowing the builtin `id` (original used `lambda id: ...`).
    keywords = [kw.lower() for kw in query_keywords]
    scored = sorted(
        finding_ids,
        key=lambda fid: sum(kw in fid for kw in keywords),
        reverse=True,
    )

    # 3. Fetch full markdown for the top hits only.
    results = []
    for finding_id in scored[:top_n]:
        md = requests.get(
            f"{BASE}/api/research/findings/{finding_id}",
            params={"format": "markdown"},
        ).text
        results.append(md)
    return results
Use the tags endpoint at startup to discover what topics are covered before planning research tasks. Avoids duplicating work that's already been done.
import requests

BASE = "https://api.wyltekindustries.com"

# Fetch tag coverage once, cache locally.
tags_data = requests.get(f"{BASE}/api/research/tags").json()
covered_tags = {t["tag"] for t in tags_data["tags"]}

def already_researched(topic: str) -> bool:
    """Return True if any keyword of `topic` matches an already-covered tag."""
    keywords = topic.lower().split()
    return any(kw in covered_tags for kw in keywords)

# Before queuing a new research task:
if not already_researched("fiber htlc"):
    queue_research_task("fiber htlc routing")
Use `?tag=X&limit=5` for tagged summaries — inject the top 2–3 into LLM context. On demand: fetch individual findings with `?format=markdown` when full depth is needed. This keeps token cost minimal while giving your agent strong domain context.