The workflow JSON
Copy or download the full n8n JSON below. Paste it into a new n8n workflow, add your credentials, and activate it. Full import guide →
{
"name": "01 - Text Summarizer (Deterministic LLM)",
"nodes": [
{
"parameters": {
"httpMethod": "POST",
"path": "summarize",
"responseMode": "lastNode",
"options": {}
},
"id": "Webhook-1",
"name": "Webhook",
"type": "n8n-nodes-base.webhook",
"typeVersion": 2,
"position": [
260,
300
]
},
{
"parameters": {
"functionCode": "// Build request for an OpenAI-compatible LLM (LM Studio, OpenRouter, etc.).\n// Webhook sends the parsed request body as $json or $json.body.\nconst root = $json ?? {};\nconst body = (root.body && typeof root.body === 'object') ? root.body : root;\n\nconst textRaw = body.text ?? '';\nconst text = (textRaw ?? '').toString().trim();\nif (!text) {\n throw new Error('Missing body.text');\n}\n\n// Optional fields\nconst title = body.title ?? null;\nconst source = body.source ?? null;\nconst language = body.language ?? null;\n\nconst model = body.model || $env.OPENAI_MODEL || 'openai/gpt-oss-20b';\nconst temperature = body.temperature ?? 0.3;\nconst max_tokens = body.max_tokens ?? 600;\n\nconst SYSTEM_SUMMARY_PROMPT =\n 'You are a concise assistant. Summarize the input text in 3\u20135 sentences ' +\n 'and then provide 3 short actionable bullet points. ' +\n 'If a language is specified, respond in that language. ' +\n 'Reply as plain text without special tokens (e.g. <|...|>) or meta markers.';\n\nconst system = body.system || SYSTEM_SUMMARY_PROMPT;\n\n// Base models like openai/gpt-oss-20b are not chat-tuned.\n// Prefer /completions with a prompt for stable text output.\nconst useCompletions = body.useCompletions ?? /openai\\/gpt-oss-20b/i.test(model);\n\nconst apiBase = $env.OPENAI_API_BASE || 'http://host.docker.internal:1234/v1';\nconst apiUrl = apiBase + (useCompletions ? '/completions' : '/chat/completions');\n\nconst headers = {\n Authorization: 'Bearer ' + ($env.OPENAI_API_KEY || 'lm-studio'),\n 'Content-Type': 'application/json',\n};\n\nlet payload;\nif (useCompletions) {\n const promptParts = [system];\n if (title) promptParts.push(`Title: ${title}`);\n if (source) promptParts.push(`Source: ${source}`);\n if (language) promptParts.push(`Language: ${language}`);\n promptParts.push('---', text);\n const prompt = promptParts.join('\\n\\n');\n\n payload = { model, prompt, temperature, max_tokens };\n} else {\n const messages = [{ role: 'system', content: system }];\n const metaLines = [];\n if (title) metaLines.push(`Title: ${title}`);\n if (source) metaLines.push(`Source: ${source}`);\n if (language) metaLines.push(`Language: ${language}`);\n if (metaLines.length > 0) {\n messages.push({ role: 'system', content: metaLines.join('\\n') });\n }\n messages.push({ role: 'user', content: text });\n\n payload = { model, messages, temperature, max_tokens };\n}\n\nreturn [{ json: { apiUrl, headers, payload } }];"
},
"id": "Function-1",
"name": "Build Body",
"type": "n8n-nodes-base.function",
"typeVersion": 1,
"position": [
520,
300
]
},
{
"parameters": {
"method": "POST",
"url": "={{$json.apiUrl}}",
"sendHeaders": true,
"specifyHeaders": "json",
"jsonHeaders": "={{JSON.stringify($json.headers)}}",
"sendBody": true,
"contentType": "json",
"specifyBody": "json",
"jsonBody": "={{JSON.stringify($json.payload)}}",
"options": {}
},
"id": "HTTPRequest-1",
"name": "LM Studio Chat",
"type": "n8n-nodes-base.httpRequest",
"typeVersion": 4,
"position": [
790,
300
]
},
{
"parameters": {
"functionCode": "// Normalise and clean the raw LLM response into summary + token usage.\nconst choice = $json.choices?.[0] ?? {};\nconst raw = choice.message?.content ?? choice.text ?? '';\n\nlet cleaned = raw.replace(/<\\|[^>]+?\\|>/g, '').trim();\n\n// Some models emit both analysis/final sections in one string.\nconst lower = cleaned.toLowerCase();\nconst markerIndex = lower.indexOf('assistantfinal');\nif (markerIndex !== -1) {\n cleaned = cleaned.slice(markerIndex + 'assistantfinal'.length).trim();\n}\n\ncleaned = cleaned.replace(/^analysis\\s*/i, '').trim();\n\nconst usage = $json.usage ?? {};\n\nreturn [\n {\n json: {\n summary: cleaned,\n raw_choice: raw,\n prompt_tokens: usage.prompt_tokens ?? 0,\n completion_tokens: usage.completion_tokens ?? 0,\n total_tokens: usage.total_tokens ?? 0,\n },\n },\n];"
},
"id": "Function-CleanOutput",
"name": "Clean LLM Output",
"type": "n8n-nodes-base.function",
"typeVersion": 1,
"position": [
1030,
300
]
},
{
"parameters": {
"keepOnlySet": true,
"values": {
"string": [
{
"name": "summary",
"value": "={{$json[\"summary\"]}}"
},
{
"name": "raw_choice",
"value": "={{$json[\"raw_choice\"]}}"
},
{
"name": "model",
"value": "={{$node[\"Build Body\"].json[\"payload\"][\"model\"]}}"
},
{
"name": "endpoint",
"value": "={{$node[\"Build Body\"].json.apiUrl}}"
}
],
"number": [
{
"name": "prompt_tokens",
"value": "={{$json[\"prompt_tokens\"]}}"
},
{
"name": "completion_tokens",
"value": "={{$json[\"completion_tokens\"]}}"
},
{
"name": "total_tokens",
"value": "={{$json[\"total_tokens\"]}}"
}
]
},
"options": {}
},
"id": "Set-1",
"name": "Format Response",
"type": "n8n-nodes-base.set",
"typeVersion": 2,
"position": [
1290,
300
]
}
],
"connections": {
"Webhook": {
"main": [
[
{
"node": "Build Body",
"type": "main",
"index": 0
}
]
]
},
"Build Body": {
"main": [
[
{
"node": "LM Studio Chat",
"type": "main",
"index": 0
}
]
]
},
"LM Studio Chat": {
"main": [
[
{
"node": "Clean LLM Output",
"type": "main",
"index": 0
}
]
]
},
"Clean LLM Output": {
"main": [
[
{
"node": "Format Response",
"type": "main",
"index": 0
}
]
]
}
},
"active": false,
"settings": {},
"staticData": null
}
About this workflow
01 - Text Summarizer (Deterministic LLM). Uses httpRequest. Webhook trigger; 5 nodes.
Source: https://github.com/slayerlux/n8n-llm-workflows/blob/main/workflows/01-deterministic-text-summarizer.json — credit to the original creator. Request a take-down →