AutomationFlowsAI & RAG › Voice Agent Cross-Session Memory (StudioMeyer)

Voice Agent Cross-Session Memory (StudioMeyer)

Voice Agent Cross-Session Memory (StudioMeyer). Uses stickyNote, n8n-nodes-studiomeyer-memory, openAi, anthropic. Webhook trigger; 31 nodes.

Webhook trigger★★★★★ complexityAI-powered31 nodesN8N Nodes Studiomeyer MemoryOpen AiAnthropic
AI & RAG Trigger: Webhook Nodes: 31 Complexity: ★★★★★ AI nodes: yes

The workflow JSON

Copy or download the full n8n JSON below. Paste it into a new n8n workflow, add your credentials, activate. Full import guide →

Download .json
{
  "name": "Voice Agent Cross-Session Memory (StudioMeyer)",
  "nodes": [
    {
      "parameters": {
        "content": "## Voice Agent with Cross-Session Memory (Multi-Provider)\n\n**Stack:** Vapi/Retell webhook \u2192 StudioMeyer Memory \u2192 LLM (you choose: OpenAI, Anthropic, or any provider) \u2192 reply + persist outcome.\n\n**Multi-Provider Switch:** the `Set Provider` node decides which LLM responds. Default is `openai`. Change to `anthropic` (or add your own branch) without rebuilding the rest of the workflow.\n\n**Memory model:** Each unique caller phone becomes an `Entity` of type `caller`. Every call appends an observation. The next call retrieves the caller's history before the LLM speaks.",
        "height": 320,
        "width": 480,
        "color": 6
      },
      "id": "note-intro",
      "name": "Sticky Note - Intro",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        -260,
        -40
      ]
    },
    {
      "parameters": {
        "content": ">> SET ME <<\n\n**Webhook URL** for Vapi/Retell server URL.\n\nAfter activation, copy this node's Production URL into your voice provider's Server URL setting. Vapi sends `end-of-call-report` events here, Retell sends `call_ended`. The Code node below handles both shapes.",
        "height": 200,
        "width": 320,
        "color": 5
      },
      "id": "note-webhook",
      "name": "Sticky Note - Webhook",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        240,
        -40
      ]
    },
    {
      "parameters": {
        "httpMethod": "POST",
        "path": "vapi-callback",
        "responseMode": "responseNode",
        "options": {
          "rawBody": true
        }
      },
      "id": "voice-1-webhook",
      "name": "Vapi/Retell Webhook",
      "type": "n8n-nodes-base.webhook",
      "typeVersion": 2,
      "position": [
        240,
        280
      ]
    },
    {
      "parameters": {
        "jsCode": "// Normalize Vapi/Retell payload into a canonical shape\n// Vapi end-of-call-report: $json.message.call.customer.number + transcript\n// Retell call_ended:        $json.call.from_number + transcript\n// Adjust the two extractors below to match YOUR provider.\n\nconst body = $input.first().json;\n\n// --- Vapi shape (default) ---\nlet callerPhone = body?.message?.call?.customer?.number;\nlet transcript = body?.message?.transcript ?? body?.message?.artifact?.transcript;\nlet callId = body?.message?.call?.id;\n\n// --- Retell fallback ---\nif (!callerPhone) callerPhone = body?.call?.from_number;\nif (!transcript) transcript = body?.call?.transcript;\nif (!callId) callId = body?.call?.call_id;\n\nif (!callerPhone) {\n  // Unknown caller (no phone in payload). Still let the agent answer, but\n  // skip the memory lookup. The downstream IF branches on this flag.\n  return [{ json: { callerPhone: null, transcript, callId, hasPhone: false } }];\n}\n\n// E.164-ish normalization: strip spaces, parens, dashes; keep leading +\nconst normalized = String(callerPhone).replace(/[\\s\\(\\)\\-]/g, '');\n\nreturn [{\n  json: {\n    callerPhone: normalized,\n    transcript: transcript ?? '',\n    callId: callId ?? '',\n    hasPhone: true,\n    receivedAt: new Date().toISOString(),\n  },\n}];"
      },
      "id": "voice-2-normalize",
      "name": "Normalize Payload",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        880,
        280
      ]
    },
    {
      "parameters": {
        "resource": "entity",
        "operation": "search",
        "query": "={{ $json.callerPhone }}",
        "entityType": "caller",
        "limit": 1
      },
      "id": "voice-3-entity-search",
      "name": "Memory: Lookup Caller",
      "type": "n8n-nodes-studiomeyer-memory.studioMeyerMemory",
      "typeVersion": 1,
      "position": [
        1240,
        200
      ]
    },
    {
      "parameters": {
        "conditions": {
          "options": {
            "caseSensitive": true,
            "leftValue": "",
            "typeValidation": "strict",
            "version": 2
          },
          "conditions": [
            {
              "id": "cond-known-caller",
              "leftValue": "={{ ($json.entities ?? $json.results ?? []).length }}",
              "rightValue": 0,
              "operator": {
                "type": "number",
                "operation": "gt"
              }
            }
          ],
          "combinator": "and"
        },
        "options": {}
      },
      "id": "voice-4-if-known",
      "name": "Known Caller?",
      "type": "n8n-nodes-base.if",
      "typeVersion": 2.2,
      "position": [
        1440,
        200
      ]
    },
    {
      "parameters": {
        "resource": "memory",
        "operation": "search",
        "query": "=caller {{ $('Normalize Payload').item.json.callerPhone }}",
        "limit": 5,
        "project": "voice-agent",
        "recencyWeight": 0.5
      },
      "id": "voice-5a-memory-search",
      "name": "Memory: Recent Context",
      "type": "n8n-nodes-studiomeyer-memory.studioMeyerMemory",
      "typeVersion": 1,
      "position": [
        1660,
        100
      ]
    },
    {
      "parameters": {
        "resource": "entity",
        "operation": "create",
        "name": "={{ $('Normalize Payload').item.json.callerPhone }}",
        "entityType": "caller",
        "project": "voice-agent",
        "observations": "=First call on {{ $('Normalize Payload').item.json.receivedAt }}"
      },
      "id": "voice-5b-entity-create",
      "name": "Memory: Create Caller",
      "type": "n8n-nodes-studiomeyer-memory.studioMeyerMemory",
      "typeVersion": 1,
      "position": [
        1660,
        320
      ]
    },
    {
      "parameters": {
        "jsCode": "// Build a plain-text context block the LLM can read.\n// Both phone-known and anonymous (no phone) paths converge here.\n\nconst transcript = $('Normalize Payload').item.json.transcript || '(no transcript)';\nconst callerPhone = $('Normalize Payload').item.json.callerPhone || 'anonymous';\nconst hasPhone = $('Normalize Payload').item.json.hasPhone === true;\n\n// Pull recent memory results if the caller was known (phone path only)\nlet contextLines = [];\nif (hasPhone) {\n  try {\n    const memSearch = $('Memory: Recent Context').first()?.json;\n    const results = memSearch?.results ?? memSearch?.data?.results ?? [];\n    contextLines = results.slice(0, 5).map((r, i) => {\n      const text = r.content ?? r.text ?? r.formatted ?? JSON.stringify(r).slice(0, 200);\n      return `${i + 1}. ${text}`;\n    });\n  } catch (e) {\n    // First-time caller path, no prior memory to summarize\n    contextLines = ['(this is a new caller, no prior interactions on file)'];\n  }\n} else {\n  // Anonymous caller path: no memory lookup ran, no prior context to surface\n  contextLines = ['(caller is anonymous, no memory lookup performed)'];\n}\n\nconst contextBlock = contextLines.length\n  ? contextLines.join('\\n')\n  : '(no relevant prior context)';\n\nconst systemPrompt = hasPhone\n  ? `You are a friendly voice agent for a small business.\\n\\nCaller phone: ${callerPhone}\\n\\nWhat we know about this caller from prior calls:\\n${contextBlock}\\n\\nReply concisely (under 60 words) and reference past context when relevant.`\n  : `You are a friendly voice agent for a small business.\\n\\nThe caller is anonymous (no phone number was provided by the voice provider). Treat this as a first-time caller. If the conversation needs continuity, ask the caller to share an email or callback number so you can recognize them next time.\\n\\nReply concisely (under 60 words).`;\n\nreturn [{\n  json: {\n    callerPhone,\n    transcript,\n    contextBlock,\n    hasPhone,\n    systemPrompt,\n  },\n}];"
      },
      "id": "voice-6-build-prompt",
      "name": "Build LLM Prompt",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        1900,
        280
      ]
    },
    {
      "parameters": {
        "content": ">> PICK YOUR LLM <<\n\nThis Set node decides the provider. Default is `openai`. Change to `anthropic` (or any value matching a Switch branch below) without rebuilding the rest of the flow.\n\n**Add a third provider** (e.g. Gemini, Mistral) by adding a new branch in the Switch node + a corresponding LLM node + connecting it back to `Normalize LLM Output`. Memory writes stay identical.",
        "height": 240,
        "width": 360,
        "color": 5
      },
      "id": "note-provider",
      "name": "Sticky Note - Provider Switch",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        2120,
        -100
      ]
    },
    {
      "parameters": {
        "mode": "manual",
        "duplicateItem": false,
        "assignments": {
          "assignments": [
            {
              "id": "set-provider",
              "name": "provider",
              "value": "openai",
              "type": "string"
            }
          ]
        },
        "includeOtherFields": true,
        "options": {}
      },
      "id": "voice-7-set-provider",
      "name": "Set Provider",
      "type": "n8n-nodes-base.set",
      "typeVersion": 3.4,
      "position": [
        2120,
        280
      ]
    },
    {
      "parameters": {
        "rules": {
          "values": [
            {
              "conditions": {
                "options": {
                  "caseSensitive": true,
                  "leftValue": "",
                  "typeValidation": "strict",
                  "version": 2
                },
                "conditions": [
                  {
                    "id": "rule-openai",
                    "leftValue": "={{ $json.provider }}",
                    "rightValue": "openai",
                    "operator": {
                      "type": "string",
                      "operation": "equals"
                    }
                  }
                ],
                "combinator": "and"
              },
              "renameOutput": true,
              "outputKey": "openai"
            },
            {
              "conditions": {
                "options": {
                  "caseSensitive": true,
                  "leftValue": "",
                  "typeValidation": "strict",
                  "version": 2
                },
                "conditions": [
                  {
                    "id": "rule-anthropic",
                    "leftValue": "={{ $json.provider }}",
                    "rightValue": "anthropic",
                    "operator": {
                      "type": "string",
                      "operation": "equals"
                    }
                  }
                ],
                "combinator": "and"
              },
              "renameOutput": true,
              "outputKey": "anthropic"
            }
          ]
        },
        "options": {
          "fallbackOutput": "extra",
          "renameFallbackOutput": "fallback"
        }
      },
      "id": "voice-8-switch",
      "name": "Route by Provider",
      "type": "n8n-nodes-base.switch",
      "typeVersion": 3.2,
      "position": [
        2340,
        280
      ]
    },
    {
      "parameters": {
        "resource": "text",
        "operation": "message",
        "modelId": {
          "__rl": true,
          "value": "gpt-5-mini",
          "mode": "list",
          "cachedResultName": "gpt-5-mini"
        },
        "messages": {
          "values": [
            {
              "content": "={{ $json.systemPrompt }}",
              "role": "system"
            },
            {
              "content": "={{ $json.transcript }}",
              "role": "user"
            }
          ]
        },
        "jsonOutput": false,
        "options": {
          "maxTokens": 200,
          "temperature": 0.4
        }
      },
      "id": "voice-9a-openai",
      "name": "OpenAI Reply",
      "type": "n8n-nodes-base.openAi",
      "typeVersion": 1.7,
      "position": [
        2560,
        160
      ],
      "onError": "continueErrorOutput"
    },
    {
      "parameters": {
        "resource": "text",
        "operation": "message",
        "modelId": {
          "__rl": true,
          "value": "claude-haiku-4-5",
          "mode": "list",
          "cachedResultName": "claude-haiku-4-5"
        },
        "messages": {
          "values": [
            {
              "content": "={{ $json.systemPrompt }}",
              "role": "system"
            },
            {
              "content": "={{ $json.transcript }}",
              "role": "user"
            }
          ]
        },
        "options": {
          "maxTokens": 200,
          "temperature": 0.4
        }
      },
      "id": "voice-9b-anthropic",
      "name": "Anthropic Reply",
      "type": "@n8n/n8n-nodes-langchain.anthropic",
      "typeVersion": 1,
      "position": [
        2560,
        400
      ],
      "onError": "continueErrorOutput"
    },
    {
      "parameters": {
        "jsCode": "// Normalize the LLM output across providers into a single field `replyText`.\n// OpenAI:    $json.choices[0].message.content\n// Anthropic: $json.content[0].text\n// Fallback:  any string-shaped property we can find.\n\nconst raw = $input.first().json;\n\nlet replyText = '';\n\n// OpenAI shape\nif (raw?.choices?.[0]?.message?.content) {\n  replyText = raw.choices[0].message.content;\n}\n// Anthropic shape\nelse if (Array.isArray(raw?.content) && raw.content[0]?.text) {\n  replyText = raw.content[0].text;\n}\n// Some n8n-wrapped variants\nelse if (raw?.message?.content) {\n  replyText = raw.message.content;\n}\nelse if (raw?.text) {\n  replyText = raw.text;\n}\nelse if (raw?.reply) {\n  replyText = raw.reply;\n}\nelse {\n  replyText = '(LLM returned no text, check provider response shape)';\n}\n\nreturn [{\n  json: {\n    replyText: String(replyText).trim(),\n    provider: $('Set Provider').item.json.provider,\n  },\n}];"
      },
      "id": "voice-10-normalize-output",
      "name": "Normalize LLM Output",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        2780,
        280
      ]
    },
    {
      "parameters": {
        "respondWith": "json",
        "responseBody": "={{ JSON.stringify({ reply: $json.replyText, caller_phone: $('Normalize Payload').item.json.callerPhone, call_id: $('Normalize Payload').item.json.callId, provider: $json.provider }) }}",
        "options": {}
      },
      "id": "voice-11-respond",
      "name": "Respond to Voice Provider",
      "type": "n8n-nodes-base.respondToWebhook",
      "typeVersion": 1.1,
      "position": [
        3000,
        280
      ]
    },
    {
      "parameters": {
        "resource": "entity",
        "operation": "observe",
        "entityRef": "={{ $('Normalize Payload').item.json.callerPhone }}",
        "observations": "=Call on {{ $('Normalize Payload').item.json.receivedAt }}, Caller said: {{ $('Normalize Payload').item.json.transcript.slice(0, 200) }} | Agent replied ({{ $json.provider }}): {{ $json.replyText.slice(0, 200) }}"
      },
      "id": "voice-12-entity-observe",
      "name": "Memory: Observe Call",
      "type": "n8n-nodes-studiomeyer-memory.studioMeyerMemory",
      "typeVersion": 1,
      "position": [
        3220,
        180
      ]
    },
    {
      "parameters": {
        "resource": "memory",
        "operation": "learn",
        "content": "=Voice call with {{ $('Normalize Payload').item.json.callerPhone }} on {{ $('Normalize Payload').item.json.receivedAt }} (provider: {{ $json.provider }}): {{ $json.replyText }}",
        "category": "insight",
        "project": "voice-agent",
        "tags": "=voice-agent, caller-{{ $('Normalize Payload').item.json.callerPhone }}, provider-{{ $json.provider }}",
        "confidence": 0.7
      },
      "id": "voice-13-memory-learn",
      "name": "Memory: Learn Outcome",
      "type": "n8n-nodes-studiomeyer-memory.studioMeyerMemory",
      "typeVersion": 1,
      "position": [
        3220,
        340
      ]
    },
    {
      "parameters": {
        "content": "## Memory writes (after Respond, execution-order-dependent)\n\nn8n's `executionOrder: v1` runs each output branch depth-first - one branch finishes before the next begins. When a node has multiple outputs, branches are ordered by canvas position (top-to-bottom, then left-to-right). `Respond to Voice Provider` is positioned higher and to the left of the memory-write branch on the canvas, so it executes first and the voice provider receives the reply before the memory writes start.\n\n**This is execution-order-dependent, not a hard async guarantee.** If you reposition nodes the order can change. Verify in your n8n setup (Settings -> Execution Order) if call latency is critical. For a stricter contract use a separate Execute-Workflow trigger or a queue (Redis, BullMQ, n8n Queue Mode).\n\nIf you don't care about per-call observations, delete `Memory: Observe Call` and keep only `Memory: Learn Outcome`. Both share `Normalize LLM Output` so they use whichever provider answered.",
        "height": 280,
        "width": 380,
        "color": 7
      },
      "id": "note-async-writes",
      "name": "Sticky Note - Memory Writes",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        3220,
        -40
      ]
    },
    {
      "parameters": {
        "jsCode": "// Webhook HMAC verification (opt-in via VAPI_SIGNING_SECRET or RETELL_SIGNING_SECRET).\n// Vapi sends 'x-vapi-signature' header with HMAC-SHA256 of raw body, hex-encoded.\n// Retell sends 'x-retell-signature' (same shape).\n//\n// The Webhook node has rawBody: true so the byte-stream is preserved on\n// $json.rawBody as a string. We MUST compute HMAC against the raw bytes\n// the provider signed, NOT against a re-stringified JSON object (which\n// has different byte-order and whitespace).\n//\n// To enable: set VAPI_SIGNING_SECRET (or RETELL_SIGNING_SECRET) env var on\n// the n8n host AND configure the same secret in your voice provider dashboard.\n// To disable: leave both env vars unset (default). The node passes through.\n//\n// Length-guard before timingSafeEqual is mandatory. Without it, an attacker\n// can trigger a RangeError with a 1-char signature and crash the workflow.\n\nconst vapiSecret = $env.VAPI_SIGNING_SECRET;\nconst retellSecret = $env.RETELL_SIGNING_SECRET;\n\nif (!vapiSecret && !retellSecret) {\n  return [{ json: $input.first().json }];\n}\n\nconst item = $input.first().json;\nconst headers = item?.headers ?? {};\n\n// n8n Webhook v2 with rawBody: true populates $json.rawBody as a string.\n// Fallback chain: rawBody field > body if it is already a string > stringify.\n// The stringify fallback only runs if the Webhook node was misconfigured\n// (rawBody: false), in which case HMAC will fail loud and the user will\n// fix the config.\nconst rawBody = item?.rawBody\n  ?? (typeof item?.body === 'string' ? item.body : JSON.stringify(item?.body ?? {}));\n\nconst crypto = require('crypto');\n\nfunction verifyHmac(secret, headerName) {\n  const sig = headers[headerName];\n  if (!sig || typeof sig !== 'string') return false;\n  const expected = crypto.createHmac('sha256', secret).update(rawBody).digest('hex');\n  const sigBuf = Buffer.from(sig, 'hex');\n  const expBuf = Buffer.from(expected, 'hex');\n  if (sigBuf.length !== expBuf.length) return false;\n  return crypto.timingSafeEqual(sigBuf, expBuf);\n}\n\nlet verified = false;\nif (vapiSecret && verifyHmac(vapiSecret, 'x-vapi-signature')) verified = true;\nif (!verified && retellSecret && verifyHmac(retellSecret, 'x-retell-signature')) verified = true;\n\nif (!verified) {\n  throw new Error('Webhook HMAC verification failed: signature missing or mismatch (check rawBody is enabled on the Webhook node)');\n}\n\nreturn [{ json: $input.first().json }];"
      },
      "id": "voice-pp-1-verify",
      "name": "Verify Webhook (opt-in)",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        400,
        280
      ]
    },
    {
      "parameters": {
        "jsCode": "// Rate limit (opt-in via RATE_LIMIT_ENABLED=1).\n// Per-caller-phone (or per-IP if anonymous) 60 requests in a 5-min window.\n// For voice this matters less than for chat (callers don't burst-call), but\n// a leaked webhook URL can spike LLM cost so we keep this layer for parity.\n//\n// To enable: set the n8n env var RATE_LIMIT_ENABLED to '1'.\n// To disable: leave the env var unset (default). The node passes through.\n//\n// Concurrency note: $getWorkflowStaticData is not atomic. For real production\n// loads use Nginx limit_req_zone or Cloudflare WAF in front of n8n.\n\nconst enabled = $env.RATE_LIMIT_ENABLED === '1';\nif (!enabled) {\n  return [{ json: $input.first().json }];\n}\n\nconst item = $input.first().json;\nconst body = item?.body ?? item;\nconst callerPhone = body?.message?.call?.customer?.number ?? body?.call?.from_number;\nconst ip = item?.headers?.['x-forwarded-for']?.split(',')[0]?.trim() ?? item?.headers?.['x-real-ip'] ?? 'unknown';\nconst bucketKey = callerPhone ? `phone:${callerPhone}` : `ip:${ip}`;\n\nconst data = $getWorkflowStaticData('global');\nconst buckets = data.rateBuckets ?? {};\nconst now = Date.now();\nconst WINDOW_MS = 5 * 60 * 1000;\nconst LIMIT = 60;\nconst MAX_BUCKETS = 5000;\n\nconst bucket = buckets[bucketKey] ?? { count: 0, windowStart: now };\nif (now - bucket.windowStart > WINDOW_MS) {\n  bucket.count = 0;\n  bucket.windowStart = now;\n}\nbucket.count++;\nbuckets[bucketKey] = bucket;\n\nif (Object.keys(buckets).length > MAX_BUCKETS) {\n  const cutoff = now - WINDOW_MS;\n  for (const k of Object.keys(buckets)) {\n    if (buckets[k].windowStart < cutoff) delete buckets[k];\n  }\n}\ndata.rateBuckets = buckets;\n\nif (bucket.count > LIMIT) {\n  throw new Error(`Rate limit exceeded for ${bucketKey}: ${bucket.count} requests in 5 min window`);\n}\n\nreturn [{ json: item }];"
      },
      "id": "voice-pp-2-ratelimit",
      "name": "Rate Limit (opt-in)",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        560,
        280
      ]
    },
    {
      "parameters": {
        "jsCode": "// Idempotency check (opt-in via IDEMPOTENCY_ENABLED=1).\n// Vapi/Retell can re-deliver end-of-call-report on transient 5xx. Without\n// dedup the workflow fires twice per call and writes duplicate observations.\n// Idempotency key is the callId (Vapi message.call.id / Retell call.call_id).\n//\n// To enable: set the n8n env var IDEMPOTENCY_ENABLED to '1'.\n// To disable: leave the env var unset (default). The node passes through.\n//\n// Concurrency note: $getWorkflowStaticData is not atomic and not cluster-\n// aware. For production scale, swap the staticData block for Redis SET NX\n// EX 300 (atomic, cluster-aware, auto-expires).\n\nconst enabled = $env.IDEMPOTENCY_ENABLED === '1';\nif (!enabled) {\n  return [{ json: $input.first().json }];\n}\n\nconst item = $input.first().json;\nconst body = item?.body ?? item;\nconst callId = body?.message?.call?.id ?? body?.call?.call_id;\nconst idempotencyKey = callId ? `voice:${callId}` : null;\n\nif (!idempotencyKey) {\n  // No callId available, pass through (better than dropping)\n  return [{ json: item }];\n}\n\nconst data = $getWorkflowStaticData('global');\nconst seen = data.seenKeys ?? {};\nconst now = Date.now();\nconst WINDOW_MS = 5 * 60 * 1000;\n\nfor (const k of Object.keys(seen)) {\n  if (now - seen[k] > WINDOW_MS) delete seen[k];\n}\n\nif (seen[idempotencyKey]) {\n  // Duplicate detected. Emit a sentinel item that the\n  // 'Skip If Duplicate' IF node routes to 'Respond Duplicate'\n  // (200 OK + { deduped: true }). Without that 200 the source\n  // provider would hold the HTTP connection until n8n's webhook\n  // timeout (default 30s) and mark delivery failed.\n  return [{ json: { skipped: true, reason: 'duplicate', dedupKey: String(idempotencyKey) } }];\n}\nseen[idempotencyKey] = now;\ndata.seenKeys = seen;\n\nreturn [{ json: item }];"
      },
      "id": "voice-pp-3-idempotency",
      "name": "Idempotency Check (opt-in)",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        720,
        280
      ]
    },
    {
      "parameters": {
        "conditions": {
          "options": {
            "caseSensitive": true,
            "leftValue": "",
            "typeValidation": "strict",
            "version": 2
          },
          "conditions": [
            {
              "id": "cond-has-phone",
              "leftValue": "={{ $json.hasPhone }}",
              "rightValue": true,
              "operator": {
                "type": "boolean",
                "operation": "true",
                "singleValue": true
              }
            }
          ],
          "combinator": "and"
        },
        "options": {}
      },
      "id": "voice-2b-has-phone",
      "name": "Has Phone?",
      "type": "n8n-nodes-base.if",
      "typeVersion": 2.2,
      "position": [
        1040,
        280
      ]
    },
    {
      "parameters": {
        "jsCode": "// LLM Fallback Reply: fires when OpenAI/Anthropic Reply errors,\n// or when Route by Provider receives an unknown provider value.\n// Builds a graceful voice-friendly reply and an error-learn payload.\n//\n// Two arrival paths land here:\n//   1. LLM error (provider returned non-2xx) ,  input has $json.error\n//   2. Router fallback (Route by Provider had no matching rule) ,  input has\n//      the original prompt object (systemPrompt, transcript, callerPhone, etc.)\n//      WITHOUT an error field. We must NOT JSON.stringify the whole input\n//      because systemPrompt contains private memory context.\n\nconst errorRaw = $input.first().json;\nconst provider = $('Set Provider').item.json.provider ?? 'unknown';\n\nconst isLlmError = !!(errorRaw?.error || errorRaw?.message);\nlet errorMessage;\nif (isLlmError) {\n  errorMessage = errorRaw?.error?.message\n    ?? errorRaw?.error?.name\n    ?? errorRaw?.message\n    ?? 'Unknown LLM error';\n} else {\n  errorMessage = `Unknown provider value: ${provider}. Set \"provider\" to \"openai\" or \"anthropic\" in the Set Provider node.`;\n}\n\nconst callerPhone = $('Normalize Payload').item.json.callerPhone ?? 'anonymous';\nconst hasPhone = $('Normalize Payload').item.json.hasPhone === true;\nconst callId = $('Normalize Payload').item.json.callId ?? '';\n\n// Voice fallback should be SHORT. The voice provider TTS reads it aloud.\nconst fallbackText = \"Sorry, I'm having trouble right now. Please try calling back in a minute.\";\n\nreturn [{\n  json: {\n    replyText: fallbackText,\n    provider,\n    isFallback: true,\n    isRouterFallback: !isLlmError,\n    errorMessage: String(errorMessage),\n    callerPhone,\n    hasPhone,\n    callId,\n  },\n}];"
      },
      "id": "voice-llm-fallback",
      "name": "LLM Fallback Reply",
      "type": "n8n-nodes-base.code",
      "typeVersion": 2,
      "position": [
        2780,
        520
      ]
    },
    {
      "parameters": {
        "conditions": {
          "options": {
            "caseSensitive": true,
            "leftValue": "",
            "typeValidation": "strict",
            "version": 2
          },
          "conditions": [
            {
              "id": "cond-has-phone-post",
              "leftValue": "={{ $('Normalize Payload').item.json.hasPhone }}",
              "rightValue": true,
              "operator": {
                "type": "boolean",
                "operation": "true",
                "singleValue": true
              }
            }
          ],
          "combinator": "and"
        },
        "options": {}
      },
      "id": "voice-10b-has-phone-post",
      "name": "Has Phone? (post)",
      "type": "n8n-nodes-base.if",
      "typeVersion": 2.2,
      "position": [
        3000,
        100
      ]
    },
    {
      "parameters": {
        "resource": "memory",
        "operation": "learn",
        "content": "=LLM error in voice-agent ({{ $json.provider }}): {{ $json.errorMessage }} | Caller: {{ $json.callerPhone }} | Call: {{ $json.callId }}",
        "category": "mistake",
        "project": "voice-agent",
        "tags": "=llm-error, {{ $json.provider }}, voice-agent",
        "confidence": 0.6
      },
      "id": "voice-14-learn-error",
      "name": "Memory: Learn Error",
      "type": "n8n-nodes-studiomeyer-memory.studioMeyerMemory",
      "typeVersion": 1,
      "position": [
        3220,
        520
      ]
    },
    {
      "parameters": {
        "content": "## Production patterns (opt-in)\n\nThree Code nodes below are off by default. Toggle each with an n8n env var:\n\n- `IDEMPOTENCY_ENABLED=1` deduplicates Vapi/Retell retries on the same `callId` (5-min window).\n- `RATE_LIMIT_ENABLED=1` caps each caller (or IP if anonymous) at 60 calls / 5 min.\n- `VAPI_SIGNING_SECRET=<secret>` (or `RETELL_SIGNING_SECRET`) enables HMAC verification of the webhook signature against the raw body. Configure the same secret in your voice provider dashboard.\n\nEach node returns pass-through when its env var is unset, so the default import boots clean. Production deployments enable all three.\n\nFor clustered n8n deployments, swap the in-memory `$getWorkflowStaticData` blocks for Redis (`SET NX EX 300` for idempotency, `INCR + EXPIRE` for rate limit). Single-instance n8n is fine with the default.",
        "height": 380,
        "width": 540,
        "color": 7
      },
      "id": "note-production-patterns",
      "name": "Sticky Note - Production Patterns",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        400,
        -120
      ]
    },
    {
      "parameters": {
        "content": "## Has Phone? branch (always on)\n\nSome Vapi setups send `caller=anonymous` or strip the phone number entirely. Without this branch, an anonymous call would query Memory with `null` and create polluted entities.\n\nThe IF branches on `hasPhone`:\n- **true** (phone present): full memory path runs, caller is looked up or created.\n- **false** (anonymous): memory path is skipped, the LLM still answers with an \"anonymous caller\" system prompt.\n\nThe matching `Has Phone? (post)` IF after the LLM gates Memory: Observe + Memory: Learn so anonymous calls don't write polluted memory either. Memory: Learn Error from the LLM fallback path fires regardless (LLM errors are valuable even for anonymous callers).",
        "height": 280,
        "width": 460,
        "color": 7
      },
      "id": "note-has-phone",
      "name": "Sticky Note - Has Phone",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        1040,
        -100
      ]
    },
    {
      "parameters": {
        "content": "## Error branch (always on)\n\nBoth LLM Reply nodes have `On Error: Continue (Error Output)` enabled. The red error pin lands at **LLM Fallback Reply**, which builds a SHORT voice-friendly fallback message (\"Sorry, I'm having trouble, please call back in a minute\") and feeds two destinations:\n\n1. **Respond to Voice Provider** so the caller hears a graceful sentence instead of silence.\n2. **Memory: Learn Error** with `category: mistake, tags: [llm-error, <provider>]` so you spot patterns.\n\nThe `Route by Provider` fallback output (typo or unknown provider value) also lands here. The fallback handler discriminates between LLM-error and router-fallback so private memory context never leaks into the audit trail.\n\nThe error syntax is `{{ $json.error.message }}`, not `$error.message` (does not exist) and not `$json.execution.error.message` (that's for separate Error Trigger Workflows).",
        "height": 360,
        "width": 460,
        "color": 7
      },
      "id": "note-error-branch",
      "name": "Sticky Note - Error Branch",
      "type": "n8n-nodes-base.stickyNote",
      "typeVersion": 1,
      "position": [
        2780,
        -120
      ]
    },
    {
      "parameters": {
        "conditions": {
          "options": {
            "caseSensitive": true,
            "leftValue": "",
            "typeValidation": "strict",
            "version": 2
          },
          "conditions": [
            {
              "id": "cond-01-voice-agent-cross-session-memory-skipped",
              "leftValue": "={{ $json.skipped }}",
              "rightValue": true,
              "operator": {
                "type": "boolean",
                "operation": "true",
                "singleValue": true
              }
            }
          ],
          "combinator": "and"
        },
        "options": {}
      },
      "id": "01-voi-if-skip-dup",
      "name": "Skip If Duplicate",
      "type": "n8n-nodes-base.if",
      "typeVersion": 2,
      "position": [
        940,
        280
      ]
    },
    {
      "parameters": {
        "respondWith": "json",
        "responseBody": "={{ JSON.stringify({ ok: true, deduped: true, reason: \"duplicate\" }) }}",
        "options": {
          "responseCode": 200,
          "responseHeaders": {
            "entries": [
              {
                "name": "X-Dedup",
                "value": "1"
              }
            ]
          }
        }
      },
      "id": "01-voi-respond-duplicate",
      "name": "Respond Duplicate",
      "type": "n8n-nodes-base.respondToWebhook",
      "typeVersion": 1.1,
      "position": [
        1160,
        100
      ]
    }
  ],
  "connections": {
    "Vapi/Retell Webhook": {
      "main": [
        [
          {
            "node": "Verify Webhook (opt-in)",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Normalize Payload": {
      "main": [
        [
          {
            "node": "Has Phone?",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Memory: Lookup Caller": {
      "main": [
        [
          {
            "node": "Known Caller?",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Known Caller?": {
      "main": [
        [
          {
            "node": "Memory: Recent Context",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Memory: Create Caller",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Memory: Recent Context": {
      "main": [
        [
          {
            "node": "Build LLM Prompt",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Memory: Create Caller": {
      "main": [
        [
          {
            "node": "Build LLM Prompt",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Build LLM Prompt": {
      "main": [
        [
          {
            "node": "Set Provider",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Set Provider": {
      "main": [
        [
          {
            "node": "Route by Provider",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Route by Provider": {
      "main": [
        [
          {
            "node": "OpenAI Reply",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Anthropic Reply",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "LLM Fallback Reply",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "OpenAI Reply": {
      "main": [
        [
          {
            "node": "Normalize LLM Output",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "LLM Fallback Reply",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Anthropic Reply": {
      "main": [
        [
          {
            "node": "Normalize LLM Output",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "LLM Fallback Reply",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Normalize LLM Output": {
      "main": [
        [
          {
            "node": "Respond to Voice Provider",
            "type": "main",
            "index": 0
          },
          {
            "node": "Has Phone? (post)",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Verify Webhook (opt-in)": {
      "main": [
        [
          {
            "node": "Rate Limit (opt-in)",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Rate Limit (opt-in)": {
      "main": [
        [
          {
            "node": "Idempotency Check (opt-in)",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Idempotency Check (opt-in)": {
      "main": [
        [
          {
            "node": "Skip If Duplicate",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Has Phone?": {
      "main": [
        [
          {
            "node": "Memory: Lookup Caller",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Build LLM Prompt",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Has Phone? (post)": {
      "main": [
        [
          {
            "node": "Memory: Observe Call",
            "type": "main",
            "index": 0
          },
          {
            "node": "Memory: Learn Outcome",
            "type": "main",
            "index": 0
          }
        ],
        []
      ]
    },
    "LLM Fallback Reply": {
      "main": [
        [
          {
            "node": "Respond to Voice Provider",
            "type": "main",
            "index": 0
          },
          {
            "node": "Memory: Learn Error",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Skip If Duplicate": {
      "main": [
        [
          {
            "node": "Respond Duplicate",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Normalize Payload",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  },
  "settings": {
    "executionOrder": "v1"
  }
}

About this workflow

Voice Agent Cross-Session Memory (StudioMeyer). Uses stickyNote, n8n-nodes-studiomeyer-memory, openAi, anthropic. Webhook trigger; 31 nodes.

Source: https://github.com/studiomeyer-io/n8n-templates/blob/main/templates/01-voice-agent-cross-session-memory/workflow.json — original creator credit. Request a take-down →

More AI & RAG workflows → · Browse all categories →