// LifeCoPilot visual — live voice conversation using the Web Speech API // for mic capture + TTS, and window.claude.complete for the reply. // Falls back to a scripted demo loop when idle or when speech APIs aren't // available (Safari, etc). function CoPilotVisual() { const domains = React.useMemo(() => ( ['CALENDAR', 'WORK', 'HOME', 'FAMILY', 'WELLBEING', 'MEMORY'] ), []); const demoExchanges = React.useMemo(() => ([ { domain: 'CALENDAR', you: "Reschedule my 3pm today.", bot: "Moved to Thursday 10am. Everyone's free." }, { domain: 'WORK', you: "Draft the design brief.", bot: "320 words, three directions. Review?" }, { domain: 'HOME', you: "I'm heading home soon.", bot: "Set thermostat to 21°, lights to warm." }, { domain: 'FAMILY', you: "Remind me to call Ana.", bot: "I'll surface it between your meetings." }, { domain: 'WELLBEING',you: "I'm feeling stressed.", bot: "Let's breathe. Four in, six out, ten minutes." }, { domain: 'MEMORY', you: "What did I promise Jake?", bot: "You said you'd send the slides by Friday." 
}, ]), []); // Mode: demo (scripted loop) or live (real voice conversation) const [mode, setMode] = React.useState('demo'); const [supported, setSupported] = React.useState(true); // Shared UI state const [idx, setIdx] = React.useState(0); const [phase, setPhase] = React.useState('idle'); // idle | listening | thinking | bot | done const [typed, setTyped] = React.useState(''); const [liveYou, setLiveYou] = React.useState(''); const [liveBot, setLiveBot] = React.useState(''); const [liveInterim, setLiveInterim] = React.useState(''); const [domain, setDomain] = React.useState('CALENDAR'); const recRef = React.useRef(null); const utterRef = React.useRef(null); // Feature detection React.useEffect(() => { const SR = window.SpeechRecognition || window.webkitSpeechRecognition; setSupported(!!SR && 'speechSynthesis' in window); }, []); // Scripted demo loop (original behavior) React.useEffect(() => { if (mode !== 'demo') return; let cancelled = false; const ex = demoExchanges[idx]; async function run() { setPhase('you'); setTyped(''); await wait(600); if (cancelled) return; setPhase('thinking'); await wait(700); if (cancelled) return; setPhase('bot'); setTyped(''); const text = ex.bot; for (let i = 1; i <= text.length; i++) { if (cancelled) return; setTyped(text.slice(0, i)); await wait(22 + Math.random()*28); } setPhase('done'); await wait(1800); if (cancelled) return; setIdx(v => (v + 1) % demoExchanges.length); } run(); return () => { cancelled = true; }; }, [idx, demoExchanges, mode]); // Classify user utterance to a domain (used only in live mode) function classify(text) { const t = text.toLowerCase(); if (/(schedule|calendar|meeting|appointment|reschedul|monday|tuesday|wednesday|thursday|friday|pm|am|tomorrow)/.test(t)) return 'CALENDAR'; if (/(draft|email|report|project|work|brief|deck|slide|presentation|client)/.test(t)) return 'WORK'; if (/(home|thermostat|lights|door|lock|oven|coffee|fridge)/.test(t)) return 'HOME'; if 
(/(mom|dad|wife|husband|kid|son|daughter|family|partner|ana|jake)/.test(t)) return 'FAMILY'; if (/(stress|anxious|tired|sleep|breathe|calm|meditat|wellbeing|feel)/.test(t)) return 'WELLBEING'; if (/(remember|remind|what did|recall|memory|promise)/.test(t)) return 'MEMORY'; return 'CALENDAR'; } async function handleUserTurn(text) { setLiveYou(text); setLiveInterim(''); setLiveBot(''); setDomain(classify(text)); setPhase('thinking'); let reply = ''; try { reply = await window.claude.complete({ messages: [{ role: 'user', content: `You are LifeCoPilot, a voice-first ambient AI assistant for everyday life. Respond to the user in ONE short, warm, conversational sentence (max 20 words). Do not use markdown, lists, or emoji. Just a direct spoken reply.\n\nUser said: "${text}"` }] }); reply = (reply || '').trim().replace(/^["']|["']$/g, ''); if (!reply) reply = "I'm here. What do you need?"; } catch(e) { reply = "I'm having trouble reaching Cortex. Try again in a moment."; } setPhase('bot'); setLiveBot(''); // typed reveal for (let i = 1; i <= reply.length; i++) { setLiveBot(reply.slice(0, i)); await wait(16 + Math.random()*20); } // speak it try { const u = new SpeechSynthesisUtterance(reply); u.rate = 1.02; u.pitch = 1.0; // Prefer a neutral English voice if available const voices = window.speechSynthesis.getVoices(); const v = voices.find(v => /en[-_]?US/i.test(v.lang) && /female|samantha|google us/i.test(v.name)) || voices.find(v => /en[-_]?US/i.test(v.lang)) || voices[0]; if (v) u.voice = v; utterRef.current = u; window.speechSynthesis.speak(u); await new Promise(res => { u.onend = res; u.onerror = res; }); } catch(e) {} setPhase('done'); } function startLive() { const SR = window.SpeechRecognition || window.webkitSpeechRecognition; if (!SR) return; // stop scripted loop setMode('live'); setLiveYou(''); setLiveBot(''); setLiveInterim(''); setPhase('listening'); const rec = new SR(); rec.lang = 'en-US'; rec.interimResults = true; rec.continuous = false; 
rec.maxAlternatives = 1; let finalTranscript = ''; rec.onresult = (e) => { let interim = ''; for (let i = e.resultIndex; i < e.results.length; i++) { const r = e.results[i]; if (r.isFinal) finalTranscript += r[0].transcript; else interim += r[0].transcript; } setLiveInterim(interim); if (finalTranscript) setLiveYou(finalTranscript); }; rec.onerror = (e) => { setPhase('idle'); setLiveBot('Mic error: ' + e.error); }; rec.onend = () => { const text = (finalTranscript || '').trim(); if (text) handleUserTurn(text); else { setPhase('idle'); setLiveInterim(''); } }; try { rec.start(); } catch(e) {} recRef.current = rec; } function stopLive() { try { recRef.current && recRef.current.stop(); } catch(e) {} try { window.speechSynthesis.cancel(); } catch(e) {} setPhase('idle'); } function resetToDemo() { stopLive(); setMode('demo'); setLiveYou(''); setLiveBot(''); setLiveInterim(''); setIdx(0); } // Cleanup on unmount React.useEffect(() => () => { try { recRef.current && recRef.current.abort(); } catch(e) {} try { window.speechSynthesis.cancel(); } catch(e) {} }, []); const demoEx = demoExchanges[idx]; const activeDomain = mode === 'live' ? domain : demoEx.domain; const youText = mode === 'live' ? (liveYou || liveInterim) : demoEx.you; const botText = mode === 'live' ? liveBot : (phase === 'done' ? demoEx.bot : typed); const showBot = mode === 'live' ? (phase === 'thinking' || phase === 'bot' || phase === 'done') : (phase === 'thinking' || phase === 'bot' || phase === 'done'); const orbState = phase === 'listening' ? 'listening' : phase === 'thinking' ? 'thinking' : phase === 'bot' ? 'speaking' : 'idle'; return (
LifeCoPilot meets the everyday you. Forge lets builders build on the same mind. Both speak one language — your signals.
Talk to it. It learns you. It acts ahead of you.
LifeCoPilot is a voice-first, adaptive assistant for your whole life — calendar, work, home, family, memory, and yes, wellbeing too. Proactive when it matters, invisible when it doesn't.
Generate characters, voices, worlds — on one sovereign stack.
Forge is the generative AI platform for creators and businesses. Spin up 3D characters — rigged and animated — and spawn voices, worlds, and agents, all running on the Veiron Grid with structured pipelines and first-class SDKs.