diff --git a/susconecta/app/audio-teste/page.tsx b/susconecta/app/audio-teste/page.tsx new file mode 100644 index 0000000..513bfc9 --- /dev/null +++ b/susconecta/app/audio-teste/page.tsx @@ -0,0 +1,12 @@ +"use client"; + +import AIVoiceFlow from "@/components/ZoeIA/ai-voice-flow"; + +export default function VozPage() { + return ( +
+ +
+ ); +} + diff --git a/susconecta/components/ZoeIA/ai-voice-flow.tsx b/susconecta/components/ZoeIA/ai-voice-flow.tsx new file mode 100644 index 0000000..2f7cd05 --- /dev/null +++ b/susconecta/components/ZoeIA/ai-voice-flow.tsx @@ -0,0 +1,195 @@ +"use client"; + +import React, { useRef, useState } from "react"; +import { VoicePoweredOrb } from "@/components/ZoeIA/voice-powered-orb"; +import { Button } from "@/components/ui/button"; +import { Mic, MicOff } from "lucide-react"; + +// ⚠ Put the real n8n webhook URL here +const N8N_WEBHOOK_URL = "https://n8n.jonasbomfim.store/webhook/zoe2"; + +const AIVoiceFlow: React.FC = () => { + const [isRecording, setIsRecording] = useState(false); + const [isSending, setIsSending] = useState(false); + + const [voiceDetected, setVoiceDetected] = useState(false); + const [status, setStatus] = useState(null); + const [error, setError] = useState(null); + + const [replyAudioUrl, setReplyAudioUrl] = useState(null); // object URL of the audio returned by the AI + const [replyAudio, setReplyAudio] = useState(null); // audio element currently playing the reply + + const mediaRecorderRef = useRef(null); + const streamRef = useRef(null); + const chunksRef = useRef([]); + + // 🚀 Start recording + const startRecording = async () => { + try { + setError(null); + setStatus("Iniciando microfone..."); + + // If an AI reply was still playing → stop it immediately + if (replyAudio) { + replyAudio.pause(); + replyAudio.currentTime = 0; + } + setReplyAudio(null); + setReplyAudioUrl(null); + + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + streamRef.current = stream; + + const recorder = new MediaRecorder(stream); + mediaRecorderRef.current = recorder; + chunksRef.current = []; + + recorder.ondataavailable = (e) => { + if (e.data.size > 0) chunksRef.current.push(e.data); + }; + + recorder.onstop = async () => { + setStatus("Processando áudio..."); + const blob = new Blob(chunksRef.current, { type: "audio/webm" }); + await sendToN8N(blob); + 
chunksRef.current = []; + }; + + recorder.start(); + setIsRecording(true); + setStatus("Gravando... fale algo."); + } catch (err) { + console.error(err); + setError("Erro ao acessar microfone."); + } + }; + + // ⏹ Stop recording + const stopRecording = () => { + try { + setIsRecording(false); + setStatus("Finalizando gravação..."); + + if (mediaRecorderRef.current && mediaRecorderRef.current.state !== "inactive") { + mediaRecorderRef.current.stop(); + } + + if (streamRef.current) { + streamRef.current.getTracks().forEach((t) => t.stop()); + streamRef.current = null; + } + } catch (err) { + console.error(err); + setError("Erro ao parar gravação."); + } + }; + + // 📤 Send the audio to n8n and receive the MP3 reply + const sendToN8N = async (audioBlob: Blob) => { + try { + setIsSending(true); + setStatus("Enviando áudio para IA..."); + + const formData = new FormData(); + formData.append("audio", audioBlob, "voz.webm"); + + const resp = await fetch(N8N_WEBHOOK_URL, { + method: "POST", + body: formData, + }); + + if (!resp.ok) { + throw new Error("N8N retornou erro"); + } + + const replyBlob = await resp.blob(); + + // create a local object URL for the reply blob + const url = URL.createObjectURL(replyBlob); + setReplyAudioUrl(url); + + const audio = new Audio(url); + setReplyAudio(audio); + + setStatus("Reproduzindo resposta..."); + audio.play().catch(() => {}); + + } catch (err) { + console.error(err); + setError("Erro ao enviar/receber áudio."); + } finally { + setIsSending(false); + } + }; + + const toggleRecording = () => { + if (isRecording) stopRecording(); + else startRecording(); + }; + + return ( +
+ + {/* ORB — now with smart behavior */} +
+ + + {isRecording && ( + + {voiceDetected ? "Ouvindo…" : "Aguardando voz…"} + + )} +
+ + {/* 🟣 Record button */} + + + {/* STATUS */} + {status &&

{status}

} + {error &&

{error}

} + + {/* MANUAL PLAYER FOR THE REPLY */} + {replyAudioUrl && ( +
+ Última resposta da IA: +
+ )} +
+ ); +}; + +export default AIVoiceFlow; diff --git a/susconecta/types/lamejs.d.ts b/susconecta/types/lamejs.d.ts new file mode 100644 index 0000000..f1cfb4f --- /dev/null +++ b/susconecta/types/lamejs.d.ts @@ -0,0 +1,14 @@ +// Minimal type declarations for the 'lamejs' MP3 encoder, used in demo-voice-orb. +// Extend if more of the lamejs API surface is required. + +declare module 'lamejs' { + class Mp3Encoder { + constructor(channels: number, sampleRate: number, kbps: number); + encodeBuffer(buffer: Int16Array): Uint8Array; + flush(): Uint8Array; + } + export { Mp3Encoder }; + // Support the default-export import style (import lamejs from 'lamejs') + const _default: { Mp3Encoder: typeof Mp3Encoder }; + export default _default; +}