<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Mobius Play-Eyes Animation</title>
<style>
body {
margin: 0;
min-height: 100vh;
display: flex;
align-items: center;
justify-content: center;
background: black;
overflow: hidden;
touch-action: none; /* Prevents default touch actions */
}
.logo {
position: absolute;
top: 1rem;
left: 1rem;
color: #00FFFF;
font-family: sans-serif;
font-size: 1.5rem;
font-weight: bold;
letter-spacing: 0.1em;
z-index: 200;
}
.container {
position: relative;
width: 200px;
height: 200px;
z-index: 50;
}
#toggle {
display: none;
}
/* Long Press Indicator */
.longpress-indicator {
position: fixed;
width: 120px;
height: 120px;
border-radius: 50%;
border: 3px solid #00FFFF;
pointer-events: none;
opacity: 0;
z-index: 150;
transition: opacity 0.2s ease;
}
.longpress-indicator.active {
opacity: 1;
animation: longpress-progress 1.5s linear forwards;
}
@keyframes longpress-progress {
from { background: radial-gradient(circle, transparent 65%, rgba(0, 255, 255, 0.5) 66%); }
to { background: radial-gradient(circle, rgba(0, 255, 255, 0.5) 0%, rgba(0, 255, 255, 0.5) 66%); }
}
/* Play Button */
.play-button {
position: absolute;
inset: 0;
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
transition: all 0.5s cubic-bezier(0.4, 0, 0.2, 1);
}
#toggle:checked ~ .play-button {
transform: scale(0) rotate(90deg);
opacity: 0;
pointer-events: none;
}
.play-wrapper {
position: relative;
width: 128px;
height: 128px;
}
.play-glow {
position: absolute;
inset: -16px;
border-radius: 50%;
background: radial-gradient(circle, rgba(0, 255, 255, 0.2) 0%, transparent 70%);
animation: pulse 2s infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; transform: scale(1); }
50% { opacity: 0.5; transform: scale(1.1); }
}
@keyframes shake {
10%, 90% { transform: translate3d(-1px, 0, 0); }
20%, 80% { transform: translate3d(2px, 0, 0); }
30%, 50%, 70% { transform: translate3d(-4px, 0, 0); }
40%, 60% { transform: translate3d(4px, 0, 0); }
}
.play-outer {
position: absolute;
inset: 0;
border-radius: 50%;
background: rgba(0, 255, 255, 0.1);
backdrop-filter: blur(8px);
transition: background 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
.play-button:hover .play-outer {
background: rgba(0, 255, 255, 0.2);
}
.play-inner {
position: absolute;
inset: 16px;
border-radius: 50%;
background: #00FFFF;
display: flex;
align-items: center;
justify-content: center;
transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
.play-button:hover .play-inner {
transform: scale(0.95);
}
.play-button:active .play-inner {
transform: scale(0.9);
}
.play-icon {
width: 0;
height: 0;
margin-left: 8px;
border-style: solid;
border-width: 20px 0 20px 32px;
border-color: transparent transparent transparent black;
}
/* Eyes */
.eyes {
position: absolute;
inset: 0;
display: flex;
align-items: center;
justify-content: center;
transform: scale(0) rotate(-90deg);
opacity: 0;
cursor: pointer;
transition: all 0.5s cubic-bezier(0.4, 0, 0.2, 1);
}
#toggle:checked ~ .eyes {
transform: scale(1) rotate(0);
opacity: 1;
}
.eyes-wrapper {
display: flex;
gap: 1rem;
transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
.eyes:hover .eyes-wrapper {
transform: scale(1.05);
}
.eye {
width: 1.75rem;
height: 6rem;
background: #00FFFF;
border-radius: 1.5rem;
box-shadow:
0 0 20px rgba(0, 255, 255, 0.7),
0 0 40px rgba(0, 255, 255, 0.5),
0 0 60px rgba(0, 255, 255, 0.3),
inset 0 0 15px rgba(255, 255, 255, 0.2);
transition: all 100ms ease-in-out;
}
.emotion-blinking {
transform: scaleY(0.2);
transform-origin: center;
}
.emotion-thinking {
animation: pulse 1.5s infinite;
}
/* Audio visualization */
.listening-container {
position: absolute;
inset: 0;
display: flex;
align-items: center;
justify-content: center;
gap: 6px;
opacity: 0;
pointer-events: none;
transition: all 0.5s ease;
}
.listening-container.active {
opacity: 1;
pointer-events: auto;
}
.audio-bar {
width: 0.75rem;
height: 4rem;
background-color: #00FFFF;
border-radius: 1rem;
box-shadow:
0 0 20px rgba(0, 255, 255, 0.7),
0 0 40px rgba(0, 255, 255, 0.5),
0 0 60px rgba(0, 255, 255, 0.3);
transition: height 0.12s ease;
transform-origin: bottom;
}
.button-container {
position: absolute;
bottom: -200px;
left: 50%;
transform: translateX(-50%);
display: flex;
flex-wrap: wrap;
justify-content: center;
gap: 0.75rem;
opacity: 0;
transition: opacity 0.3s ease;
}
#toggle:checked ~ .button-container {
opacity: 1;
}
.emotion-btn {
background-color: rgba(0, 255, 255, 0.1);
color: #00FFFF;
border: 1px solid rgba(0, 255, 255, 0.3);
backdrop-filter: blur(10px);
transition: all 100ms ease;
text-transform: uppercase;
padding: 0.5rem 1rem;
border-radius: 0.5rem;
cursor: pointer;
z-index: 110;
}
.emotion-btn:hover {
background-color: rgba(0, 255, 255, 0.2);
border-color: rgba(0, 255, 255, 0.5);
}
.recording-indicator {
position: absolute;
top: 1rem;
right: 1rem;
width: 1rem;
height: 1rem;
background-color: #FF3333;
border-radius: 50%;
box-shadow: 0 0 15px rgba(255, 51, 51, 0.7);
animation: pulse 1.5s infinite;
opacity: 0;
transition: opacity 0.3s ease;
z-index: 200;
}
.recording-indicator.active {
opacity: 1;
}
/* Transcript Container */
#transcript-container {
position: fixed;
bottom: 20px;
left: 50%;
transform: translateX(-50%);
width: 80%;
max-width: 600px;
background-color: rgba(0, 0, 0, 0.7);
color: #00FFFF;
padding: 15px;
border-radius: 10px;
font-family: sans-serif;
z-index: 100;
transition: opacity 0.3s ease;
backdrop-filter: blur(10px);
opacity: 0;
}
#transcript-title {
margin-bottom: 10px;
font-size: 14px;
opacity: 0.7;
}
#transcript-text {
font-size: 18px;
min-height: 24px;
}
</style>
</head>
<body>
<div class="logo">MOBIUS</div>
<div id="recordingIndicator" class="recording-indicator"></div>
<div id="longpressIndicator" class="longpress-indicator"></div>
<div class="container">
<input type="checkbox" id="toggle">
<label class="play-button" for="toggle">
<div class="play-wrapper">
<div class="play-glow"></div>
<div class="play-outer"></div>
<div class="play-inner">
<div class="play-icon"></div>
</div>
</div>
</label>
<label class="eyes" for="toggle">
<div class="eyes-wrapper">
<div id="leftEye" class="eye"></div>
<div id="rightEye" class="eye"></div>
</div>
</label>
<div id="listeningContainer" class="listening-container">
<div class="audio-bar" id="bar1"></div>
<div class="audio-bar" id="bar2"></div>
<div class="audio-bar" id="bar3"></div>
<div class="audio-bar" id="bar4"></div>
<div class="audio-bar" id="bar5"></div>
<div class="audio-bar" id="bar6"></div>
<div class="audio-bar" id="bar7"></div>
</div>
<div class="button-container">
<button onclick="setEmotion('default')" class="emotion-btn">Default</button>
<button onclick="setEmotion('listening')" class="emotion-btn">
<svg fill="#00FFFF" height="24" width="24" version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
viewBox="0 0 490.9 490.9" xml:space="preserve">
<g>
<g>
<path d="M245.5,322.9c53,0,96.2-43.2,96.2-96.2V96.2c0-53-43.2-96.2-96.2-96.2s-96.2,43.2-96.2,96.2v130.5
C149.3,279.8,192.5,322.9,245.5,322.9z M173.8,96.2c0-39.5,32.2-71.7,71.7-71.7s71.7,32.2,71.7,71.7v130.5
c0,39.5-32.2,71.7-71.7,71.7s-71.7-32.2-71.7-71.7V96.2z" fill="#00FFFF"/>
<path d="M94.4,214.5c-6.8,0-12.3,5.5-12.3,12.3c0,85.9,66.7,156.6,151.1,162.8v76.7h-63.9c-6.8,0-12.3,5.5-12.3,12.3
s5.5,12.3,12.3,12.3h152.3c6.8,0,12.3-5.5,12.3-12.3s-5.5-12.3-12.3-12.3h-63.9v-76.7c84.4-6.3,151.1-76.9,151.1-162.8
c0-6.8-5.5-12.3-12.3-12.3s-12.3,5.5-12.3,12.3c0,76.6-62.3,138.9-138.9,138.9s-138.9-62.3-138.9-138.9
C106.6,220,101.2,214.5,94.4,214.5z" fill="#00FFFF"/>
</g>
</g>
</svg>
</button>
<button onclick="setEmotion('thinking')" class="emotion-btn">Thinking</button>
<button onclick="setEmotion('blinking')" class="emotion-btn">Blinking</button>
</div>
</div>
<!-- Transcript container -->
<div id="transcript-container">
<div id="transcript-title">Transcript</div>
<div id="transcript-text"></div>
</div>
<script>
const leftEye = document.getElementById('leftEye');
const rightEye = document.getElementById('rightEye');
const listeningContainer = document.getElementById('listeningContainer');
const eyesContainer = document.querySelector('.eyes-wrapper');
const audioBars = Array.from(document.querySelectorAll('.audio-bar'));
const recordingIndicator = document.getElementById('recordingIndicator');
const toggle = document.getElementById('toggle');
const longpressIndicator = document.getElementById('longpressIndicator');
const body = document.body;
const transcriptContainer = document.getElementById('transcript-container');
const transcriptText = document.getElementById('transcript-text');
const emotionColors = {
default: '#00FFFF',
thinking: '#00FFFF',
happy: '#00FFFF',
curious: '#00FFFF',
surprised: '#00FFFF',
blinking: '#00FFFF',
angry: '#f87171',
fear: '#c084fc',
disgust: '#4ade80',
listening: '#00FFFF'
};
let currentEmotion = 'default';
let audioVisualizationInterval = null;
let amplitudeInterval = null;
let currentAmplitude = 3;
let isRecording = false;
let longPressTimer = null;
let longPressActive = false;
const LONG_PRESS_DURATION = 1500; // 1.5 seconds for long press
const noiseSeed = Array(20).fill(0).map(() => Math.random() * 2 - 1);
let seedPosition = 0;
// Apply an emotion: swap the eye classes/colors and toggle the listening visualization
function applyEmotion(emotion) {
const emotionClasses = [
'emotion-blinking', 'emotion-thinking', 'emotion-happy',
'emotion-surprised', 'emotion-curious', 'emotion-angry',
'emotion-fear', 'emotion-disgust', 'emotion-listening'
];
leftEye.classList.remove(...emotionClasses);
rightEye.classList.remove(...emotionClasses);
if (emotion !== 'default' && emotion !== 'listening') {
leftEye.classList.add(`emotion-${emotion}`);
rightEye.classList.add(`emotion-${emotion}`);
}
const color = emotionColors[emotion];
leftEye.style.backgroundColor = color;
rightEye.style.backgroundColor = color;
audioBars.forEach(bar => {
bar.style.backgroundColor = color;
});
if (emotion === 'listening') {
eyesContainer.style.opacity = '0';
isRecording = true;
recordingIndicator.classList.add('active');
listeningContainer.classList.add('active');
startRecordingVisualization();
} else {
clearInterval(audioVisualizationInterval);
clearInterval(amplitudeInterval);
isRecording = false;
recordingIndicator.classList.remove('active');
listeningContainer.classList.remove('active');
eyesContainer.style.opacity = '1';
}
}
function setEmotion(emotion) {
currentEmotion = emotion;
applyEmotion(emotion);
if (emotion !== 'listening' && emotion !== 'default' && emotion !== 'thinking') {
setTimeout(() => {
currentEmotion = 'default';
applyEmotion('default');
}, 1000);
}
}
function getNoise() {
seedPosition = (seedPosition + 1) % noiseSeed.length;
return noiseSeed[seedPosition];
}
// Animate the audio bars with pseudo-random heights while listening/speaking.
// Both intervals are stored so they can be cleared when the emotion changes.
function startRecordingVisualization() {
clearInterval(audioVisualizationInterval);
clearInterval(amplitudeInterval);
const centerValues = [2.5, 3, 4, 4.5, 4, 3, 2.5];
audioBars.forEach((bar, index) => {
bar.style.height = `${centerValues[index]}rem`;
});
amplitudeInterval = setInterval(() => {
if (Math.random() < 0.3) {
currentAmplitude = 2 + Math.random() * 2.5;
}
}, 800);
audioVisualizationInterval = setInterval(() => {
audioBars.forEach((bar, index) => {
const centerFactor = 1 - Math.abs(index - 3) / 3.5;
const baseHeight = 2 + (centerFactor * currentAmplitude);
const noise = getNoise() * 0.7;
const height = Math.max(1.5, baseHeight + noise);
bar.style.height = `${height}rem`;
bar.style.opacity = 0.7 + (height - 2) * 0.1;
});
}, 80);
}
// Blink periodically while the eyes are visible
function initBlinking() {
setInterval(() => {
if (currentEmotion === 'default' && toggle.checked) {
setEmotion('blinking');
}
}, 4000);
}
// Occasionally show a random idle emotion
function initRandomEmotions() {
setInterval(() => {
if (currentEmotion === 'default' && Math.random() < 0.3 && toggle.checked) {
const emotions = ['thinking', 'happy', 'curious', 'surprised'];
const randomEmotion = emotions[Math.floor(Math.random() * emotions.length)];
setEmotion(randomEmotion);
}
}, 3000);
}
// Vary the bar amplitude over time so the visualization resembles speech
function simulateSpeechPattern() {
setInterval(() => {
if (isRecording) {
if (Math.random() < 0.1) {
currentAmplitude = 1;
setTimeout(() => {
currentAmplitude = 2 + Math.random() * 2.5;
}, 300 + Math.random() * 400);
}
if (Math.random() < 0.15) {
currentAmplitude = 4 + Math.random();
setTimeout(() => {
currentAmplitude = 2 + Math.random() * 2.5;
}, 200 + Math.random() * 300);
}
}
}, 1000);
}
// Long Press Handlers
function startLongPress(event) {
if (longPressActive || !toggle.checked) return;
longPressActive = true;
// Position the indicator near where the user is pressing
const x = event.type.includes('touch') ? event.touches[0].clientX : event.clientX;
const y = event.type.includes('touch') ? event.touches[0].clientY : event.clientY;
longpressIndicator.style.left = (x - 60) + 'px'; // Center the 120px wide indicator
longpressIndicator.style.top = (y - 60) + 'px'; // Center the 120px tall indicator
// Start long press timer
longPressTimer = setTimeout(() => {
// Toggle between listening and default
if (currentEmotion === 'listening') {
setEmotion('default');
stopListening();
} else {
setEmotion('listening');
startListening();
}
longpressIndicator.classList.remove('active');
longPressActive = false;
}, LONG_PRESS_DURATION);
// Show the indicator
longpressIndicator.classList.add('active');
}
function cancelLongPress() {
if (!longPressActive) return;
clearTimeout(longPressTimer);
longpressIndicator.classList.remove('active');
longPressActive = false;
}
// Speech Recognition Implementation
let recognition = null;
let silenceTimeout = null;
let isFinalResult = false;
let isListening = false;
function initializeSpeechRecognition() {
if (!('SpeechRecognition' in window || 'webkitSpeechRecognition' in window)) {
console.error('Speech recognition not supported in this browser');
return null;
}
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const newRecognition = new SpeechRecognition();
newRecognition.continuous = true;
newRecognition.interimResults = true;
newRecognition.maxAlternatives = 1;
newRecognition.lang = 'en-US';
return newRecognition;
}
function startListening() {
if (isListening || !toggle.checked) return;
try {
if (recognition) {
recognition.stop();
}
recognition = initializeSpeechRecognition();
if (!recognition) {
console.error('Could not initialize speech recognition');
return;
}
// Show transcript container
transcriptContainer.style.opacity = '1';
transcriptText.textContent = 'Listening...';
// Set up event handlers
recognition.onstart = function() {
isListening = true;
setEmotion('listening');
console.log('Speech recognition started');
};
recognition.onresult = function(event) {
clearTimeout(silenceTimeout);
const lastResult = event.results[event.results.length - 1];
const transcript = lastResult[0].transcript.trim();
isFinalResult = lastResult.isFinal;
transcriptText.textContent = transcript;
// If it's a final result, process it
if (isFinalResult) {
processFinalTranscript(transcript);
} else {
// Set a timeout for silence detection (2 seconds)
silenceTimeout = setTimeout(() => {
processFinalTranscript(transcript);
}, 2000);
}
};
recognition.onerror = function(event) {
console.error('Speech recognition error:', event.error);
if (event.error === 'no-speech') {
// No speech detected, continue listening
restartRecognition();
} else if (event.error === 'aborted' || event.error === 'network') {
stopListening();
}
};
recognition.onend = function() {
// If ended but we still want to be listening, restart
if (isListening && toggle.checked) {
restartRecognition();
} else {
transcriptContainer.style.opacity = '0';
setTimeout(() => {
if (!isListening) {
transcriptText.textContent = '';
}
}, 300);
setEmotion('default');
}
};
recognition.onspeechend = function() {
// When speech ends but recognition is still active
console.log('Speech ended, still listening for more');
// Check if there's transcript to process
const transcript = transcriptText.textContent;
if (transcript && transcript !== 'Listening...') {
processFinalTranscript(transcript);
}
};
recognition.start();
} catch (error) {
console.error('Failed to start speech recognition:', error);
isListening = false;
}
}
function stopListening() {
if (recognition) {
isListening = false;
recognition.stop();
setEmotion('default');
console.log('Speech recognition stopped');
// Hide transcript
transcriptContainer.style.opacity = '0';
setTimeout(() => {
transcriptText.textContent = '';
}, 300);
}
}
function restartRecognition() {
if (isListening && toggle.checked) {
setTimeout(() => {
try {
recognition.start();
} catch (error) {
console.error('Error restarting recognition:', error);
isListening = false;
setEmotion('default');
}
}, 200);
}
}
// Initialize all features
initBlinking();
initRandomEmotions();
simulateSpeechPattern();
// Wire the listening button to start/stop speech recognition
const listenButton = document.querySelector('button[onclick="setEmotion(\'listening\')"]');
if (listenButton) {
listenButton.addEventListener('click', () => {
if (!isListening) {
startListening();
} else {
stopListening();
}
});
}
// Setup long press events on the document body
document.addEventListener('touchstart', (e) => {
startLongPress(e);
}, { passive: false });
document.addEventListener('touchend', () => {
cancelLongPress();
});
document.addEventListener('touchcancel', () => {
cancelLongPress();
});
// Mouse events for desktop
document.addEventListener('mousedown', (e) => {
startLongPress(e);
});
document.addEventListener('mouseup', () => {
cancelLongPress();
});
document.addEventListener('mouseleave', () => {
cancelLongPress();
});
// For mobile devices, ensure audio context is resumed after user interaction
document.addEventListener('click', function() {
if (window.audioContext && window.audioContext.state === 'suspended') {
window.audioContext.resume();
}
}, { once: true });
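// NOTE: the handler above assumes a Web Audio context stored on
// window.audioContext, but nothing in this file ever creates one, so the
// resume() call is currently a no-op. If Web Audio playback is added later,
// a minimal sketch (an assumption, not part of this page's behavior) would be
// to create the context lazily on the first user gesture:
//
//   document.addEventListener('click', () => {
//     if (!window.audioContext) {
//       window.audioContext = new (window.AudioContext || window.webkitAudioContext)();
//     }
//   }, { once: true });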
// Connect toggle button to speech recognition
toggle.addEventListener('change', function() {
if (toggle.checked) {
// When turning on, request microphone permission
navigator.mediaDevices.getUserMedia({ audio: true })
.then(() => {
console.log('Microphone permission granted');
})
.catch(error => {
console.error('Microphone permission denied:', error);
});
} else {
// When turning off, stop listening if active
if (isListening) {
stopListening();
}
}
});
// API query function
async function query(data) {
try {
const response = await fetch(
"https://srivatsavdamaraju-flowise.hf.space/api/v1/prediction/2875301a-c26f-4bd5-ab10-71fa13393541",
{
method: "POST",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify(data)
}
);
if (!response.ok) {
throw new Error(`API responded with status: ${response.status}`);
}
const result = await response.json();
return result;
} catch (error) {
console.error('API query error:', error);
throw error;
}
}
// Text-to-speech state
let speechSynthesis = window.speechSynthesis;
let speaking = false;
// Process a final transcript: query the API, display the reply, and speak it aloud
function processFinalTranscript(transcript) {
if (!transcript || transcript === 'Listening...') return;
console.log('Processing final transcript:', transcript);
// Show thinking animation while processing
setEmotion('thinking');
transcriptText.textContent = 'Processing...';
// Cancel any ongoing speech
if (speaking) {
speechSynthesis.cancel();
speaking = false;
}
// Send the transcript to the API
query({ question: transcript })
.then(response => {
console.log('API response:', response);
// Display the response
transcriptText.textContent = response.text;
// Speak the response using text-to-speech
speakResponse(response.text);
// Clear transcript after the speech ends or after a delay if speech fails
setTimeout(() => {
if (!isListening && !speaking) {
transcriptContainer.style.opacity = '0';
setTimeout(() => {
transcriptText.textContent = '';
}, 300);
}
}, 5000);
})
.catch(error => {
console.error('API error:', error);
transcriptText.textContent = 'Sorry, there was an error processing your request.';
speakResponse('Sorry, there was an error processing your request.');
setEmotion('default');
});
}
// Speak a response aloud using the Web Speech synthesis API
function speakResponse(text) {
// Don't attempt to speak if speech synthesis is not available
if (!speechSynthesis) {
console.error('Speech synthesis not supported in this browser');
setEmotion('default');
return;
}
// Create a new speech synthesis utterance
const utterance = new SpeechSynthesisUtterance(text);
// Optional: Set voice, rate, pitch, etc.
utterance.rate = 1.0; // Speech rate (0.1 to 10)
utterance.pitch = 1.0; // Speech pitch (0 to 2)
utterance.volume = 1.0; // Speech volume (0 to 1)
// Optionally select a specific voice
// Get all available voices
const voices = speechSynthesis.getVoices();
// You can choose a specific voice if available
// Find a voice that sounds good - preferably female and English
const preferredVoice = voices.find(voice =>
voice.name.includes('Female') &&
(voice.lang.includes('en-US') || voice.lang.includes('en-GB'))
) || voices[0]; // Fallback to first available voice
if (preferredVoice) {
utterance.voice = preferredVoice;
}
// Event listeners
utterance.onstart = () => {
speaking = true;
setEmotion('listening'); // Reuse the listening animation for speaking
console.log('Speech started');
};
utterance.onend = () => {
speaking = false;
setEmotion('default');
console.log('Speech ended');
// Hide transcript if not listening
if (!isListening) {
setTimeout(() => {
transcriptContainer.style.opacity = '0';
setTimeout(() => {
transcriptText.textContent = '';
}, 300);
}, 1000);
}
};
utterance.onerror = (event) => {
speaking = false;
setEmotion('default');
console.error('Speech synthesis error:', event);
};
// Start speaking
speechSynthesis.speak(utterance);
}
// Log the voices count as soon as the browser makes the voices list available
speechSynthesis.onvoiceschanged = () => console.log('Voices loaded:', speechSynthesis.getVoices().length);
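// NOTE: getVoices() can return an empty list until 'voiceschanged' has fired
// (Chrome in particular loads voices asynchronously), in which case
// speakResponse() above simply falls back to the browser's default voice.
// A minimal sketch of caching the list once it is available (an optional
// assumption, not wired into speakResponse()):
//
//   let cachedVoices = [];
//   speechSynthesis.onvoiceschanged = () => {
//     cachedVoices = speechSynthesis.getVoices();
//     console.log('Voices loaded:', cachedVoices.length);
//   };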
</script>
</body>
</html>