<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LHJA - Speech to Speech Translator</title>
<script src="https://cdn.tailwindcss.com"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
<style>
@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap');
body {
font-family: 'Poppins', sans-serif;
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
min-height: 100vh;
}
.gradient-bg {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}
.wave-animation {
position: relative;
overflow: hidden;
}
.wave-animation::after {
content: "";
position: absolute;
bottom: 0;
left: 0;
right: 0;
height: 20px;
background: url('data:image/svg+xml;utf8,<svg viewBox="0 0 1200 120" xmlns="http://www.w3.org/2000/svg" preserveAspectRatio="none"><path d="M0,0V46.29c47.79,22.2,103.59,32.17,158,28,70.36-5.37,136.33-33.31,206.8-37.5C438.64,32.43,512.34,53.67,583,72.05c69.27,18,138.3,24.88,209.4,13.08,36.15-6,69.85-17.84,104.45-29.34C989.49,25,1113-14.29,1200,52.47V0Z" fill="%23ffffff" opacity=".25"/><path d="M0,0V15.81C13,36.92,27.64,56.86,47.69,72.05,99.41,111.27,165,111,224.58,91.58c31.15-10.15,60.09-26.07,89.67-39.8,40.92-19,84.73-46,130.83-49.67,36.26-2.85,70.9,9.42,98.6,31.56,31.77,25.39,62.32,62,103.63,73,40.44,10.79,81.35-6.69,119.13-24.28s75.16-39,116.92-43.05c59.73-5.85,113.28,22.88,168.9,38.84,30.2,8.66,59,6.17,87.09-7.5,22.43-10.89,48-26.93,60.65-49.24V0Z" fill="%23ffffff" opacity=".5"/><path d="M0,0V5.63C149.93,59,314.09,71.32,475.83,42.57c43-7.64,84.23-20.12,127.61-26.46,59-8.63,112.48,12.24,165.56,35.4C827.93,77.22,886,95.24,951.2,90c86.53-7,172.46-45.71,248.8-84.81V0Z" fill="%23ffffff"/></svg>');
background-size: cover;
z-index: 10;
}
.pulse {
animation: pulse 2s infinite;
}
@keyframes pulse {
0% {
transform: scale(0.95);
box-shadow: 0 0 0 0 rgba(102, 126, 234, 0.7);
}
70% {
transform: scale(1);
box-shadow: 0 0 0 10px rgba(102, 126, 234, 0);
}
100% {
transform: scale(0.95);
box-shadow: 0 0 0 0 rgba(102, 126, 234, 0);
}
}
.language-selector {
transition: all 0.3s ease;
}
.language-selector:hover {
transform: translateY(-5px);
box-shadow: 0 10px 20px rgba(0, 0, 0, 0.1);
}
.voice-visualizer {
height: 60px;
display: flex;
align-items: flex-end;
justify-content: center;
gap: 3px;
}
.voice-bar {
width: 4px;
background-color: #667eea;
border-radius: 3px;
animation: equalize 1.5s infinite ease-in-out;
}
@keyframes equalize {
0%, 100% { height: 10%; }
50% { height: 100%; }
}
.voice-bar:nth-child(1) { animation-delay: -0.9s; }
.voice-bar:nth-child(2) { animation-delay: -0.6s; }
.voice-bar:nth-child(3) { animation-delay: -0.3s; }
.voice-bar:nth-child(4) { animation-delay: -0.6s; }
.voice-bar:nth-child(5) { animation-delay: -0.9s; }
.result-card {
transition: all 0.3s ease;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.result-card:hover {
transform: translateY(-5px);
box-shadow: 0 10px 25px rgba(0, 0, 0, 0.15);
}
</style>
</head>
<body class="min-h-screen flex flex-col">
<!-- Header -->
<header class="gradient-bg text-white wave-animation">
<div class="container mx-auto px-4 py-8">
<div class="flex justify-between items-center">
<div class="flex items-center space-x-2">
<i class="fas fa-language text-3xl"></i>
<h1 class="text-2xl md:text-3xl font-bold">LHJA AI</h1>
</div>
<div class="flex items-center space-x-4">
<button class="bg-white text-indigo-600 px-4 py-2 rounded-full font-medium hover:bg-indigo-50 transition">
<i class="fas fa-cog mr-2"></i>Settings
</button>
</div>
</div>
<div class="mt-12 mb-16 text-center">
<h2 class="text-3xl md:text-4xl font-bold mb-4">Real-time Speech to Speech Translation</h2>
<p class="text-xl opacity-90 max-w-2xl mx-auto">Break language barriers instantly with our AI-powered voice translator</p>
</div>
</div>
</header>
<!-- Main Content -->
<main class="flex-grow container mx-auto px-4 py-8 -mt-10">
<div class="bg-white rounded-xl shadow-xl overflow-hidden">
<div class="grid md:grid-cols-2 gap-6 p-6">
<!-- Input Section -->
<div class="space-y-6">
<div class="flex justify-between items-center">
<h3 class="text-xl font-semibold text-gray-800">Speak Now</h3>
<div class="flex items-center space-x-2">
<span class="text-sm text-gray-500">Input Language:</span>
<select id="inputLanguage" class="border rounded px-3 py-1 text-sm">
<option value="ar-SA">Arabic</option>
</select>
</div>
</div>
<div class="bg-gray-50 rounded-lg p-6 text-center">
<div id="voiceInputVisualizer" class="voice-visualizer mb-6">
<div class="voice-bar"></div>
<div class="voice-bar"></div>
<div class="voice-bar"></div>
<div class="voice-bar"></div>
<div class="voice-bar"></div>
</div>
<button id="startListening" class="bg-indigo-600 text-white rounded-full p-4 pulse hover:bg-indigo-700 transition">
<i class="fas fa-microphone text-2xl"></i>
</button>
<p id="listeningStatus" class="mt-4 text-gray-600">Press the microphone to start speaking</p>
</div>
<div class="result-card bg-gray-50 rounded-lg p-4">
<h4 class="font-medium text-gray-700 mb-2">Your Speech</h4>
<div id="inputText" class="min-h-20 p-3 bg-white rounded border border-gray-200">
<p class="text-gray-500 italic">Recognized text will appear here...</p>
</div>
</div>
</div>
<!-- Output Section -->
<div class="space-y-6">
<div class="flex justify-between items-center">
<h3 class="text-xl font-semibold text-gray-800">Translation</h3>
<div class="flex items-center space-x-2">
<span class="text-sm text-gray-500">Output Language:</span>
<select id="outputLanguage" class="border rounded px-3 py-1 text-sm">
<option value="ar-SA-najdi">Arabic</option>
</select>
</div>
</div>
<div class="result-card bg-gray-50 rounded-lg p-4">
<h4 class="font-medium text-gray-700 mb-2">Translated Text</h4>
<div id="outputText" class="min-h-20 p-3 bg-white rounded border border-gray-200">
<p class="text-gray-500 italic">Translation will appear here...</p>
</div>
</div>
<div class="bg-gray-50 rounded-lg p-6 text-center">
<div id="voiceOutputVisualizer" class="voice-visualizer mb-6 opacity-0">
<div class="voice-bar bg-indigo-400"></div>
<div class="voice-bar bg-indigo-400"></div>
<div class="voice-bar bg-indigo-400"></div>
<div class="voice-bar bg-indigo-400"></div>
<div class="voice-bar bg-indigo-400"></div>
</div>
<button id="playTranslation" class="bg-indigo-600 text-white rounded-full px-6 py-3 hover:bg-indigo-700 transition disabled:opacity-50" disabled>
<i class="fas fa-volume-up mr-2"></i> Play Translation
</button>
</div>
</div>
</div>
<!-- Quick Actions -->
<div class="bg-gray-100 px-6 py-4 border-t border-gray-200 flex justify-between items-center">
<button id="clearAll" class="text-gray-600 hover:text-gray-800 transition">
<i class="fas fa-trash-alt mr-2"></i> Clear All
</button>
<div class="flex space-x-3">
<button id="copyTranslation" class="bg-white border border-gray-300 rounded-full px-4 py-2 text-sm hover:bg-gray-50 transition">
<i class="fas fa-copy mr-2"></i> Copy
</button>
<button id="saveSession" class="bg-white border border-gray-300 rounded-full px-4 py-2 text-sm hover:bg-gray-50 transition">
<i class="fas fa-save mr-2"></i> Save
</button>
<button id="shareTranslation" class="bg-indigo-600 text-white rounded-full px-4 py-2 text-sm hover:bg-indigo-700 transition">
<i class="fas fa-share-alt mr-2"></i> Share
</button>
</div>
</div>
</div>
<!-- Recent Translations -->
</main>
<!-- Footer -->
<footer class="bg-gray-800 text-white py-8">
<div class="container mx-auto px-4">
<div class="flex flex-col md:flex-row justify-between items-center">
<div class="mb-4 md:mb-0">
<div class="flex items-center space-x-2">
<i class="fas fa-language text-2xl"></i>
<h2 class="text-xl font-bold">LHJA AI</h2>
</div>
<p class="text-gray-400 mt-2">Breaking language barriers one conversation at a time</p>
</div>
<div class="flex space-x-6">
<a href="#" class="text-gray-400 hover:text-white transition">
<i class="fab fa-facebook-f"></i>
</a>
<a href="#" class="text-gray-400 hover:text-white transition">
<i class="fab fa-twitter"></i>
</a>
<a href="#" class="text-gray-400 hover:text-white transition">
<i class="fab fa-instagram"></i>
</a>
<a href="#" class="text-gray-400 hover:text-white transition">
<i class="fab fa-github"></i>
</a>
</div>
</div>
<div class="border-t border-gray-700 mt-8 pt-8 text-center text-gray-400 text-sm">
<p>© 2025 LHJA AI. All rights reserved.</p>
</div>
</div>
</footer>
<script>
// DOM Elements
const startListeningBtn = document.getElementById('startListening');
const listeningStatus = document.getElementById('listeningStatus');
const inputText = document.getElementById('inputText');
const outputText = document.getElementById('outputText');
const playTranslationBtn = document.getElementById('playTranslation');
const voiceInputVisualizer = document.getElementById('voiceInputVisualizer');
const voiceOutputVisualizer = document.getElementById('voiceOutputVisualizer');
const inputLanguage = document.getElementById('inputLanguage');
const outputLanguage = document.getElementById('outputLanguage');
const clearAllBtn = document.getElementById('clearAll');
const copyTranslationBtn = document.getElementById('copyTranslation');
const saveSessionBtn = document.getElementById('saveSession');
const shareTranslationBtn = document.getElementById('shareTranslation');
// Speech Recognition and Synthesis
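// The Web Speech API constructor is prefixed (webkitSpeechRecognition) in Chromium browsers and Safari;
// browsers without support (e.g. Firefox) leave both names undefined and are handled in the else branch below.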
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const synth = window.speechSynthesis;
let recognition;
let isListening = false;
// Initialize Speech Recognition
if (SpeechRecognition) {
recognition = new SpeechRecognition();
recognition.continuous = false;
recognition.interimResults = true;
recognition.onstart = () => {
isListening = true;
startListeningBtn.classList.add('bg-red-600', 'pulse');
startListeningBtn.classList.remove('bg-indigo-600');
listeningStatus.textContent = "Listening... Speak now";
voiceInputVisualizer.classList.add('opacity-100');
};
recognition.onend = () => {
isListening = false;
startListeningBtn.classList.remove('bg-red-600', 'pulse');
startListeningBtn.classList.add('bg-indigo-600');
listeningStatus.textContent = "Press the microphone to start speaking";
voiceInputVisualizer.classList.remove('opacity-100');
};
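// onresult fires repeatedly with interim transcripts (interimResults = true); the recognized text is
// re-rendered on each event, and translation is requested only once the result is marked final.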
recognition.onresult = async (event) => {
const transcript = Array.from(event.results)
.map(result => result[0])
.map(result => result.transcript)
.join('');
inputText.innerHTML = `<p class="text-gray-800">${transcript}</p>`;
if (event.results[0].isFinal) {
try {
const translatedText = await simulateTranslation(
transcript,
inputLanguage.value,
outputLanguage.value
);
console.log("Result:", translatedText);
outputText.innerHTML = `<p class="text-gray-800 font-medium">${translatedText}</p>`;
playTranslationBtn.disabled = false;
} catch (err) {
console.error("خطأ في الترجمة:", err);
outputText.innerHTML = `<p class="text-red-600 font-medium">خطأ في الترجمة</p>`;
}
}
};
recognition.onerror = (event) => {
console.error('Speech recognition error', event.error);
listeningStatus.textContent = `Error: ${event.error}`;
};
} else {
listeningStatus.textContent = "Speech recognition not supported in your browser";
startListeningBtn.disabled = true;
}
// Event Listeners
startListeningBtn.addEventListener('click', () => {
if (!isListening) {
recognition.lang = inputLanguage.value;
recognition.start();
} else {
recognition.stop();
}
});
playTranslationBtn.addEventListener('click', () => {
const translated = outputText.textContent.trim();
if (translated && translated !== "Translation will appear here...") {
textToSpeech(translated).catch(err => console.error("Text-to-speech error:", err));
}
});
clearAllBtn.addEventListener('click', () => {
inputText.innerHTML = '<p class="text-gray-500 italic">Recognized text will appear here...</p>';
outputText.innerHTML = '<p class="text-gray-500 italic">Translation will appear here...</p>';
playTranslationBtn.disabled = true;
});
copyTranslationBtn.addEventListener('click', () => {
const translated = outputText.textContent.trim();
if (translated && translated !== "Translation will appear here...") {
navigator.clipboard.writeText(translated)
.then(() => {
const originalText = copyTranslationBtn.innerHTML;
copyTranslationBtn.innerHTML = '<i class="fas fa-check mr-2"></i> Copied!';
setTimeout(() => {
copyTranslationBtn.innerHTML = originalText;
}, 2000);
});
}
});
saveSessionBtn.addEventListener('click', () => {
// In a real app, this would save to local storage or a database (see the saveTranslationToHistory sketch below)
alert('Translation saved to your history');
});
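// Hypothetical persistence sketch: the Save button above currently only shows an alert. If browser-local
// history is acceptable, a helper along these lines could back it. The function name and the
// 'lhja-history' storage key are illustrative assumptions, not part of the current app, and nothing calls
// this yet; the click handler above could invoke it with the trimmed input and output text.
function saveTranslationToHistory(original, translated) {
const history = JSON.parse(localStorage.getItem('lhja-history') || '[]');
history.push({ original, translated, savedAt: new Date().toISOString() });
localStorage.setItem('lhja-history', JSON.stringify(history));
}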
shareTranslationBtn.addEventListener('click', () => {
const translated = outputText.textContent.trim();
if (translated && translated !== "Translation will appear here...") {
if (navigator.share) {
navigator.share({
title: 'LHJA AI Translation',
text: `Original: ${inputText.textContent.trim()}\nTranslation: ${translated}`,
}).catch(err => {
console.error('Error sharing:', err);
});
} else {
// Fallback for browsers that don't support Web Share API
alert('Share functionality not supported in your browser');
}
}
});
// Language change handlers
inputLanguage.addEventListener('change', () => {
if (isListening) {
recognition.stop();
recognition.lang = inputLanguage.value;
recognition.start();
}
});
outputLanguage.addEventListener('change', async () => {
const currentText = inputText.textContent.trim();
if (currentText && currentText !== "Recognized text will appear here...") {
// simulateTranslation is async, so its result must be awaited before rendering
const translatedText = await simulateTranslation(currentText, inputLanguage.value, outputLanguage.value);
outputText.innerHTML = `<p class="text-gray-800 font-medium">${translatedText}</p>`;
playTranslationBtn.disabled = false;
}
});
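// askAzureOpenAI sends the recognized text to an Azure OpenAI chat deployment (gpt-4o) and returns the
// assistant's reply, which the app treats as the "translation" into Saudi dialect.
// Note: a hard-coded api-key in client-side JavaScript is visible to every visitor; in production the
// request should be proxied through a backend that holds the key.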
async function askAzureOpenAI(prompt)
{
const endpoint = "https://lahja-dev-resource.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview";
try {
const response = await fetch(endpoint, {
method: "POST",
headers: {
"Content-Type": "application/json",
"api-key": "4AwsIf87cyBIgaJVsy0phWUQdZFcbrJxpQBDQNzL4xjcP2MFzrrYJQQJ99BIACHYHv6XJ3w3AAAAACOGYrzM"
},
body: JSON.stringify({
messages: [
{ role: "system", content: "إنت تمثل شركة أسس الذكاء الاصطناعي وترد باللهجة السعودية." },
{ role: "user", content: prompt }
]
}),
});
const data = await response.json();
return data.choices[0].message.content;
} catch (err) {
console.error("خطأ في الاتصال بـ Azure OpenAI:", err);
return "أعتذر، حدث خطأ أثناء معالجة طلبك.";
}
}
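// textToSpeech sends the translated text to the LAHJA-V1 audio/speech deployment, receives an audio blob,
// and plays it in the browser. The same key-exposure caveat as above applies.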
async function textToSpeech(text, voice = "alloy", speed = 1) {
const API_KEY = "4AwsIf87cyBIgaJVsy0phWUQdZFcbrJxpQBDQNzL4xjcP2MFzrrYJQQJ99BIACHYHv6XJ3w3AAAAACOGYrzM"; // ضع مفتاح Azure هنا
const ENDPOINT = "https://lahja-dev-resource.cognitiveservices.azure.com/openai/deployments/LAHJA-V1/audio/speech?api-version=2025-03-01-preview";
const data = {
model: "LAHJA-V1",
input: text,
voice: voice,
speed: speed
};
const response = await fetch(ENDPOINT, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${API_KEY}`
},
body: JSON.stringify(data)
});
if (!response.ok) {
throw new Error("خطأ في الاتصال: " + response.statusText);
}
const blob = await response.blob();
const audioUrl= URL.createObjectURL(blob);
if (audioUrl) {
const audio = new Audio(audioUrl);
audio.play();
}
}
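// simulateTranslation currently delegates to askAzureOpenAI and ignores fromLang/toLang; the parameters
// are kept so a real translation backend could be swapped in later without changing the call sites.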
async function simulateTranslation(text, fromLang, toLang)
{
const response = await askAzureOpenAI(text);
return response
}
// Initialize voice visualizer animation
function initVoiceVisualizer() {
const bars = voiceInputVisualizer.querySelectorAll('.voice-bar');
bars.forEach(bar => {
const randomHeight = Math.random() * 60 + 10;
bar.style.height = `${randomHeight}%`;
});
if (isListening) {
requestAnimationFrame(initVoiceVisualizer);
}
}
// animationstart bubbles up from the CSS-animated .voice-bar children, so this fires when the equalize
// keyframes begin; the requestAnimationFrame loop above keeps running only while isListening is true.
voiceInputVisualizer.addEventListener('animationstart', initVoiceVisualizer);
</script>
</body>
</html>