diff --git a/frontend/src/components/ChatMessage.jsx b/frontend/src/components/ChatMessage.jsx
index fe14099..321fd82 100644
--- a/frontend/src/components/ChatMessage.jsx
+++ b/frontend/src/components/ChatMessage.jsx
@@ -4,28 +4,28 @@ import { BotMessageSquare, User } from 'lucide-react';
const ChatMessage = ({ message, type }) => {
const isBot = type === 'bot';
- return (
+return (
-
-
- {isBot ? : }
-
+
+
+ {isBot ? : }
+
- );
+);
};
export default ChatMessage;
\ No newline at end of file
diff --git a/frontend/src/components/Navbar.jsx b/frontend/src/components/Navbar.jsx
index 39898b4..8130c08 100644
--- a/frontend/src/components/Navbar.jsx
+++ b/frontend/src/components/Navbar.jsx
@@ -4,13 +4,15 @@ import { Search, Menu, X } from 'lucide-react'
import logo from '../assets/decentrade-logo.png'
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'
import { faGithub } from '@fortawesome/free-brands-svg-icons'
-import './Navbar.css'
+import '../styles/Navbar.css'
import { connectWallet, mintNFT } from '../utils/ethereum'
+import ThemeToggle from './ThemeToggle'
+
const Navbar = ({ wallet, setWallet }) => {
const [tokenURI, setTokenURI] = useState('')
const [showMintOption, setShowMintOption] = useState(false)
- const [darkMode, setDarkMode] = useState(false)
+ // const [darkMode, setDarkMode] = useState(false)
const [isMenuOpen, setIsMenuOpen] = useState(false)
const handleGithubClick = () => {
@@ -38,11 +40,8 @@ const Navbar = ({ wallet, setWallet }) => {
}
}
- const toggleDarkMode = () => {
- setDarkMode(!darkMode)
- document.body.classList.toggle('dark', !darkMode)
- }
+
const toggleMenu = () => {
setIsMenuOpen(!isMenuOpen)
}
@@ -87,9 +86,7 @@ const Navbar = ({ wallet, setWallet }) => {
))}
-
+ <ThemeToggle />
diff --git a/frontend/src/components/ThemeToggle.jsx b/frontend/src/components/ThemeToggle.jsx
new file mode 100644
index 0000000..92b080e
--- /dev/null
+++ b/frontend/src/components/ThemeToggle.jsx
@@ -0,0 +1,78 @@
+import { useState, useEffect } from 'react'
+import { motion, AnimatePresence } from 'framer-motion';
+// import { Sun, Moon } from 'lucide-react'; // Lucide React icons
+import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
+import { faSun, faMoon } from '@fortawesome/free-solid-svg-icons';
+
+
+const ThemeToggle = () => {
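+ // Track the current theme; defaults to light until the saved preference is loaded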
+ const [darkMode, setDarkMode] = useState(false)
+
+ const toggleDarkMode = () => {
+     const nextMode = !darkMode
+     setDarkMode(nextMode)
+     document.body.classList.toggle('dark', nextMode)
+ }
+
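+ // On mount, restore the saved preference and apply the 'dark' class to <body>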
+ useEffect(() => {
+ const savedMode = localStorage.getItem('darkMode') === 'true'
+ setDarkMode(savedMode)
+ document.body.classList.toggle('dark', savedMode)
+ }, [])
+
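+ // Persist the preference whenever it changes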
+ useEffect(() => {
+ localStorage.setItem('darkMode', darkMode)
+ }, [darkMode])
+
+ return (
+
+
+ {darkMode ? (
+
+
+
+ ) : (
+
+
+
+ )}
+
+
+
+
+ )
+}
+
+export default ThemeToggle;
+
+
diff --git a/frontend/src/components/VoiceInput.jsx b/frontend/src/components/VoiceInput.jsx
index e560463..02be760 100644
--- a/frontend/src/components/VoiceInput.jsx
+++ b/frontend/src/components/VoiceInput.jsx
@@ -1,212 +1,132 @@
-// import React, { useState, useRef, useEffect } from 'react';
-// import { Mic, MicOff, Volume2, VolumeX } from 'lucide-react';
-// import { createSpeechlySpeechRecognition } from '@speechly/speech-recognition-polyfill';
-// import { useVoiceVisualizer, VoiceVisualizer } from 'react-voice-visualizer';
-
-// const VoiceInput = ({ onVoiceInputComplete }) => {
-// // State management for voice input
-// const [isListening, setIsListening] = useState(false);
-// const [audioContext, setAudioContext] = useState(null);
-// const [mediaRecorder, setMediaRecorder] = useState(null);
-// const [audioStream, setAudioStream] = useState(null);
-
-// // Refs for audio processing
-// const recorderControlsRef = useRef(null);
-// const visualizerRef = useRef(null);
-
-// // Advanced voice recognition setup
-// const setupSpeechRecognition = async () => {
-// try {
-// // Use modern Web Speech API with Speechly polyfill for enhanced cross-browser support
-// const SpeechlySpeechRecognition = createSpeechlySpeechRecognition();
-// const recognition = new SpeechlySpeechRecognition();
-
-// // Configure recognition settings
-// recognition.continuous = false;
-// recognition.interimResults = false;
-// recognition.lang = 'en-US';
-
-// // Speech recognition event handlers
-// recognition.onresult = (event) => {
-// const transcript = event.results[0][0].transcript.trim();
-// if (transcript) {
-// // Callback with transcribed text
-// onVoiceInputComplete(transcript);
-// }
-// };
-
-// recognition.onerror = (event) => {
-// console.error('Speech recognition error:', event.error);
-// setIsListening(false);
-// };
-
-// return recognition;
-// } catch (error) {
-// console.error('Speech recognition setup failed:', error);
-// return null;
-// }
-// };
-
-// // Advanced audio visualization and processing
-// const startVoiceInput = async () => {
-// try {
-// // Request microphone access
-// const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-// setAudioStream(stream);
-
-// // Setup audio context for advanced processing
-// const context = new (window.AudioContext || window.webkitAudioContext)();
-// setAudioContext(context);
-
-// // Create media recorder for additional audio capabilities
-// const recorder = new MediaRecorder(stream);
-// setMediaRecorder(recorder);
-
-// // Start listening and processing
-// setIsListening(true);
-
-// // Optional: Start speech recognition
-// const recognition = await setupSpeechRecognition();
-// if (recognition) {
-// recognition.start();
-// }
-
-// // Advanced audio visualization
-// const audioSource = context.createMediaStreamSource(stream);
-// const analyser = context.createAnalyser();
-// audioSource.connect(analyser);
-
-// } catch (error) {
-// console.error('Voice input initialization error:', error);
-// setIsListening(false);
-// }
-// };
-
-// // Stop voice input and cleanup
-// const stopVoiceInput = () => {
-// try {
-// // Stop audio stream tracks
-// if (audioStream) {
-// audioStream.getTracks().forEach(track => track.stop());
-// }
-
-// // Close audio context
-// if (audioContext) {
-// audioContext.close();
-// }
-
-// // Reset states
-// setIsListening(false);
-// setAudioStream(null);
-// setAudioContext(null);
-// } catch (error) {
-// console.error('Voice input stop error:', error);
-// }
-// };
-
-// // Render voice input interface
-// return (
-//
-// {/* Voice Input Toggle Button */}
-//
-
-// {/* Audio Visualizer */}
-// {isListening && (
-//
-//
-//
-// )}
-//
-// );
-// };
-
-// export default VoiceInput;
-
-
-import React, { useState, useRef, useCallback } from 'react';
-import { Mic, MicOff } from 'lucide-react';
+import React, { useState, useRef, useEffect } from 'react';
+import { Mic, MicOff, Volume2, VolumeX } from 'lucide-react';
+import { createSpeechlySpeechRecognition } from '@speechly/speech-recognition-polyfill';
import { useVoiceVisualizer, VoiceVisualizer } from 'react-voice-visualizer';
const VoiceInput = ({ onVoiceInputComplete }) => {
+
+ // State management for voice input
const [isListening, setIsListening] = useState(false);
+ const [audioContext, setAudioContext] = useState(null);
+ const [mediaRecorder, setMediaRecorder] = useState(null);
+ const [audioStream, setAudioStream] = useState(null);
+
+
+ // Refs for audio processing
+ const visualizerRef = useRef(null);
+ const recorderControls = useVoiceVisualizer();
+ const { recordedBlob, error } = recorderControls;
- // Refs and state for voice recording
- const recorderControlsRef = useRef(null);
- const {
- startRecording,
- stopRecording,
- recordingBlob,
- isRecording,
- clearRecording
- } = useVoiceVisualizer();
-
- // Effect to handle recorded blob
- React.useEffect(() => {
- const processRecording = async () => {
- if (recordingBlob) {
- try {
- // Convert blob to text using Web Speech API
- const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
- recognition.lang = 'en-US';
-
- recognition.onresult = (event) => {
- const transcript = event.results[0][0].transcript.trim();
- if (transcript) {
- onVoiceInputComplete(transcript);
- }
- };
-
- // Create a file reader to convert blob to audio file
- const reader = new FileReader();
- reader.onloadend = () => {
- // You could potentially send the audio file to a transcription service here
- console.log('Audio recorded:', reader.result);
- };
- reader.readAsDataURL(recordingBlob);
-
- // Clear the recording after processing
- clearRecording();
- } catch (error) {
- console.error('Transcription error:', error);
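+ // Inspect the recorded audio blob once a recording finishes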
+ useEffect(() => {
+ if (!recordedBlob) return;
+ console.log(recordedBlob);
+ }, [recordedBlob]);
+
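+ // Surface recorder errors from the visualizer hook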
+ useEffect(() => {
+ if (!error) return;
+ console.error(error);
+ }, [error]);
+
+
+ // Advanced voice recognition setup
+ const setupSpeechRecognition = async () => {
+ try {
+ // Use the Web Speech API with the Speechly polyfill for cross-browser support
+ const SpeechlySpeechRecognition = createSpeechlySpeechRecognition();
+ const recognition = new SpeechlySpeechRecognition();
+
+ // Configure recognition settings
+ recognition.continuous = false;
+ recognition.interimResults = false;
+ recognition.lang = 'en-US';
+
+ // Speech recognition event handlers
+ recognition.onresult = (event) => {
+ const transcript = event.results[0][0].transcript.trim();
+ if (transcript) {
+ // Callback with transcribed text
+ onVoiceInputComplete(transcript);
}
+ };
+
+ recognition.onerror = (event) => {
+ console.error('Speech recognition error:', event.error);
+ setIsListening(false);
+ };
+
+ return recognition;
+ } catch (error) {
+ console.error('Speech recognition setup failed:', error);
+ return null;
+ }
+ };
+
+ // Advanced audio visualization and processing
+ const startVoiceInput = async () => {
+ try {
+ // Request microphone access
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+ setAudioStream(stream);
+
+ // Setup audio context for advanced processing
+ const context = new (window.AudioContext || window.webkitAudioContext)();
+ setAudioContext(context);
+
+ // Create media recorder for additional audio capabilities
+ const recorder = new MediaRecorder(stream);
+ setMediaRecorder(recorder);
+
+ // Start listening and processing
+ setIsListening(true);
+
+ // Optional: Start speech recognition
+ const recognition = await setupSpeechRecognition();
+ if (recognition) {
+ recognition.start();
}
- };
- processRecording();
- }, [recordingBlob, onVoiceInputComplete, clearRecording]);
+ // Advanced audio visualization
+ const audioSource = context.createMediaStreamSource(stream);
+ const analyser = context.createAnalyser();
+ audioSource.connect(analyser);
- // Handler for starting/stopping recording
- const toggleRecording = useCallback(() => {
- if (isRecording) {
- stopRecording();
+ } catch (error) {
+     console.error('Voice input initialization error:', error);
+     setIsListening(false);
+ }
+ };
+
+ // Stop voice input and cleanup
+ const stopVoiceInput = () => {
+ try {
+ // Stop audio stream tracks
+ if (audioStream) {
+ audioStream.getTracks().forEach(track => track.stop());
+ }
+
+ // Close audio context
+ if (audioContext) {
+ audioContext.close();
+ }
+
+ // Reset states
setIsListening(false);
- } else {
- startRecording();
- setIsListening(true);
+ setAudioStream(null);
+ setAudioContext(null);
+ } catch (error) {
+ console.error('Voice input stop error:', error);
}
- }, [isRecording, startRecording, stopRecording]);
+ };
+ // Render voice input interface
return (
-
- {/* Recording Button */}
+
+ {/* Voice Input Toggle Button */}
- {/* Voice Visualizer */}
+ {/* Audio Visualizer */}
{isListening && (
-
+
+
+
)}
);
};
-export default VoiceInput;
\ No newline at end of file
+export default VoiceInput;
+
diff --git a/frontend/src/pages/ChatPage.jsx b/frontend/src/pages/ChatPage.jsx
index 008edff..8dafaa9 100644
--- a/frontend/src/pages/ChatPage.jsx
+++ b/frontend/src/pages/ChatPage.jsx
@@ -36,7 +36,11 @@ const ChatPage = () => {
// Connect to Gradio client
const client = await Client.connect("4darsh-Dev/orchard_eyes-chatbot");
- const result = await client.predict("/chat", { message: userMessage });
+ // const result = await client.predict("/chat", { message: userMessage });
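+ // Temporary hardcoded reply while the chatbot backend is under maintenance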
+ const result = {
+ data: ["Hello sir, I am here to help you, Under Maintenance Mode For Now."]
+ };
+
// Add bot response
const botResponse = { text: result.data[0], type: 'bot' };
@@ -53,24 +57,6 @@ const ChatPage = () => {
}
};
-// const handleVoiceInput = () => {
-// // Implement voice input functionality
-// if ('webkitSpeechRecognition' in window) {
-// const recognition = new window.webkitSpeechRecognition();
-// recognition.continuous = false;
-// recognition.interimResults = false;
-// recognition.lang = 'en-US';
-
-// recognition.onresult = (event) => {
-// const transcript = event.results[0][0].transcript;
-// handleSendMessage(transcript);
-// };
-
-// recognition.start();
-// } else {
-// alert('Speech recognition not supported');
-// }
-// };
const handleVoiceInputComplete = (transcribedText) => {
// Send transcribed text to Gradio or process as needed
diff --git a/frontend/src/components/Navbar.css b/frontend/src/styles/Navbar.css
similarity index 100%
rename from frontend/src/components/Navbar.css
rename to frontend/src/styles/Navbar.css