A-I letters #39

Open · wants to merge 6 commits into main
3 changes: 3 additions & 0 deletions client/components/LandingPage.js
@@ -2,10 +2,12 @@ import React from 'react'
import { Link } from 'react-router-dom'

import Brand from './Brand'
import Navbar from './Navbar'

const LandingPage = () => {
return (
<header className="header">
<Navbar/>
<Brand />

<section className="header__section">
@@ -20,6 +22,7 @@ const LandingPage = () => {
<Link className="header__link" to="/signin">
Get started
</Link>

</section>
</header>
)
235 changes: 45 additions & 190 deletions client/components/Main.js
@@ -1,120 +1,43 @@
import React, { useRef, useState, useEffect } from 'react'
import { connect } from 'react-redux'
import * as tf from '@tensorflow/tfjs'
import * as tmImage from '@teachablemachine/image'
import * as handPoseDetection from '@tensorflow-models/hand-pose-detection'
import Webcam from 'react-webcam'
import { drawHand } from '../utilities/hand'
import { drawFace } from '../utilities/face'
import { drawPose } from '../utilities/pose'
import { drawBothHands } from '../utilities/bothHands'
import * as handpose from '@tensorflow-models/handpose'
import * as poseDetection from '@tensorflow-models/pose-detection'
import '@tensorflow/tfjs-backend-webgl'
import * as fp from 'fingerpose'
import * as blazeface from '@tensorflow-models/blazeface'
import { paperGesture } from './phrases/hello-thankyou'
import { loveYouGesture } from './phrases/iloveyou'
import { pleaseGesture } from './phrases/please'
import { youGesture } from './phrases/you'
import { niceGesture } from './phrases/nice'
/**
* COMPONENT
*/
//url = https://teachablemachine.withgoogle.com/models/86Rqg3NFc/
const decideGesture = (gestureName, hand, face, pose) => {
const tipOfIndex = hand[0].landmarks[8]
const tipOfPinky = hand[0].landmarks[20]
const rightEye = face[0].landmarks[0]
const mouth = face[0].landmarks[3]
const shoulder = pose[0].keypoints[5]
if (gestureName === 'hello-thankyou') {
if (
Math.abs(tipOfIndex[0] - mouth[0]) <= 50 &&
tipOfIndex[1] > mouth[1] &&
tipOfPinky[1] < shoulder.y
) {
return 'thank you'
} else if (tipOfIndex[1] < rightEye[1]) {
return 'hello'
} else if (tipOfPinky[1] >= shoulder.y) {
return 'please'
}
} else return gestureName
}
import { aSign } from './letters/letterA'
import { bSign } from './letters/letterB'
import { cSign } from './letters/letterC'
import { dSign } from './letters/letterD'
import { eSign } from './letters/letterE'
import { fSign } from './letters/letterF'
import { gSign } from './letters/letterG'
import { hSign } from './letters/letterH'
import { iSign } from './letters/letterI'

export const Home = props => {
const webcamRef = useRef(null)
const canvasRef = useRef(null)
const [translation, setTranslation] = useState(null)
const [word, setWord] = useState(null)

const URL = 'https://teachablemachine.withgoogle.com/models/86Rqg3NFc/'
const checkpointURL = URL + 'model.json'
const metadataURL = URL + 'metadata.json'

const loadModel = async () => {
const model = await tmImage.load(checkpointURL, metadataURL)
const runHandpose = async () => {
const net = await handpose.load()

const netFace = await blazeface.load()
//pose
const detectorConfig = {
architecture: 'MobileNetV1',
outputStride: 16,
inputResolution: { width: 640, height: 480 },
multiplier: 0.75
}
const netPose = await poseDetection.createDetector(
poseDetection.SupportedModels.PoseNet,
detectorConfig
)
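//note: MobileNetV1 is the lighter, faster PoseNet backbone; outputStride,
//inputResolution and multiplier trade accuracy for speed (per the tfjs
//pose-detection docs)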

//both hands detection
const modelBothHands = handPoseDetection.SupportedModels.MediaPipeHands
const detectorConfigBothHands = {
runtime: 'tfjs',
modelType: 'full'
}
const netBothHands = await handPoseDetection.createDetector(
modelBothHands,
detectorConfigBothHands
)
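//unlike the single-hand handpose model above, MediaPipeHands can track both
//hands at once (runtime: 'tfjs' keeps inference in the browser; modelType:
//'full' favors accuracy over the lighter 'lite' variant)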
console.log('net both hands', netBothHands)
console.log('pose detector ', netPose)

console.log('net', net.pipeline.maxHandsNumber)
console.log('Handpose model loaded.')
// Loop and detect hands
setInterval(() => {
detect(model, net, netFace, netPose, netBothHands)
//this function is called so frequently that it should be as optimized as possible.
//"the fact you're using a setInterval could be a problem": detect is asynchronous,
//so two detect calls can run at the same time. One fix: have detect set a piece of
//state that says detection is in progress, flip it back when detect completes, and
//use that as a guard that prevents a second detect from starting.
//Better: window.requestAnimationFrame — run detect only when resources are
//available, and at the end of detect schedule the next frame (recursively).
//This makes overlapping detect calls far less likely.
}, 100)
detect(net)
}, 1000)
}
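//A hypothetical sketch (not part of this PR) of the requestAnimationFrame idea
//described above — scheduling the next frame only after detect resolves, so
//detect calls cannot overlap the way they can under setInterval:
//
//  const loop = async () => {
//    await detect(net)                  // run one detection to completion
//    window.requestAnimationFrame(loop) // then schedule the next one
//  }
//  window.requestAnimationFrame(loop)   // kick off the loop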

//Loop and detect hands

async function detect(model, net, netFace, netPose, netBothHands) {
const array = []
async function detect(net) {
// predict can take in an image, video or canvas html element

if (
typeof webcamRef.current !== 'undefined' &&
webcamRef.current !== null &&
webcamRef.current.video.readyState === 4
) {
//none of this setup needs to run on every detect call — consider moving it outside
//the detect function. Go through this code and confirm each step actually has to be
//redone per detection; anything that doesn't can be done once in the model-loading
//step, kept in a useRef, and referenced from here.
// Get Video Properties
const video = webcamRef.current.video
const videoWidth = webcamRef.current.video.videoWidth
const videoHeight = webcamRef.current.video.videoHeight
@@ -127,68 +50,22 @@ export const Home = props => {
canvasRef.current.width = videoWidth
canvasRef.current.height = videoHeight
//make detections for hand
const estimationConfig = { flipHorizontal: false }
const bothHands = await netBothHands.estimateHands(video, estimationConfig)
const hand = await net.estimateHands(video)
//make detections for face
const returnTensors = false
const face = await netFace.estimateFaces(video, returnTensors)

const pose = await netPose.estimatePoses(video)

console.log('face is', face)
// const face = await
console.log('hand', hand)
console.log('both hands are', bothHands)
console.log('pose is', pose)

//grabbing the 2d context shouldn't need to happen more than once: grab it once,
//keep a reference to it, and reuse it in every detect call — we do this often
//enough for it to matter. Also worth digging through detect to work out whether
//everything done here is absolutely needed.
//draw mesh
const ctx = canvasRef.current.getContext('2d')
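//A hypothetical sketch (not part of this PR) of the useRef caching suggested above:
//
//  const ctxRef = useRef(null)                          // next to webcamRef/canvasRef
//  // once, after the canvas exists (e.g. at the end of runHandpose):
//  ctxRef.current = canvasRef.current.getContext('2d')
//  // then in every detect call, reuse the cached context:
//  const ctx = ctxRef.current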

//drawHand(hand, ctx);
drawBothHands(bothHands, ctx)
drawFace(face, ctx)
drawPose(pose, ctx)
// if (bothHands.length === 2) {
// console.log("get inside both hands???");
// const gestureEstimatorForBothHand = new fp.GestureEstimator([
// niceGesture,
// ]);
// const gestureLeftHand = await gestureEstimatorForBothHand.estimate(
// bothHands[0].keypoints,
// 8
// );
// console.log("gesture left hand is", gestureLeftHand);
// const gestureRightHand = await gestureEstimatorForBothHand.estimate(
// bothHands[1].keypoints,
// 8
// );
// console.log("gesture right hand is", gestureRightHand);
// if (gestureLeftHand.gestures && gestureRightHand.gestures) {
// if (
// gestureLeftHand.gestures[0].name === "nice" &&
// gestureRightHand.gestures[0].name === "nice"
// ) {
// setTranslation("nice");
// }
// }
// //make detections for hands and finger gestures
// }
if (bothHands.length === 1 && hand.length > 0 && face.length > 0) {
if (hand.length > 0) {
const gestureEstimator = new fp.GestureEstimator([
paperGesture,
loveYouGesture,
pleaseGesture,
youGesture
aSign,
bSign,
cSign,
dSign,
eSign,
fSign,
gSign,
hSign,
iSign
])

// the second argument is the minimum confidence score
const gesture = await gestureEstimator.estimate(hand[0].landmarks, 8)
const gesture = await gestureEstimator.estimate(hand[0].landmarks, 4)
console.log('gesture is ', gesture)
if (gesture.gestures && gesture.gestures.length > 0) {
const score = gesture.gestures.map(prediction => prediction.score)
@@ -197,39 +74,29 @@
const gestureName = gesture.gestures[maxScore].name
console.log('gestures name is -', gesture.gestures[maxScore].name)

const result = decideGesture(gestureName, hand, face, pose)
console.log('result is ---', result)
setTranslation(result)
}
} else if (bothHands.length === 2) {
let prediction = await model.predict(video)
console.log('PREDICTION-----', prediction)
if (prediction && prediction.length > 0) {
const probability = prediction.map(
prediction => prediction.probability
)
console.log(probability)
const maxPro = probability.indexOf(Math.max.apply(null, probability))

//// Kaia just added the line below (hand.length > 0)
if (prediction[maxPro].probability > 0.8 && bothHands.length > 0) {
setTranslation(prediction[maxPro].className)
} else {
setTranslation(null)
}
array.push(gestureName)
console.log('array=====', array)
const result = array.join('')
console.log('result=====', result)
setTranslation(gestureName)
setWord(result)
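//each recognized letter is appended to array and joined into the running word
//shown in the UI; if array is re-created on every detect call the word resets
//each frame, so holding it in component state or a useRef may be worth considering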
}
} else if (bothHands.length === 0) {
setTranslation(null)
return
}
const ctx = canvasRef.current.getContext('2d')
drawHand(hand, ctx)
}
}
useEffect(() => {
loadModel()
runHandpose()
}, [])

console.log('translation====', translation)
console.log('word=====', word)
return (
<div>
<h1>Online translator</h1>
<h2>Letter: {translation}</h2>
<h2>Word: {word}</h2>

<Webcam
ref={webcamRef}
style={{
@@ -256,22 +123,10 @@
height: 480
}}
/>
<div
style={{
backgroundColor: 'red',
color: 'black',
fontSize: 30,

marginLeft: 600
}}>
{translation}
</div>
</div>
)
}
/**
* CONTAINER
*/

const mapState = state => {
return {
username: state.auth.username
35 changes: 35 additions & 0 deletions client/components/Navbar.js
@@ -0,0 +1,35 @@
import React from 'react'
import { Link } from 'react-router-dom'

const Navbar = () => {
return (
<div class="nav">
<input type="checkbox" id="nav-check" />
<div class="nav-header">
<div class="nav-title">Be My Voice</div>
</div>
<div class="nav-btn">
<label for="nav-check">
<span></span>
<span></span>
<span></span>
</label>
</div>

<div class="nav-links">
<Link to="/main">Home</Link>
<Link to="/main">Online Translator</Link>

<a target="_blank">About</a>
<Link to="/signin">Sign In</Link>
<a
href="https://github.com/2201gh-capstone-banana/2201gh-capstone"
target="_blank">
GitHub
</a>
</div>
</div>
)
}

export default Navbar
26 changes: 26 additions & 0 deletions client/components/letters/letterA.js
@@ -0,0 +1,26 @@
import { Finger, FingerCurl, FingerDirection, GestureDescription } from 'fingerpose'

export const aSign = new GestureDescription('A')

//Thumb
aSign.addCurl(Finger.Thumb, FingerCurl.NoCurl, 1.0)
aSign.addDirection(Finger.Thumb, FingerDirection.DiagonalUpRight, 0.7)
// aSign.addDirection(Finger.Index, FingerDirection.DiagonalUpLeft, 0.70);

//Index
aSign.addCurl(Finger.Index, FingerCurl.FullCurl, 1)
aSign.addDirection(Finger.Index, FingerDirection.VerticalUp, 0.7)
// aSign.addDirection(Finger.Index, FingerDirection.DiagonalUpLeft, 0.70);

//Middle
aSign.addCurl(Finger.Middle, FingerCurl.FullCurl, 1)
aSign.addDirection(Finger.Middle, FingerDirection.VerticalUp, 0.7)
// aSign.addDirection(Finger.Middle, FingerDirection.DiagonalUpLeft, 0.70);

//Ring
aSign.addCurl(Finger.Ring, FingerCurl.FullCurl, 1)
aSign.addDirection(Finger.Ring, FingerDirection.VerticalUp, 0.7)

//Pinky
aSign.addCurl(Finger.Pinky, FingerCurl.FullCurl, 1)
aSign.addDirection(Finger.Pinky, FingerDirection.VerticalUp, 0.7)
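
//A minimal usage sketch (not part of this file), assuming the fingerpose API
//used in Main.js, with landmarks coming from handpose's estimateHands:
//
//  import { GestureEstimator } from 'fingerpose'
//  import { aSign } from './letterA'
//
//  const estimator = new GestureEstimator([aSign])
//  const { gestures } = await estimator.estimate(hand[0].landmarks, 4)
//  // → e.g. [{ name: 'A', score: ... }] while the A handshape is held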