Skip to content

Commit

Permalink
Merge pull request #1 from Snipeur060/Snipeur060-tkinterversion
Browse files Browse the repository at this point in the history
Main program
  • Loading branch information
Snipeur060 authored Feb 11, 2024
2 parents b6065fa + 58d0ebf commit 031646c
Show file tree
Hide file tree
Showing 8 changed files with 363 additions and 0 deletions.
59 changes: 59 additions & 0 deletions docs/index.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
<div>Teachable Machine Image Model</div>
<button type="button" onclick="init()">Start</button>
<div id="webcam-container"></div>
<div id="label-container"></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@teachablemachine/[email protected]/dist/teachablemachine-image.min.js"></script>
<script type="text/javascript">
// More API functions here:
// https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/image

// the link to your model provided by Teachable Machine export panel
// NOTE(review): this is a GitHub *tree* page URL — it serves an HTML page,
// not the raw model.json/metadata.json files, so tmImage.load() below will
// fail to parse the response. Serve the model directory over HTTP and use a
// relative path (e.g. "./model/") or raw.githubusercontent.com — confirm
// which location this page is actually deployed next to.
const URL = "https://github.com/Snipeur060/SpaceAI/tree/main/web/model/";

let model, webcam, labelContainer, maxPredictions;

// Load the image model, start the mirrored webcam feed, and create one
// label slot per class in the DOM.
async function init() {
    const modelURL = URL + "model.json";
    const metadataURL = URL + "metadata.json";

    // Load the model and its metadata. Refer to tmImage.loadFromFiles()
    // in the API to support files from a file picker or local disk.
    // Note: the library adds a "tmImage" object to the window.
    model = await tmImage.load(modelURL, metadataURL);
    maxPredictions = model.getTotalClasses();

    // 200x200 webcam, flipped (mirrored) so it behaves like a mirror.
    webcam = new tmImage.Webcam(200, 200, true);
    await webcam.setup(); // asks the user for camera permission
    await webcam.play();
    window.requestAnimationFrame(loop);

    // Attach the camera canvas, then add an empty <div> per class label.
    document.getElementById("webcam-container").appendChild(webcam.canvas);
    labelContainer = document.getElementById("label-container");
    for (let slot = 0; slot < maxPredictions; slot++) {
        labelContainer.appendChild(document.createElement("div"));
    }
}

// Per-frame driver: refresh the camera frame, classify it, reschedule.
async function loop() {
    webcam.update();
    await predict();
    window.requestAnimationFrame(loop);
}

// Classify the current webcam frame and write one "name: probability"
// line per class into the label container.
async function predict() {
    // predict() accepts an image, video, or canvas element.
    const predictions = await model.predict(webcam.canvas);
    for (let idx = 0; idx < maxPredictions; idx++) {
        const entry = predictions[idx];
        labelContainer.childNodes[idx].innerHTML =
            entry.className + ": " + entry.probability.toFixed(2);
    }
}
</script>
184 changes: 184 additions & 0 deletions tkinterpythonv/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
import shutil
import uuid
from tkinter import *
from tkinter import messagebox, filedialog,simpledialog
from tkinter.ttk import *

import cv2
import os
import requests

#on désactive les messages d'erreur de tensorflow (ici on cherche à éviter cuda étant donné que l'on a pas besoin de ses infos. Ainsi on se fiche complètement de l'erreur. Il est donc inutile de l'afficher)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from keras.models import load_model
from PIL import Image, ImageOps
import numpy as np
from io import BytesIO
from rich.console import Console
import time
# Load both classifier variants once at startup (compile=False: inference
# only, no optimizer state is required).
lowmodel = load_model('../models/6k_keras_model.h5', compile=False)
highmodel = load_model('../models/18k_keras_model.h5', compile=False)
# Read the class labels with a context manager so the file handle is closed
# (the original left it dangling). Lines keep their trailing newline,
# exactly as readlines() returned them before.
with open('../models/labels.txt', 'r') as labels_file:
    class_names = labels_file.readlines()

# Create the root window
root = Tk()
root.title("AI Rocket Finder")

# Application title label
title = Label(root, text="AI Rocket Finder", font=("Arial", 20))
title.grid(row=0, column=0, columnspan=3)

# The user supplies either a local file path or a URL; both start unset.
file, link = None, None

def open_file():
    """Ask the user to pick a local image file and remember its path."""
    global file
    selected = filedialog.askopenfilename()
    file = selected
    print(file)

def open_link():
    """Prompt the user for an image URL and remember it."""
    global link
    entered = simpledialog.askstring("Input", "Enter the link", parent=root)
    link = entered
    print(link)


# Button that opens a native file picker for a local image
file_button = Button(root, text="Choose a file", command=open_file)
file_button.grid(row=1, column=0)

# Button that asks for an image URL instead of a local file
link_button = Button(root, text="Enter a link", command=open_link)
link_button.grid(row=1, column=1)
def dlimageandreturndata(url):
    """
    Download an image and return its raw bytes.

    :param url: HTTP(S) URL of the image to fetch.
    :return: the image content as bytes, or False when the download fails
             (network error, timeout, or non-200 status code).
    """
    try:
        # Timeout added so a dead server cannot hang the GUI forever.
        res = requests.get(url, timeout=30)
    except requests.exceptions.RequestException:
        # Covers ConnectionError, Timeout, bad schemes, ...
        return False
    if res.status_code == 200:
        # Return the body directly. The original round-tripped through a
        # temp file under the hard-coded '/temp/' directory (which usually
        # does not exist, crashing the call) and re-opened it without
        # closing the handle; none of that is needed to hand back bytes.
        return res.content
    return False
def predict(imgdata, predict_model):
    """
    Run one of the two Keras classifiers on raw image bytes.

    :param imgdata: image content as bytes (any format PIL can open).
    :param predict_model: "low" (6k model) or "high" (18k model).
    :return: the predicted class name, or a string starting with "Erreur"
             when anything goes wrong.

    Side effect: always resets the module-level `file`/`link` selection
    in the `finally` block, success or failure.
    """
    global file, link
    if predict_model == "low":
        model = lowmodel
    elif predict_model == "high":
        model = highmodel
    else:
        # Bug fix: the original fell through with `pass`, leaving `model`
        # unbound so the call below died with an opaque NameError. Keep the
        # error inside the try so `finally` still resets file/link.
        model = None
    try:
        if model is None:
            raise ValueError(f"unknown model '{predict_model}'")
        # The model expects a single 224x224 RGB image:
        # shape (1, 224, 224, 3) — one image, 3 colour channels (R, G, B).
        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

        # Convert to RGB (drops any alpha channel). BytesIO lets PIL read
        # the in-memory bytes without saving anything to disk.
        image = Image.open(BytesIO(imgdata)).convert('RGB')

        # Resize/crop to the model's input size with a high-quality filter.
        size = (224, 224)
        image = ImageOps.fit(image, size, Image.LANCZOS)

        # image -> numpy array
        image_array = np.asarray(image)
        # Light Gaussian blur to smooth the input slightly.
        image_array = cv2.GaussianBlur(image_array, (5, 5), 0)

        # Normalise pixel intensities from [0, 255] to [-1, 1].
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        # Load the single sample into the batch.
        data[0] = normalized_image_array

        # Run the Keras prediction.
        prediction = model.predict(data)
        # Index of the most probable class.
        index = np.argmax(prediction)
        # Strip the trailing newline that readlines() left on each label.
        class_name = class_names[index].strip()
        # Probability assigned to the winning class.
        confidence_score = prediction[0][index]
        print(class_name)
        return class_name
    except Exception as e:
        print(e)
        return f"""Erreur: {e}"""
    finally:
        # Clear the pending selection whether or not prediction succeeded.
        file = None
        link = None
def recog(data, predict_model):
    """
    Analyse one image (local path or URL) and print the result.

    :param data: a filesystem path or an http(s) URL to an image.
    :param predict_model: "low" or "high", forwarded to predict().
    :return: None; the outcome is printed.
    """
    global file, link
    img_path = data

    # URL support: download the bytes instead of reading a local file.
    if img_path.startswith("http"):
        img_data = dlimageandreturndata(img_path)
        if img_data is False:
            # dlimageandreturndata signals failure with False; the original
            # fed that straight into predict() and crashed inside PIL.
            print("Erreur: download failed for", img_path)
            file = None  # keep the invariant that a run clears the selection
            link = None
            return
    else:
        try:
            with open(img_path, 'rb') as f:
                img_data = f.read()
        except OSError as e:
            # Bug fix: the original swallowed the error with a bare
            # `except: pass`, then raised NameError on the unbound img_data.
            print(f"Erreur: {e}")
            file = None
            link = None
            return

    analyze = predict(img_data, predict_model)
    # Both branches currently just print the result; kept as-is for parity
    # with the original (the error branch could show a dialog instead).
    if "Erreur" in analyze:
        print(analyze)
    else:
        print(analyze)



def launchrecognition():
    """
    Run recognition on whatever the user selected (file and/or link),
    or show an error dialog when nothing is selected.
    """
    global file, link, predict_model
    # Snapshot both selections first. predict() resets the globals in its
    # `finally` block, so in the original, reading `link` *after* the file
    # pass silently skipped a link that was also pending.
    pending_file = file
    pending_link = link
    if (pending_file is not None and pending_file != "") or pending_link is not None:
        print("Recognition launched")
        if pending_file is not None and pending_file != "":
            print("File: ", pending_file)
            recog(pending_file, predict_model.get())
        if pending_link is not None:
            print("Link: ", pending_link)
            recog(pending_link, predict_model.get())
    else:
        messagebox.showerror("Error", "You must choose a file or enter a link")

# Model selector: read-only combobox so only "low" or "high" can be chosen
predict_model = Combobox(root, state="readonly")
predict_model['values'] = ("low", "high")
predict_model.current(0)
predict_model.grid(row=1, column=2)




# Button that launches the recognition on the current selection
launch_button = Button(root, text="Launch", command=launchrecognition)
launch_button.grid(row=1, column=3)



# Run the application
root.mainloop()
58 changes: 58 additions & 0 deletions train/getimage.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import requests
from PIL.Image import Image
from bs4 import BeautifulSoup
import os
import uuid
import time
def download_google_images(query, num_images, where):
    """
    Scrape a Google Images result page and save up to `num_images` hits.

    :param query: search terms (interpolated into the search URL).
    :param num_images: maximum number of images to download.
    :param where: destination directory, created if it does not exist.
    """
    from urllib.parse import unquote  # local import: decode %-encoded image URLs

    url = f"https://www.google.com/search?q={query}&source=lnms&hl=en&tbm=isch&asearch=ichunk&async=_id:rg_s,_pms:s,_fmt:pc&sourceid=chrome&ie=UTF-8"

    # Desktop User-Agent header so Google does not block the request.
    headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}

    # Fetch the search results page (timeout so a hung request cannot block forever).
    response = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.text, 'html.parser')

    # Create the destination directory for the images if needed.
    if not os.path.exists(where):
        os.makedirs(where)
    print(soup)
    # Dump the raw HTML locally to help debug the scraper.
    try:
        with open('test.html', 'w', encoding='utf-8') as f:
            f.write(str(soup))
    except Exception as e:
        print(e)

    # Each result is an <a class="rg_l"> whose href embeds the real image URL.
    image_tags = soup.find_all('a', class_='rg_l')
    print(image_tags)
    for i, image_tag in enumerate(image_tags[:num_images]):
        image_url = image_tag['href']
        # Strip the leading "/imgres?imgurl=" prefix (15 characters).
        image_url = image_url[15:]
        # Drop everything from the first "&" (the remaining query parameters).
        image_url = image_url.split("&")[0]
        # Bug fix: the embedded URL is percent-encoded ("https%3A%2F%2F...")
        # and cannot be fetched until it is decoded.
        image_url = unquote(image_url)

        print(image_url)
        # Guess the file extension from the URL; fall back to "jpg" when the
        # tail is clearly not an extension (no dot segment, path chars, too long).
        image_extension = image_url.split(".")[-1]
        if not image_extension or "/" in image_extension or len(image_extension) > 4:
            image_extension = "jpg"
        print(image_extension)

        try:
            image_data = requests.get(image_url, timeout=30).content
            uudd = str(uuid.uuid4())
            with open(f"{where}/image_{uudd}.{image_extension}", 'wb') as f:
                f.write(image_data)
            print(f"Image {uudd} téléchargée avec succès")
        except Exception as e:
            print(f"Impossible de télécharger l'image {i+1} : {e}")

# Entry point: prompt interactively only when executed as a script, so the
# module can be imported (e.g. for reuse or testing) without side effects.
if __name__ == "__main__":
    search_query = input("Entrez votre recherche : ")
    num_images_to_download = int(input("Combien d'images voulez-vous télécharger ? "))
    where = input("Ou voulez vous les télécharger ? ")
    download_google_images(search_query, num_images_to_download, where)


59 changes: 59 additions & 0 deletions web/index.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
<div>Teachable Machine Image Model</div>
<button type="button" onclick="init()">Start</button>
<div id="webcam-container"></div>
<div id="label-container"></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@teachablemachine/[email protected]/dist/teachablemachine-image.min.js"></script>
<script type="text/javascript">
// More API functions here:
// https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/image

// the link to your model provided by Teachable Machine export panel
// NOTE(review): this is a GitHub *tree* page URL — it serves an HTML page,
// not the raw model.json/metadata.json files, so tmImage.load() below will
// fail to parse the response. Since this page lives next to web/model/,
// a relative "./model/" path should work when served over HTTP — confirm.
const URL = "https://github.com/Snipeur060/SpaceAI/tree/main/web/model/";

let model, webcam, labelContainer, maxPredictions;

// Load the image model, start the mirrored webcam feed, and create one
// label slot per class in the DOM.
async function init() {
    const modelURL = URL + "model.json";
    const metadataURL = URL + "metadata.json";

    // Load the model and its metadata. Refer to tmImage.loadFromFiles()
    // in the API to support files from a file picker or local disk.
    // Note: the library adds a "tmImage" object to the window.
    model = await tmImage.load(modelURL, metadataURL);
    maxPredictions = model.getTotalClasses();

    // 200x200 webcam, flipped (mirrored) so it behaves like a mirror.
    webcam = new tmImage.Webcam(200, 200, true);
    await webcam.setup(); // asks the user for camera permission
    await webcam.play();
    window.requestAnimationFrame(loop);

    // Attach the camera canvas, then add an empty <div> per class label.
    document.getElementById("webcam-container").appendChild(webcam.canvas);
    labelContainer = document.getElementById("label-container");
    for (let slot = 0; slot < maxPredictions; slot++) {
        labelContainer.appendChild(document.createElement("div"));
    }
}

// Per-frame driver: refresh the camera frame, classify it, reschedule.
async function loop() {
    webcam.update();
    await predict();
    window.requestAnimationFrame(loop);
}

// Classify the current webcam frame and write one "name: probability"
// line per class into the label container.
async function predict() {
    // predict() accepts an image, video, or canvas element.
    const predictions = await model.predict(webcam.canvas);
    for (let idx = 0; idx < maxPredictions; idx++) {
        const entry = predictions[idx];
        labelContainer.childNodes[idx].innerHTML =
            entry.className + ": " + entry.probability.toFixed(2);
    }
}
</script>
1 change: 1 addition & 0 deletions web/model/d
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
d
1 change: 1 addition & 0 deletions web/model/metadata.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"tfjsVersion":"1.3.1","tmVersion":"2.4.7","packageVersion":"0.8.4-alpha2","packageName":"@teachablemachine/image","timeStamp":"2024-02-11T18:48:26.271Z","userMetadata":{},"modelName":"tm-my-image-model","labels":["Ariane","Falcon","Saturn","Soyouz","Space Shuttle"],"imageSize":224}
1 change: 1 addition & 0 deletions web/model/model.json

Large diffs are not rendered by default.

Binary file added web/model/weights.bin
Binary file not shown.

0 comments on commit 031646c

Please sign in to comment.