
Commit

Merge pull request #30 from robmarkcole/rename-file
rename var file to image
robmarkcole authored Jul 24, 2020
2 parents e55b3aa + 43a69ba commit c9971e5
Showing 2 changed files with 21 additions and 53 deletions.
37 changes: 14 additions & 23 deletions README.md
@@ -20,50 +20,41 @@ If you want to create custom models, there is the easy way, and the longer but m
## Usage
Start the tflite-server on port 5000:
```
-(venv) $ uvicorn tflite-server:app --reload --port 5000
+(venv) $ uvicorn tflite-server:app --reload --port 5000 --host 0.0.0.0
```

You can check that the tflite-server is running by visiting `http://ip:5000/` from any machine, where `ip` is the IP address of the host (`localhost` if querying from the same machine). The docs can be viewed at `http://localhost:5000/docs`.
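
As a quick check from the command line you can also hit the root endpoint; the exact body returned depends on the server's `info` route, so treat the output as illustrative:
```
curl http://localhost:5000/
```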

Post an image to detect objects via cURL:
```
-curl -X POST "http://localhost:5000/v1/vision/detection" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@tests/people_car.jpg;type=image/jpeg"
+curl -X POST "http://localhost:5000/v1/vision/detection" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "image=@tests/people_car.jpg;type=image/jpeg"
```
Which should return:
```
{
    "predictions": [
        {
            "confidence": 0.93359375,
            "label": "car",
            "x_max": 619,
            "x_min": 302,
            "y_max": 348,
            "y_min": 120
        },
        {
            "confidence": 0.7890625,
            "label": "person",
            "x_max": 363,
            "x_min": 275,
            "y_max": 323,
            "y_min": 126
        },
        ...
    ],
    "success": true
}
```

To detect faces:
```
curl -X POST "http://localhost:5000/v1/vision/face" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@tests/faces.jpg;type=image/jpeg"
```

To detect the scene (dogs vs cats model):
```
curl -X POST "http://localhost:5000/v1/vision/scene" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@tests/cat.jpg;type=image/jpeg"
```
An example request using the Python requests package is in `tests/live-test.py`.
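
If you prefer to script the request yourself, a minimal sketch using the requests package might look like this (the field name `image` and the test file mirror the cURL examples above; this is an illustration, not a copy of `tests/live-test.py`):
```
import requests

DETECTION_URL = "http://localhost:5000/v1/vision/detection"

with open("tests/people_car.jpg", "rb") as f:
    # The form field is named "image" to match the renamed UploadFile parameter.
    response = requests.post(
        DETECTION_URL, files={"image": ("people_car.jpg", f, "image/jpeg")}
    )

print(response.json())
```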

## Add tflite-server as a service
You can run tflite-server as a [service](https://www.raspberrypi.org/documentation/linux/usage/systemd.md), which means it starts automatically when the RPi boots and can easily be started and stopped. Create the service file in the appropriate location on the RPi using: ```sudo nano /etc/systemd/system/tflite-server.service```
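
A minimal unit file might look something like the sketch below; the working directory, virtualenv path, and user are assumptions for a typical RPi install and should be adapted to your setup (the repository may ship its own example):
```
[Unit]
Description=tflite-server
After=network.target

[Service]
# Assumed install location and virtualenv path; adjust to match your machine.
WorkingDirectory=/home/pi/tflite-server
ExecStart=/home/pi/tflite-server/venv/bin/uvicorn tflite-server:app --port 5000 --host 0.0.0.0
Restart=always
User=pi

[Install]
WantedBy=multi-user.target
```
After saving, enable and start the service with `sudo systemctl enable tflite-server.service` and `sudo systemctl start tflite-server.service`.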
37 changes: 7 additions & 30 deletions tflite-server.py
@@ -2,7 +2,6 @@
Expose tflite models via a rest API.
"""
import io
-import logging
import sys

import numpy as np
@@ -14,14 +13,6 @@

app = FastAPI()

-LOGFORMAT = "%(asctime)s %(levelname)s %(name)s : %(message)s"
-logging.basicConfig(
-    # filename="tflite-server.log",  # select filename or stream
-    stream=sys.stdout,
-    level=logging.DEBUG,
-    format=LOGFORMAT,
-)

MIN_CONFIDENCE = 0.1 # The absolute lowest confidence for a detection.

FACE_DETECTION_URL = "/v1/vision/face"
@@ -72,14 +63,10 @@ async def info():


@app.post(FACE_DETECTION_URL)
-async def predict_face(file: UploadFile = File(...)):
+async def predict_face(image: UploadFile = File(...)):
    data = {"success": False}
-    if file.content_type.startswith("image/") is False:
-        raise HTTPException(
-            status_code=400, detail=f"File '{file.filename}' is not an image."
-        )
    try:
-        contents = await file.read()
+        contents = await image.read()
        image = Image.open(io.BytesIO(contents))  # A PIL image
        image_width = image.size[0]
        image_height = image.size[1]
@@ -92,9 +79,7 @@ async def predict_face(file: UploadFile = File(...)):
        # Process image and get predictions
        face_interpreter.invoke()
        boxes = face_interpreter.get_tensor(face_output_details[0]["index"])[0]
-        classes = face_interpreter.get_tensor(face_output_details[1]["index"])[
-            0
-        ]
+        classes = face_interpreter.get_tensor(face_output_details[1]["index"])[0]
        scores = face_interpreter.get_tensor(face_output_details[2]["index"])[0]

        faces = []
@@ -121,14 +106,10 @@


@app.post(OBJ_DETECTION_URL)
-async def predict_object(file: UploadFile = File(...)):
+async def predict_object(image: UploadFile = File(...)):
    data = {"success": False}
-    if file.content_type.startswith("image/") is False:
-        raise HTTPException(
-            status_code=400, detail=f"File '{file.filename}' is not an image."
-        )
    try:
-        contents = await file.read()
+        contents = await image.read()
        image = Image.open(io.BytesIO(contents))  # A PIL image
        image_width = image.size[0]
        image_height = image.size[1]
@@ -167,14 +148,10 @@ async def predict_object(file: UploadFile = File(...)):


@app.post(SCENE_URL)
-async def predict_scene(file: UploadFile = File(...)):
+async def predict_scene(image: UploadFile = File(...)):
    data = {"success": False}
-    if file.content_type.startswith("image/") is False:
-        raise HTTPException(
-            status_code=400, detail=f"File '{file.filename}' is not an image."
-        )
    try:
-        contents = await file.read()
+        contents = await image.read()
        image = Image.open(io.BytesIO(contents))  # A PIL image
        # Format data and send to interpreter
        resized_image = image.resize(

