Hi!
In my previous post, I wrote about:
- How to create a custom dataset with images to be used in an Azure Machine Learning Designer project.
- How to use the custom dataset to train an image classification model.
- How to publish the model as a WebService / HTTP REST endpoint.
- How to consume the AzureML WebService endpoint from a Python app using local files.
Today's code sample is a twist on the previous one: we will use a webcam to send frames to be analyzed by the AzureML endpoint, and we will display the results live on the camera feed.
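Before running the full sample, you can do a quick sanity check that OpenCV can open your webcam. This is a minimal sketch, assuming the opencv-python package is installed and the camera is the device at index 0 (the file name test_frame.jpg is just a placeholder):

# minimal sketch: grab a single frame from the webcam at index 0 (assumes opencv-python is installed)
import cv2

video_capture = cv2.VideoCapture(0)       # open the default USB camera
ret, frame = video_capture.read()         # grab one frame
if ret:
    cv2.imwrite("test_frame.jpg", frame)  # save it so we can inspect it
    print("Frame captured:", frame.shape)
else:
    print("Could not read a frame from camera 0")
video_capture.release()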
Now, let's look at the full code.
# Copyright (c) 2022
# Author : Bruno Capuano
# Create Time : 2022 January
# Change Log :
# - Open a camera feed from a local webcam and analyze each frame using an AzureML Endpoint
# - The camera is a USB camera defined by the index 0
# - Press [D] to enable / disable the frame analysis process
# - Press [Q] to quit
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import cv2
import ssl
import json
import base64
import urllib.request
import time


def displayPredictions(jsonPrediction, frame):
    # parse the endpoint response and draw the scored label and probabilities on the frame
    jsonObj = json.loads(jsonPrediction)
    scored_label = jsonObj['Results']['WebServiceOutput0'][0]['Scored Labels']
    scored_prob_space_wolf = jsonObj['Results']['WebServiceOutput0'][0]['Scored Probabilities_space_wolf']
    scored_prob_squirrel = jsonObj['Results']['WebServiceOutput0'][0]['Scored Probabilities_squirrel']
    print(f" > scored label: {scored_label} - squirrel: {scored_prob_squirrel} - space_wolf: {scored_prob_space_wolf}")

    color = (255, 255, 255)

    # display labels
    start_point_label = (10, 40)
    text = str(f"Scored Label: {scored_label}")
    cv2.putText(frame, text, start_point_label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    start_point_label = (10, 60)
    text = "Squirrel: {:.4f}".format(scored_prob_squirrel)
    cv2.putText(frame, text, start_point_label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    start_point_label = (10, 80)
    text = "Space Wolf: {:.4f}".format(scored_prob_space_wolf)
    cv2.putText(frame, text, start_point_label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return frame


def allowSelfSignedHttps(allowed):
    # bypass the server certificate verification on the client side
    # call allowSelfSignedHttps(True) before processFile() if the service uses a self-signed certificate
    if allowed and not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
        ssl._create_default_https_context = ssl._create_unverified_context


def processFile(img_filepath):
    # request data goes here
    data = {
        "Inputs": {
            "WebServiceInput0":
            [
                {
                    'image': "data:image/png;base64,",
                    'id': "0",
                    'category': "space_wolf",
                },
            ],
        },
        "GlobalParameters": {
        }
    }

    # create the base64 encoded string for the image
    with open(img_filepath, "rb") as f:
        image_string = base64.b64encode(f.read()).decode("utf-8")
    image_data = str(f"data:image/png;base64,{image_string}")
    data['Inputs']['WebServiceInput0'][0]['image'] = image_data

    body = str.encode(json.dumps(data))

    url = '< AZURE ML HTTP EndPoint Uri >'
    api_key = '< API KEY >'  # Replace this with the API key for the web service
    headers = {'Content-Type': 'application/json', 'Authorization': ('Bearer ' + api_key)}

    req = urllib.request.Request(url, body, headers)
    json_result = {}
    try:
        response = urllib.request.urlopen(req)
        result = response.read()
        data = json.loads(result)
        json_result = json.dumps(data)
    except urllib.error.HTTPError as error:
        print("The request failed with status code: " + str(error.code))
        print(error.info())
        print(json.loads(error.read().decode("utf8", 'ignore')))
    return json_result


def main():
    # camera settings
    frame_Width = 640   # 1024 # 1280 # 640
    frame_Heigth = 480  # 780  # 960  # 480
    frame_Size = (frame_Width, frame_Heigth)
    video_capture = cv2.VideoCapture(0)
    time.sleep(1.0)

    # working dirs
    i = 0
    directoryDet = 'det'
    os.makedirs(directoryDet, exist_ok=True)
    detectionEnabled = False

    while True:
        start_time = time.time()
        ret, frameOrig = video_capture.read()
        frame = cv2.resize(frameOrig, frame_Size)

        if (detectionEnabled):
            i = i + 1
            imgNumber = str(i).zfill(5)
            imgName = 'frame_' + str(imgNumber) + '.jpg'
            imgNameOri = imgName + "_01_Orig.jpg"
            imgNameOriFullPath = os.path.join(directoryDet, imgNameOri)
            imgNameDet = imgName + "_03_Detected.jpg"
            imgNameDetFullPath = os.path.join(directoryDet, imgNameDet)
            cv2.imwrite(imgNameOriFullPath, frame)

            # analyze the saved frame using the AzureML Endpoint
            jsonStr = processFile(imgNameOriFullPath)
            frame = displayPredictions(jsonStr, frame)
            cv2.imwrite(imgNameDetFullPath, frame)

        if (time.time() - start_time) > 0:
            fpsInfo = "FPS: " + str(1.0 / (time.time() - start_time))  # FPS = 1 / time to process loop
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, fpsInfo, (10, 20), font, 0.4, (255, 255, 255), 1)

        cv2.imshow('@elbruno - Camera FPS', frame)
        if (detectionEnabled):
            time.sleep(2.0)

        # key controller
        key = cv2.waitKey(1) & 0xFF
        if key == ord("d"):
            detectionEnabled = not detectionEnabled
        if key == ord("q"):
            break

    video_capture.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
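For reference, displayPredictions() assumes the endpoint response is shaped roughly like the dictionary below. I'm inferring this from the keys the function reads; the probability column names ("Scored Probabilities_squirrel", "Scored Probabilities_space_wolf") come from the categories used to train the model, and the values here are placeholders:

# assumed response shape, inferred from the keys displayPredictions() reads (values are placeholders)
{
    "Results": {
        "WebServiceOutput0": [
            {
                "Scored Labels": "squirrel",
                "Scored Probabilities_squirrel": 0.98,
                "Scored Probabilities_space_wolf": 0.02
            }
        ]
    }
}

If you trained your model with different categories, adjust the probability keys in displayPredictions() to match your own dataset.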
Happy coding!
Greetings
El Bruno
More posts on my blog, ElBruno.com.
More info at https://beacons.ai/elbruno
Want to get up to date?
At Lemoncode we offer online training delivered by professionals who work in the trenches of consulting:
- If you want to get up to speed with Front End (ES6, Typescript, React, Angular, Vuejs…), we recommend our Máster Front End: https://lemoncode.net/master-frontend#inicio-banner
- If you want to get up to date with Backend (.net and nodejs stacks), we suggest our Bootcamp Backend: https://lemoncode.net/bootcamp-backend#bootcamp-backend/banner
- And if you want to dive into Docker, Kubernetes, CI/CD…, we have our Bootcamp Devops: https://lemoncode.net/bootcamp-devops#bootcamp-devops/inicio