Emotion reco #12
base: devel
@@ -1 +1,2 @@
-#Facial Emotion Sensing
+# Facial Expression Recognition
+
@@ -0,0 +1,73 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import cv2
import numpy as np
import tensorflow as tf  # TF2


def load_labels(filename):
    with open(filename, 'r') as f:
        return [line.strip() for line in f.readlines()]


if __name__ == '__main__':
    # Load the TFLite model and allocate its tensors
    interpreter = tf.lite.Interpreter(model_path="model.tflite")
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Check the type of the input tensor
    floating_model = input_details[0]['dtype'] == np.float32

    # Read the test image, convert it to grayscale and resize it to the
    # 48x48 single-channel input the model expects
    height = 48
    width = 48
    img = cv2.imread('1happy.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print("image shape", img.shape)
    img = cv2.resize(img, (width, height))
    img = np.expand_dims(img, axis=-1)
    print("image shape after resize", img.shape)

    # Add the batch dimension expected by the interpreter
    input_data = np.expand_dims(img, axis=0)
    print("input data shape", input_data.shape)

    if floating_model:
        # Scale pixel values to [0, 1]; rescaling again with
        # (x - 127.5) / 127.5 would normalize the input twice.
        input_data = np.float32(input_data) / 255.0

    interpreter.set_tensor(input_details[0]['index'], input_data)

    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    results = np.squeeze(output_data)

    # Report the two highest-scoring emotion labels
    top_k = results.argsort()[-2:][::-1]
    labels = load_labels("labels.txt")
    print("top_k", top_k)

    for i in top_k:
        if floating_model:
            print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
        else:
            print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))
`labels.txt` is not uploaded.
Please add an `init` function that is responsible for loading the model and the labels, and a `detect` function that receives an image as a numpy array. Take a look at facial_recognition for a reference.
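
A minimal sketch of what the requested `init`/`detect` split could look like, assuming the same `model.tflite` and `labels.txt` paths and a float32 input tensor as in the script above; the exact interface expected by `facial_recognition` in this repo may differ, and returning only the top label is an arbitrary choice for illustration.

```python
import cv2
import numpy as np
import tensorflow as tf

interpreter = None
labels = []


def init(model_path="model.tflite", labels_path="labels.txt"):
    """Load the TFLite model and the label list once, at startup."""
    global interpreter, labels
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    with open(labels_path, 'r') as f:
        labels = [line.strip() for line in f.readlines()]


def detect(image):
    """Run emotion recognition on a BGR image given as a numpy array.

    Returns the highest-scoring label (assumes a float32 input tensor).
    """
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Preprocess: grayscale, resize to 48x48, scale to [0, 1],
    # then add channel and batch dimensions.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (48, 48)).astype('float32') / 255.0
    input_data = gray[np.newaxis, ..., np.newaxis]

    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    results = np.squeeze(interpreter.get_tensor(output_details[0]['index']))
    return labels[int(np.argmax(results))]
```

Depending on what the calling code expects, `detect` could instead return the full score vector or the top-k (label, score) pairs.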