// Import @tensorflow/tfjs or @tensorflow/tfjs-core
const tf = require('@tensorflow/tfjs');

// Add the WASM backend to the global backend registry.
require('@tensorflow/tfjs-backend-wasm');

// Set the backend to WASM and wait for the module to be ready.
tf.setBackend('wasm').then(() => main());
setWasmPaths
import {setWasmPaths} from '@tensorflow/tfjs-backend-wasm';
setWasmPaths(yourCustomFolder);
tf.setBackend('wasm').then(() => {...});
<!-- Import @tensorflow/tfjs or @tensorflow/tfjs-core -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>

<!-- Adds the WASM backend to the global backend registry -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm/dist/tf-backend-wasm.js"></script>

<script>
  tf.setBackend('wasm').then(() => main());
</script>
tf.setBackend('wasm')
probability_model
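For reference, `probability_model` can be built by stacking a Softmax layer on top of the trained classifier, so the deployed model returns class probabilities instead of raw logits. A minimal sketch (assuming `model` is the trained Fashion MNIST classifier from the Keras tutorial):

import tensorflow as tf

# Sketch: wrap the trained classifier (assumed to be `model`) with a
# Softmax layer so predictions come back as probabilities.
probability_model = tf.keras.Sequential([
    model,
    tf.keras.layers.Softmax()
])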
from google.colab import auth
auth.authenticate_user()
CLOUD_PROJECT = 'your-project-id-here'
BUCKET = 'gs://' + CLOUD_PROJECT + '-tf2-models'
!gcloud config set project $CLOUD_PROJECT
!gsutil mb $BUCKET
print(BUCKET)
model.save()
fashion-mnist
probability_model.save(BUCKET + '/fashion-mnist', save_format='tf')
MODEL = 'fashion_mnist'
!gcloud ai-platform models create $MODEL --regions=us-central1
VERSION = 'v1'
MODEL_DIR = BUCKET + '/fashion-mnist'
!gcloud ai-platform versions create $VERSION \
  --model $MODEL \
  --origin $MODEL_DIR \
  --runtime-version=2.1 \
  --framework='tensorflow' \
  --python-version=3.7
import googleapiclient.discovery

def predict_json(project, model, instances, version=None):
    service = googleapiclient.discovery.build('ml', 'v1')
    name = 'projects/{}/models/{}'.format(project, model)

    if version is not None:
        name += '/versions/{}'.format(version)

    response = service.projects().predict(
        name=name,
        body={'instances': instances}
    ).execute()

    if 'error' in response:
        raise RuntimeError(response['error'])

    return response['predictions']
test_predictions = predict_json(CLOUD_PROJECT, MODEL, test_images[:2].tolist())
softmax
np.argmax(test_predictions[0]['softmax'])
plt.figure()
plt.imshow(test_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
feature_columns
thal
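As a rough sketch of what those feature columns might look like (column names assumed from the heart disease dataset used in the structured data tutorial), the numeric fields become numeric columns and `thal`, a string category, becomes an indicator column:

from tensorflow import feature_column

feature_columns = []

# Numeric fields (names assumed from the heart disease dataset).
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
    feature_columns.append(feature_column.numeric_column(header))

# 'thal' holds strings such as 'fixed', 'normal', 'reversible',
# so encode it as a one-hot indicator column.
thal = feature_column.categorical_column_with_vocabulary_list(
    'thal', ['fixed', 'normal', 'reversible'])
feature_columns.append(feature_column.indicator_column(thal))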
hd-prediction
model.save(BUCKET + '/hd-prediction', save_format='tf')
hd_prediction
v1
BUCKET + '/hd-prediction'
# First remove the label column
test.pop('target')

caip_instances = []
test_vals = test[:2].values

for i in test_vals:
    example_dict = {k: [v] for k, v in zip(test.columns, i)}
    caip_instances.append(example_dict)
caip_instances
[{'age': [60],
  'ca': [2],
  'chol': [293],
  'cp': [4],
  'exang': [0],
  'fbs': [0],
  'oldpeak': [1.2],
  'restecg': [2],
  'sex': [1],
  'slope': [2],
  'thal': ['reversible'],
  'thalach': [170],
  'trestbps': [140]},
 ...]
predict_json
test_predictions = predict_json(CLOUD_PROJECT, 'hd_prediction', caip_instances)
[{'output_1': [-1.4717596769332886]}, {'output_1': [-0.2714746594429016]}]
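One way to read these numbers, assuming the single output unit was trained on logits (which the negative values suggest): pass them through a sigmoid to get a probability of heart disease. A small sketch:

import numpy as np

def to_probability(logit):
    # Sigmoid maps a logit to a value in (0, 1).
    return 1 / (1 + np.exp(-logit))

# For the first test instance: to_probability(-1.4717...) is roughly 0.19
print(to_probability(test_predictions[0]['output_1'][0]))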
output_1
name
layers.Dense(1, name='prediction_probability')
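To see where that named layer sits, here is a rough sketch of the model definition (layer sizes assumed; `feature_columns` as built above). With this, AI Platform returns predictions keyed by 'prediction_probability' instead of 'output_1':

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.DenseFeatures(feature_columns),
    tf.keras.layers.Dense(128, activation='relu'),
    # Naming the output layer changes the prediction key in the response.
    tf.keras.layers.Dense(1, name='prediction_probability')
])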
import * as facemesh from '@tensorflow-models/facemesh';
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/facemesh"></script>
// Load the MediaPipe facemesh model assets.
const model = await facemesh.load();

// Pass in a video stream to the model to obtain
// an array of detected faces from the MediaPipe graph.
const video = document.querySelector("video");
const faces = await model.estimateFaces(video);

// Each face object contains a `scaledMesh` property,
// which is an array of 468 landmarks.
faces.forEach(face => console.log(face.scaledMesh));
estimateFaces
{
  faceInViewConfidence: 1,
  boundingBox: {
    topLeft: [232.28, 145.26],  // [x, y]
    bottomRight: [449.75, 308.36],
  },
  mesh: [
    [92.07, 119.49, -17.54],  // [x, y, z]
    [91.97, 102.52, -30.54],
    ...
  ],
  scaledMesh: [
    [322.32, 297.58, -17.54],
    [322.18, 263.95, -30.54]
  ],
  annotations: {
    silhouette: [
      [326.19, 124.72, -3.82],
      [351.06, 126.30, -3.00],
      ...
    ],
    ...
  }
}
import * as handpose from '@tensorflow-models/handpose';
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/handpose"></script>
// Load the MediaPipe handpose model assets.
const model = await handpose.load();

// Pass in a video stream to the model to obtain
// a prediction from the MediaPipe graph.
const video = document.querySelector("video");
const hands = await model.estimateHands(video);

// Each hand object contains a `landmarks` property,
// which is an array of 21 3-D landmarks.
hands.forEach(hand => console.log(hand.landmarks));
facemesh
{
  handInViewConfidence: 1,
  boundingBox: {
    topLeft: [162.91, -17.42],  // [x, y]
    bottomRight: [548.56, 368.23],
  },
  landmarks: [
    [472.52, 298.59, 0.00],  // [x, y, z]
    [412.80, 315.64, -6.18],
    ...
  ],
  annotations: {
    indexFinger: [
      [412.80, 315.64, -6.18],
      [350.02, 298.38, -7.14],
      ...
    ],
    ...
  }
}