@nuralogix.ai/tf-face-tracker-worker-ts
v1.0.2
Published
MediaPipe Tasks-vision Face Landmarker - Module Worker
Downloads
19
Readme
MediaPipe Tasks-vision Face Landmarker - Module worker
This package uses the @mediapipe/tasks-vision Face Landmarker inside a module web worker and maps MediaPipe facial landmarks to DeepAffex facial landmarks.
How to use MediaPipe Tasks-vision Face Landmarker Worker
Via script tags in an HTML page
<!DOCTYPE html>
<html lang="en">
  <head>
    <!-- your meta tags -->
  </head>
  <body>
    <!-- Host element the tracker renders into (see options.mediaElement below). -->
    <div id="tracker"></div>
    <script type="module">
      import { FaceTracker, enums, utils } from 'https://unpkg.com/@nuralogix.ai/tf-face-tracker-worker-ts';

      const { CameraController, AssetDownloader } = utils;

      // --- Asset download: wasm loader, wasm binary, and the landmarker model ---
      const assetDownloader = AssetDownloader.init();
      const onBytesDownloaded = (e) => {
        const { bytes, url, done } = e.detail;
        console.log(bytes, 'bytes downloaded from', url, done);
      };
      const onDownloadedError = (e) => {
        const { error, url } = e.detail;
        console.log('Downloaded error', error, url);
      };
      assetDownloader.addEventListener('downloadedError', onDownloadedError);
      assetDownloader.addEventListener('bytesDownloaded', onBytesDownloaded);

      // path to your assets
      const url = 'https://unpkg.com/@nuralogix.ai/tf-face-tracker-worker-ts/lib/taskVision/';
      // Choose the SIMD or non-SIMD wasm build depending on browser support.
      const isSimdSupported = await assetDownloader.isSimdSupported();
      const wasmPath = `${url}wasm`;
      const modelPath = `${url}model/face_landmarker.task`;
      const wasmName = `vision_wasm${isSimdSupported ? '' : '_nosimd'}_internal`;
      // Fetch the three assets in parallel; they are independent downloads.
      const [wasmLoaderFile, wasmFile, modelFile] = await Promise.all([
        assetDownloader.fetchAsset(`${wasmPath}/${wasmName}_js.json`, true),
        assetDownloader.fetchAsset(`${wasmPath}/${wasmName}_wasm.json`, true),
        assetDownloader.fetchAsset(modelPath, false),
      ]);

      // --- Camera setup ---
      const camera = CameraController.init();
      const onSelectedDeviceChanged = (e) => {
        console.log(e.detail.deviceId);
      };
      const onCameraStatusChanged = (e) => {
        console.log(e.detail.isOpen);
      };
      camera.addEventListener('selectedDeviceChanged', onSelectedDeviceChanged);
      camera.addEventListener('cameraStatus', onCameraStatusChanged);
      await camera.list();

      // --- Tracker host element; reports size changes via a custom event ---
      const mediaElement = document.getElementById('tracker');
      const onMediaElementSizeChanged = (e) => {
        console.log('mediaElementSize Changed', e.detail);
      };
      mediaElement.addEventListener('mediaElementSizeChanged', onMediaElementSizeChanged);

      // --- Tracker output arrives on a BroadcastChannel with this name ---
      const broadcastChannelName = 'test';
      const broadcastChannel = new BroadcastChannel(broadcastChannelName);
      broadcastChannel.onmessage = (e) => {
        const { action, payload } = e.data;
        // console.log(payload);
      };

      const options = {
        faceTrackerType: enums.FaceTrackerType.MEDIAPIPE,
        broadcastChannelName,
        numOfWorkers: 2,
        mediapipe: {
          wasmLoaderFile,
          wasmFile,
          modelFile,
          delegate: 'CPU',
        },
        mediaElement,
        mirrorVideo: false,
        settings: {
          displayMediaStream: true,
          faceTrackerHeight: 360,
          faceTrackerWidth: 640,
          objectFit: enums.ObjectFitType.COVER,
          isMaskVisible: true,
        },
      };

      // Await start() so camera.cameraStream is populated before it is handed
      // to the tracker below; the original fired it without awaiting, which
      // can race setMediaStream. NOTE(review): assumes start() returns a
      // promise that resolves once the stream is open — confirm against the
      // CameraController API.
      await camera.start(1280, 720);
      const tracker = await FaceTracker.init(options);
      tracker.setMediaStream(camera.cameraStream);
    </script>
  </body>
</html>