• Jump To … +
    ./demo/canvas-001.js ./demo/canvas-002.js ./demo/canvas-003.js ./demo/canvas-004.js ./demo/canvas-005.js ./demo/canvas-006.js ./demo/canvas-007.js ./demo/canvas-008.js ./demo/canvas-009.js ./demo/canvas-010.js ./demo/canvas-011.js ./demo/canvas-012.js ./demo/canvas-013.js ./demo/canvas-014.js ./demo/canvas-015.js ./demo/canvas-016.js ./demo/canvas-017.js ./demo/canvas-018.js ./demo/canvas-019.js ./demo/canvas-020.js ./demo/canvas-021.js ./demo/canvas-022.js ./demo/canvas-023.js ./demo/canvas-024.js ./demo/canvas-025.js ./demo/canvas-026.js ./demo/canvas-027.js ./demo/canvas-028.js ./demo/canvas-029.js ./demo/canvas-030.js ./demo/canvas-031.js ./demo/canvas-032.js ./demo/canvas-033.js ./demo/canvas-034.js ./demo/canvas-035.js ./demo/canvas-036.js ./demo/canvas-037.js ./demo/canvas-038.js ./demo/canvas-039.js ./demo/canvas-040.js ./demo/canvas-041.js ./demo/canvas-042.js ./demo/canvas-043.js ./demo/canvas-044.js ./demo/canvas-045.js ./demo/canvas-046.js ./demo/canvas-047.js ./demo/canvas-048.js ./demo/canvas-049.js ./demo/canvas-050.js ./demo/canvas-051.js ./demo/canvas-052.js ./demo/canvas-053.js ./demo/canvas-054.js ./demo/canvas-055.js ./demo/canvas-056.js ./demo/canvas-057.js ./demo/canvas-058.js ./demo/canvas-059.js ./demo/canvas-060.js ./demo/canvas-061.js ./demo/canvas-062.js ./demo/canvas-063.js ./demo/canvas-064.js ./demo/canvas-065.js ./demo/canvas-066.js ./demo/canvas-067.js ./demo/canvas-068.js ./demo/canvas-069.js ./demo/canvas-070.js ./demo/canvas-071.js ./demo/canvas-072.js ./demo/canvas-073.js ./demo/canvas-201.js ./demo/canvas-202.js ./demo/canvas-203.js ./demo/canvas-204.js ./demo/canvas-205.js ./demo/canvas-206.js ./demo/canvas-207.js ./demo/canvas-208.js ./demo/canvas-209.js ./demo/canvas-210.js ./demo/canvas-211.js ./demo/canvas-212.js ./demo/delaunator-001.js ./demo/delaunator-002.js ./demo/dom-001.js ./demo/dom-002.js ./demo/dom-003.js ./demo/dom-004.js ./demo/dom-005.js ./demo/dom-006.js ./demo/dom-007.js ./demo/dom-008.js 
./demo/dom-009.js ./demo/dom-010.js ./demo/dom-011.js ./demo/dom-012.js ./demo/dom-013.js ./demo/dom-015.js ./demo/dom-016.js ./demo/dom-017.js ./demo/dom-018.js ./demo/dom-019.js ./demo/dom-020.js ./demo/dom-021.js ./demo/filters-001.js ./demo/filters-002.js ./demo/filters-003.js ./demo/filters-004.js ./demo/filters-005.js ./demo/filters-006.js ./demo/filters-007.js ./demo/filters-008.js ./demo/filters-009.js ./demo/filters-010.js ./demo/filters-011.js ./demo/filters-012.js ./demo/filters-013.js ./demo/filters-014.js ./demo/filters-015.js ./demo/filters-016.js ./demo/filters-017.js ./demo/filters-018.js ./demo/filters-019.js ./demo/filters-020.js ./demo/filters-021.js ./demo/filters-022.js ./demo/filters-023.js ./demo/filters-024.js ./demo/filters-025.js ./demo/filters-026.js ./demo/filters-027.js ./demo/filters-028.js ./demo/filters-029.js ./demo/filters-030.js ./demo/filters-031.js ./demo/filters-032.js ./demo/filters-033.js ./demo/filters-034.js ./demo/filters-035.js ./demo/filters-036.js ./demo/filters-037.js ./demo/filters-038.js ./demo/filters-039.js ./demo/filters-040.js ./demo/filters-041.js ./demo/filters-042.js ./demo/filters-101.js ./demo/filters-102.js ./demo/filters-103.js ./demo/filters-104.js ./demo/filters-105.js ./demo/filters-501.js ./demo/filters-502.js ./demo/filters-503.js ./demo/filters-504.js ./demo/filters-505.js ./demo/mediapipe-001.js ./demo/mediapipe-002.js ./demo/mediapipe-003.js ./demo/modules-001.js ./demo/modules-002.js ./demo/modules-003.js ./demo/modules-004.js ./demo/modules-005.js ./demo/modules-006.js ./demo/packets-001.js ./demo/packets-002.js ./demo/particles-001.js ./demo/particles-002.js ./demo/particles-003.js ./demo/particles-004.js ./demo/particles-005.js ./demo/particles-006.js ./demo/particles-007.js ./demo/particles-008.js ./demo/particles-009.js ./demo/particles-010.js ./demo/particles-011.js ./demo/particles-012.js ./demo/particles-013.js ./demo/particles-014.js ./demo/particles-015.js ./demo/particles-016.js 
./demo/particles-017.js ./demo/snippets-001.js ./demo/snippets-002.js ./demo/snippets-003.js ./demo/snippets-004.js ./demo/snippets-005.js ./demo/snippets-006.js ./demo/temp-001.js ./demo/temp-shape-scale-investigation.js ./demo/tensorflow-001.js ./demo/tensorflow-002.js ./demo/utilities.js
  • §

    Demo Mediapipe 002

    MediaPipe Face Landmarker - model image output

  • §

    Run code

    import * as MediaPipe from './js/mediapipe/tasks-vision/vision-bundle.js';
    import * as scrawl from '../source/scrawl.js';
    
    import { reportSpeed } from './utilities.js';
  • §

    Scene setup

    // Get a handle to the Scrawl-canvas wrapper for the page's 'mycanvas' element
    const canvas = scrawl.findCanvas('mycanvas');
  • §

    Namespacing boilerplate

    // Use the canvas wrapper's name as this demo's unique namespace
    const namespace = canvas.name;

    // Helper - builds a namespaced name for each object the demo creates
    const name = (n) => [namespace, n].join('-');
  • §

    Importing a device-based media stream

    For this Demo we:

    • Create a hidden Cell (camera-input-cell) which will hold the raw data from the media stream video

    • Create a media stream video asset

    • Display the media stream asset in a Picture entity in our hidden Cell

    • Display the hidden Cell in the base Cell using a second Picture entity (the background)

    • Display an overlay of Label entitys positioned at the landmark coordinates detected by the face landmarker

    • Note 1: Users will need to explicitly agree to let Scrawl-canvas use the media stream the first time the page loads (the browser should handle this agreement procedure itself)

    • Note 2: importMediaStream returns a Promise!

    const videoFeedCell = canvas.buildCell({
    
        name: name('camera-input-cell'),
        dimensions: [768, 768],
  • §

    We pipe the media stream displayed in this cell:

    • Through the MediaPipe ML model code, to detect face landmark coordinates
    • Into the base cell, to display the video background

    Because MediaPipe needs time to process each frame, this means:

    • There’s a chance of the face and background falling out of sync
    • So we only update the cell after MediaPipe completes its processing work
    • Thus keeping both background and face in sync
        cleared: false,
        compiled: false,
        shown: false,
    });
  • §

    We use another Cell to feed data into MediaPipe

    • Required dimensions: 128 x 128
    const modelInputCell = canvas.buildCell({
    
        name: name('model-input-cell'),
        // Fixed 128 x 128 dimensions - this cell exists only to supply
        // correctly-sized frames to the MediaPipe model, so is never shown
        dimensions: [128, 128],
        shown: false,
    });
  • §

    Entitys

  • §

    Media stream picture entity

    • Goes into the hidden video feed Cell
    • Initialized without an asset, and given some default dimensions - these will be updated when the media stream completes initialization
    const inputPicture = scrawl.makePicture({
    
        name: name('camera-input-picture'),
        group: videoFeedCell,
    
        dimensions: ['100%', '100%'],
        copyDimensions: ['100%', '100%'],
  • §

    To get a mirror (selfie) effect - position the entity around its center point, then flip it horizontally using flipReverse

        start: ['center', 'center'],
        handle: ['center', 'center'],
        flipReverse: true,
    });
  • §

    The model input Cell also needs a Picture entity, to feed into the model

    • The model requires image data with set dimensions (128 x 128)
    scrawl.makePicture({
    
        name: name('model-input-picture'),
        group: modelInputCell,
    
        // Copy the full 768 x 768 video feed cell down into this 128 x 128 cell
        asset: videoFeedCell,
    
        dimensions: ['100%', '100%'],
        copyDimensions: ['100%', '100%'],
    });
  • §

    Base Cell background image

    • We fade the background image by stamping it onto the base cell at reduced opacity (globalAlpha)
    scrawl.makePicture({
    
        // No group set - presumably the entity joins the canvas base cell's
        // default group (per the heading above); verify against SC defaults
        name: name('background-picture'),
        asset: videoFeedCell,
    
        dimensions: ['100%', '100%'],
        copyDimensions: ['100%', '100%'],
    
        // Reduced opacity fades the video so the landmark Labels stand out
        globalAlpha: 0.4,
    });
  • §

    Create some Label entitys to display landmark points

    // Create 478 hidden Label entitys, one per face landmark point
    // - They get positioned and revealed as model results arrive
    const labels = Array.from({ length: 478 }, (_, i) => scrawl.makeLabel({

        name: name(`p${i}`),
        fontString: '8px monospace',
        text: `${i}`,
        handle: ['center', 'center'],
        scale: 1,
        visibility: false,
    }));
  • §

    Google MediaPipe ML model code

    let faceLandmarker;
    
    // Initialize the MediaPipe face landmarker
    // - Resolves the wasm fileset, redirects its paths to our local bundle,
    //   then creates the landmarker in VIDEO running mode
    // - Assigns the result to the module-level faceLandmarker variable
    const startModel = async () => {
    
        const path = 'js/mediapipe/tasks-vision/';
        const vision = await MediaPipe.FilesetResolver.forVisionTasks();
    
        // Prefix the resolver's default wasm paths so the assets load from
        // our local copies rather than the default (CDN) location
        vision.wasmBinaryPath = `${path}wasm${vision.wasmBinaryPath}`;
        vision.wasmLoaderPath = `${path}wasm${vision.wasmLoaderPath}`;
    
        faceLandmarker = await MediaPipe.FaceLandmarker.createFromOptions(vision, {
    
            baseOptions: {
                modelAssetPath: `${path}model/face_landmarker.task`,
            },
    
            // VIDEO mode lets us call detectForVideo with per-frame timestamps
            runningMode: 'VIDEO',
        });
    };
  • §

    We can start the model code running straight away

    • It’s the camera for which we need user permission
    // Kick off model loading immediately. The original left this Promise
    // floating; log (rather than silently drop) any startup failure, matching
    // the error-handling pattern used by the media stream import below
    startModel().catch((err) => console.log(err.message));
  • §

    This function processes the results returned by the model’s faceLandmarker object

    • faceLandmarker doesn’t start its work until we feed it video frames to analyse
    // Position and reveal the landmark Labels from the model's results, then
    // manually refresh the video feed cell (its cleared/compiled flags are
    // false) so background and labels update together, in sync
    // - results: the object returned by faceLandmarker.detectForVideo
    const processModelData = (results) => {
    
        const landmarks = (results && results.faceLandmarks && results.faceLandmarks.length) ?
            results.faceLandmarks[0] :
            null;
    
        if (landmarks && landmarks.length) {
    
            let point, label;
    
            for (let i = 0, iz = labels.length; i < iz; i++) {
    
                label = labels[i];
                point = landmarks[i];
    
                // Guard against a result set with fewer points than Labels -
                // previously an undefined point would throw on point.x
                if (point) {
    
                    label.set({
                        // Landmark x/y values are fractions of the frame;
                        // convert them to SC percentage coordinates
                        startX: `${point.x * 100}%`,
                        startY: `${point.y * 100}%`,
                        // Scale by landmark depth (z) - assumes z decreases
                        // toward the camera; confirm against MediaPipe docs
                        scale: 1 - (point.z * 2),
                        visibility: true,
                    });
                }
                else label.set({ visibility: false });
            }
        }
        else {
    
            // No face detected, or an empty landmark set - hide all Labels
            // (previously an empty set left stale labels visible)
            for (let i = 0, iz = labels.length; i < iz; i++) {
    
                labels[i].set({ visibility: false });
            }
        }
    
        // Drive the hidden cell's render manually, only after the model has
        // finished this frame's work
        videoFeedCell.clear();
        videoFeedCell.compile();
    };
  • §

    Media stream capture

    scrawl.importMediaStream({
    
        name: name('video-feed'),
        audio: false,
        // getUserMedia-style constraints; 'ideal' values are requests, not
        // guarantees - actual dimensions are read from the stream below
        video: {
            width: { ideal: 768 },
            height: { ideal: 768 },
            facingMode: 'user',
        },
    })
    .then(streamAsset => {
  • §

    The asset creates a non-DOM video element, which loads metadata asynchronously

        scrawl.addNativeListener('loadedmetadata', () => {
  • §

    We need to account for the case when the browser doesn’t return the desired dimensions

    • The handle to the non-DOM video element is stored in the asset.source attribute
            const width = streamAsset.source.videoWidth,
                height = streamAsset.source.videoHeight,
                minimumDimension = Math.min(width, height),
                scale = 768 / minimumDimension;
  • §

    Use the asset’s actual dimensions, and scale to prevent distortions

            inputPicture.set({
                dimensions: [width, height],
                scale,
                asset: streamAsset,
            });
  • §

    We need to feed input data into the model discretely, via an SC animation object

            scrawl.makeAnimation({
    
                name: name('model-segmenter'),
                order: 0,
                fn: () => {
    
                    // Wait until startModel() has finished building the model
                    if (faceLandmarker && faceLandmarker.detectForVideo) {
    
                        // Feed the model the 128 x 128 cell's canvas element;
                        // detectForVideo needs an increasing timestamp, which
                        // performance.now() supplies
                        const results = faceLandmarker.detectForVideo(modelInputCell.element, performance.now());
    
                        if (results) processModelData(results);
                    }
                }
            });
    
        // Attach the listener directly to the asset's hidden video element
        }, streamAsset.source);
    })
    .catch(err => console.log(err.message));
  • §

    Scene animation

    Function to display frames-per-second data, and other information relevant to the demo

    // reportSpeed returns a function that writes speed data into the
    // '#reportmessage' element; we invoke it after each Display cycle
    const report = reportSpeed('#reportmessage');
  • §

    Create the Display cycle animation

    // Create the Display cycle animation
    // - Indentation normalized to match the rest of the file (was 2-space)
    // - The report function runs after each frame is shown
    scrawl.makeRender({
    
        name: name('render'),
        target: canvas,
        afterShow: report,
    });
  • §

    Development and testing

    // Expose the library internals in the console, for development and testing
    console.log('scrawl.library', scrawl.library);
    console.log('MediaPipe', MediaPipe);