Face API for JavaScript

0.22.2 · active · verified Wed Apr 22

face-api.js is a robust JavaScript API designed for performing real-time face detection, face recognition, face landmark detection, face expression recognition, and age and gender estimation. It is built on top of the TensorFlow.js core library, enabling these advanced computer vision capabilities directly within web browsers and Node.js environments. The current stable version is 0.22.2. The project does not follow a strict release cadence, but it is actively maintained, with updates and tutorials that often track TensorFlow.js advancements. Its key differentiator is providing a high-level, easy-to-use API over complex TensorFlow.js operations, simplifying the integration of sophisticated facial analysis features into JavaScript applications without requiring deep machine learning expertise. It also provides pre-trained models for various tasks, abstracting away the complexities of model management.

Common errors

Warnings

Install

Imports

Quickstart

This quickstart initializes all necessary face-api.js models, starts a webcam stream, and continuously performs real-time face detection, landmark identification, expression recognition, and age/gender estimation, drawing the results onto an HTML canvas overlaid on the video feed.

import * as faceapi from 'face-api.js';

// Grab the <video> element and the <canvas> overlay the demo draws onto.
// Fail fast with a clear message if either is missing from the page: the
// original `as` casts silently hid a possible null, which would otherwise
// surface later as an opaque "cannot set property of null" error.
const video = document.getElementById('video') as HTMLVideoElement;
const canvas = document.getElementById('overlay') as HTMLCanvasElement;
if (!video || !canvas) {
  throw new Error('face-api quickstart: #video and/or #overlay element not found in the DOM');
}

/**
 * Loads all face-api.js models used by the demo, then starts the webcam.
 *
 * All five networks are fetched in parallel from the public `/models` path.
 * The original code had no error handling here: a missing model file or a
 * network failure rejected inside the DOMContentLoaded listener and became
 * an unhandled promise rejection. Catch and log instead so failures are
 * visible in the console.
 */
async function initializeFaceApi() {
  try {
    // Ensure models are served from a public path (e.g., /models folder)
    await Promise.all([
      faceapi.nets.tinyFaceDetector.load('/models'),
      faceapi.nets.faceLandmark68Net.load('/models'),
      faceapi.nets.faceRecognitionNet.load('/models'),
      faceapi.nets.faceExpressionNet.load('/models'),
      faceapi.nets.ageGenderNet.load('/models')
    ]);
    console.log('All face-api.js models loaded successfully.');
    startWebcamStream();
  } catch (err) {
    console.error('Failed to load face-api.js models:', err);
  }
}

/**
 * Opens the user's webcam, pipes it into the video element, and runs a
 * detection pass roughly every 100 ms, drawing boxes, landmarks,
 * expressions, and age/gender labels onto the overlay canvas.
 *
 * Fixes vs. the original:
 * - Use `videoWidth`/`videoHeight` (the intrinsic stream dimensions) instead
 *   of `video.width`/`video.height`. The latter are the HTML attributes and
 *   default to 0 when not set in markup, which made the overlay canvas
 *   zero-sized and broke `resizeResults`.
 * - Guard the interval callback so a slow detection pass (often > 100 ms on
 *   modest hardware) cannot stack up overlapping async invocations.
 */
async function startWebcamStream() {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ video: true });
    video.srcObject = stream;
    video.onloadedmetadata = () => {
      // Intrinsic dimensions of the camera stream, known once metadata loads.
      const displaySize = { width: video.videoWidth, height: video.videoHeight };
      faceapi.matchDimensions(canvas, displaySize);

      // In-flight flag: skip a tick if the previous pass hasn't finished.
      let detecting = false;
      setInterval(async () => {
        if (detecting) return;
        detecting = true;
        try {
          const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
            .withFaceLandmarks()
            .withFaceExpressions()
            .withAgeAndGender()
            .withFaceDescriptors();

          // Scale detector-space results back to the on-screen display size.
          const resizedDetections = faceapi.resizeResults(detections, displaySize);

          canvas.getContext('2d')?.clearRect(0, 0, canvas.width, canvas.height);

          faceapi.draw.drawDetections(canvas, resizedDetections);
          faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
          faceapi.draw.drawFaceExpressions(canvas, resizedDetections);

          // Age/gender have no dedicated drawer; render them as a text field
          // anchored at the bottom-left corner of each detection box.
          resizedDetections.forEach(detection => {
            const { age, gender, genderProbability } = detection;
            new faceapi.draw.DrawTextField(
              [
                `${faceapi.utils.round(age, 0)} years`,
                `${gender} (${faceapi.utils.round(genderProbability)})`
              ],
              detection.detection.box.bottomLeft
            ).draw(canvas);
          });
        } finally {
          detecting = false;
        }
      }, 100); // Aim for one detection pass every 100ms
    };
  } catch (err) {
    console.error("Error accessing webcam or initializing Face API:", err);
  }
}

document.addEventListener('DOMContentLoaded', initializeFaceApi);

view raw JSON →