|
|
@@ -1,5 +1,6 @@ |
|
|
|
import { Component, ElementRef, OnInit, ViewChild } from '@angular/core'; |
|
|
|
import { detectSingleFace, loadFaceExpressionModel, loadFaceLandmarkModel, loadFaceLandmarkTinyModel, loadFaceRecognitionModel, loadSsdMobilenetv1Model, loadTinyFaceDetectorModel, TinyFaceDetectorOptions } from 'face-api.js'; |
|
|
|
import { CameraPreview, CameraPreviewPictureOptions, CameraPreviewOptions, CameraPreviewDimensions } from '@ionic-native/camera-preview/ngx'; |
|
|
|
import { Location } from '@angular/common'; |
|
|
|
import { ModalController } from '@ionic/angular'; |
|
|
|
|
|
|
@@ -12,6 +13,7 @@ export class ArFanCamPage implements OnInit { |
|
|
|
@ViewChild('videoElement') videoElement: ElementRef<HTMLVideoElement>; |
|
|
|
@ViewChild('glassesElement') glassesElement: ElementRef<HTMLImageElement>; |
|
|
|
@ViewChild('canvasElement') canvasElement: ElementRef<HTMLCanvasElement>; |
|
|
|
@ViewChild('sourceImageElement') sourceImageElement: ElementRef<HTMLImageElement>; |
|
|
|
@ViewChild('sourceCanvasElement') sourceCanvasElement: ElementRef<HTMLCanvasElement>; |
|
|
|
|
|
|
|
mediaStream: MediaStream|null = null; |
|
|
@@ -34,8 +36,20 @@ export class ArFanCamPage implements OnInit { |
|
|
|
|
|
|
|
/**
 * Injects navigation/modal services plus the native camera-preview plugin,
 * and wires the hidden source <img> so that whenever a snapshot is assigned
 * to its `src`, the image is immediately rasterized onto the source <canvas>
 * (scaled to the canvas dimensions) for face-api.js to consume.
 *
 * NOTE(review): `sourceImageElement` / `sourceCanvasElement` are @ViewChild
 * refs, which Angular populates only after view init — touching
 * `nativeElement` in the constructor will throw at runtime. This wiring
 * likely belongs in ngAfterViewInit; TODO confirm against the template.
 */
constructor(
  private location: Location,
  private modalController: ModalController,
  private cameraPreview: CameraPreview
) {
  this.sourceImageElement.nativeElement.onload = () => {
    const context = this.sourceCanvasElement.nativeElement.getContext('2d');
    // Draw the freshly loaded snapshot, stretched to the canvas size.
    context.drawImage(
      this.sourceImageElement.nativeElement,
      0,
      0,
      this.sourceCanvasElement.nativeElement.width,
      this.sourceCanvasElement.nativeElement.height
    );
  };
}
|
|
|
|
|
|
|
loadNeuralNetModels = async () => { |
|
|
|
await loadTinyFaceDetectorModel('/assets/weights'); |
|
|
@@ -56,19 +70,38 @@ export class ArFanCamPage implements OnInit { |
|
|
|
} |
|
|
|
|
|
|
|
getCameraStream = async () => { |
|
|
|
const stream = await window.navigator.mediaDevices.getUserMedia({ |
|
|
|
video: { |
|
|
|
facingMode: 'user', |
|
|
|
width: this.width, |
|
|
|
height: this.height |
|
|
|
}, |
|
|
|
}).then((stream) => { |
|
|
|
this.mediaStream = stream; |
|
|
|
this.videoElement.nativeElement.srcObject = stream; |
|
|
|
}).catch(err => alert(JSON.stringify(err))); |
|
|
|
if (this.cameraPreview) { |
|
|
|
const cameraPreviewOpts: CameraPreviewOptions = { |
|
|
|
x: 0, |
|
|
|
y: 0, |
|
|
|
width: window.screen.width, |
|
|
|
height: window.screen.height, |
|
|
|
camera: 'rear', |
|
|
|
tapPhoto: true, |
|
|
|
previewDrag: true, |
|
|
|
toBack: true, |
|
|
|
alpha: 1 |
|
|
|
} |
|
|
|
this.cameraPreview.startCamera(cameraPreviewOpts); |
|
|
|
} else { |
|
|
|
const stream = await window.navigator.mediaDevices.getUserMedia({ |
|
|
|
video: { |
|
|
|
facingMode: 'user', |
|
|
|
width: this.width, |
|
|
|
height: this.height |
|
|
|
}, |
|
|
|
}).then((stream) => { |
|
|
|
this.mediaStream = stream; |
|
|
|
this.videoElement.nativeElement.srcObject = stream; |
|
|
|
}).catch(err => alert(JSON.stringify(err))); |
|
|
|
} |
|
|
|
}; |
|
|
|
|
|
|
|
stopCameraStream = async () => { |
|
|
|
if (this.cameraPreview) { |
|
|
|
this.cameraPreview.stopCamera(); |
|
|
|
} |
|
|
|
|
|
|
|
if (this.mediaStream) { |
|
|
|
this.mediaStream.getVideoTracks().forEach(track => { |
|
|
|
track.stop(); |
|
|
@@ -116,12 +149,25 @@ export class ArFanCamPage implements OnInit { |
|
|
|
context.clearRect(0, 0, window.innerWidth, window.innerHeight); |
|
|
|
} |
|
|
|
|
|
|
|
detectAndDrawFace = async () => { |
|
|
|
const tinyFaceDetectorOptions = new TinyFaceDetectorOptions(); |
|
|
|
/**
 * Grabs a single base64-encoded frame from the native camera preview and
 * loads it into the hidden source <img>; the image's onload handler then
 * draws it onto the source canvas.
 */
async getSnapshotFromPreview() {
  this.sourceImageElement.nativeElement.src = await this.cameraPreview.takeSnapshot();
}
|
|
|
|
|
|
|
/**
 * Copies the current <video> frame onto the source canvas, scaled to the
 * canvas dimensions, so face detection can run against a still image.
 */
getSnapshotFromVideo() {
  const { nativeElement: canvas } = this.sourceCanvasElement;
  const ctx = canvas.getContext('2d');
  ctx.drawImage(this.videoElement.nativeElement, 0, 0, canvas.width, canvas.height);
}
|
|
|
|
|
|
|
detectAndDrawFace = async () => { |
|
|
|
const tinyFaceDetectorOptions = new TinyFaceDetectorOptions(); |
|
|
|
|
|
|
|
if (this.cameraPreview) { |
|
|
|
await this.getSnapshotFromPreview(); |
|
|
|
} else { |
|
|
|
this.getSnapshotFromVideo(); |
|
|
|
} |
|
|
|
|
|
|
|
let detectionWithLandmarks = await detectSingleFace(this.sourceCanvasElement.nativeElement, tinyFaceDetectorOptions).withFaceLandmarks(true); |
|
|
|
|
|
|
|