瀏覽代碼

Get the face detection input from a source canvas instead of a video element

master
Adwaith Rao 4 年之前
父節點
當前提交
6dc04a3d1e
共有 3 個檔案被更改,包括 9 行新增、2 行刪除
  1. +1
    -0
      src/app/ar-fan-cam/ar-fan-cam.page.html
  2. +2
    -1
      src/app/ar-fan-cam/ar-fan-cam.page.scss
  3. +6
    -1
      src/app/ar-fan-cam/ar-fan-cam.page.ts

+ 1
- 0
src/app/ar-fan-cam/ar-fan-cam.page.html 查看文件

@@ -5,6 +5,7 @@
<div class="container" #containerElement>
<img #glassesElement class="glass-image" [ngStyle]="glassProperties" src="/assets/ar-accessories/glass.png" alt=""/>
<video id="playback-video" width="{{ width }}" height="{{ height }}" #videoElement autoPlay></video>
<canvas id="source-canvas" #sourceCanvasElement width="{{ width }}" height="{{ height }}"></canvas>
<canvas id="result-canvas" #canvasElement width="{{ width }}" height="{{ height }}"></canvas>
</div>



+ 2
- 1
src/app/ar-fan-cam/ar-fan-cam.page.scss 查看文件

@@ -50,6 +50,7 @@
position: absolute;
top: 0px;
left: 0px;
z-index: 1;
transform-origin: 15% 50%;
}

@@ -57,7 +58,7 @@
display: block;
}

#result-canvas, #three-container {
#result-canvas, #source-canvas, #three-container {
position: absolute;
top: 0;
left: 0;


+ 6
- 1
src/app/ar-fan-cam/ar-fan-cam.page.ts 查看文件

@@ -12,6 +12,7 @@ export class ArFanCamPage implements OnInit {
@ViewChild('videoElement') videoElement: ElementRef<HTMLVideoElement>;
@ViewChild('glassesElement') glassesElement: ElementRef<HTMLImageElement>;
@ViewChild('canvasElement') canvasElement: ElementRef<HTMLCanvasElement>;
@ViewChild('sourceCanvasElement') sourceCanvasElement: ElementRef<HTMLCanvasElement>;
mediaStream: MediaStream|null = null;
capturedImageStrings: Array<string> = [];
@@ -118,7 +119,11 @@ export class ArFanCamPage implements OnInit {
detectAndDrawFace = async () => {
const tinyFaceDetectorOptions = new TinyFaceDetectorOptions();

let detectionWithLandmarks = await detectSingleFace(this.videoElement.nativeElement, tinyFaceDetectorOptions).withFaceLandmarks(true);
const sourceCanvas = this.sourceCanvasElement.nativeElement;
const context = sourceCanvas.getContext('2d');
context.drawImage(this.videoElement.nativeElement, 0, 0, sourceCanvas.width, sourceCanvas.height);

let detectionWithLandmarks = await detectSingleFace(this.sourceCanvasElement.nativeElement, tinyFaceDetectorOptions).withFaceLandmarks(true);
if (detectionWithLandmarks) {
const leftEye = detectionWithLandmarks.landmarks.getLeftEye();


Loading…
取消
儲存