跳到主要内容

canvas

2024年02月19日
柏拉文
越努力,越幸运

一、获取视频帧数据, 指定宽度


<div class="operation">
<button id="capture">捕获</button>
</div>
<div class="video-container">
<video id="video" autoplay></video>
</div>
<div class="canvas-container">
<canvas id="canvas" width="640" height="480" hidden></canvas>
</div>
<div class="capture-container">
<img id="capture-image" />
</div>

<script>
// DOM handles for this demo: live preview video, capture button,
// result <img>, and the hidden canvas used to copy frames.
// NOTE: the original also looked up '#start-record' / '#stop-record',
// but those elements do not exist in this snippet's markup and the
// resulting null handles were never used — removed.
const video = document.getElementById('video');
const captureEl = document.getElementById('capture');
const captureImageEl = document.getElementById('capture-image');
const canvas = document.getElementById('canvas');
// 2D context used to draw the current video frame onto the canvas.
const context = canvas.getContext('2d');

// Ask for camera + microphone access and start the live preview.
async function prepareRecord() {
  const constraints = { video: true, audio: true };
  const stream = await navigator.mediaDevices.getUserMedia(constraints);
  video.srcObject = stream;
  video.play();
}

/**
 * Query the Permissions API for a named permission.
 *
 * @param {string} name - Permission name, e.g. 'camera' or 'microphone'.
 * @returns {Promise<{state: string}>} The PermissionStatus, or a
 *   PermissionStatus-shaped fallback ({ state: 'prompt' }) when the
 *   Permissions API is unavailable or rejects the name (e.g. some
 *   browsers do not recognize 'camera'/'microphone').
 */
async function checkPermissions(name) {
  try {
    return await navigator.permissions.query({ name });
  } catch (error) {
    // BUG FIX: the original returned `false`, so callers read `.state`
    // off a boolean (yielding undefined). Return a consistent object;
    // 'prompt' is still treated as not granted by callers.
    return { state: 'prompt' };
  }
}

// Start the preview only when both device permissions are already granted.
async function run() {
  const camera = await checkPermissions('camera');
  const microphone = await checkPermissions('microphone');
  const bothGranted =
    camera.state === 'granted' && microphone.state === 'granted';
  if (!bothGranted) {
    alert('请允许使用摄像头和麦克风');
    return;
  }
  prepareRecord();
}

/**
 * Draw the current video frame onto the canvas — horizontally mirrored and
 * scaled to a target width — and return its pixel data.
 *
 * The target height is derived from the source aspect ratio so the frame is
 * not distorted.
 *
 * @param {Object} options
 * @param {CanvasRenderingContext2D} options.context - 2D canvas context.
 * @param {{width: number, height: number}} options.videoSize - Source frame size.
 * @param {HTMLVideoElement} options.videoElement - Video to sample.
 * @param {number} options.imageDataSize - Target width in pixels.
 * @returns {ImageData} Pixels of the mirrored, scaled frame.
 */
function getImageDataFromVideoElement({
  context,
  videoSize,
  videoElement,
  imageDataSize
}) {
  // Preserve aspect ratio: height follows from the requested width.
  const targetHeight = (imageDataSize * videoSize.height) / videoSize.width;
  context.clearRect(0, 0, imageDataSize, targetHeight);
  context.save();
  // Mirror horizontally (selfie view): flip around the vertical axis.
  context.translate(imageDataSize, 0);
  context.scale(-1, 1);
  context.drawImage(
    videoElement,
    0,
    0,
    videoSize.width,
    videoSize.height,
    0,
    0,
    imageDataSize,
    targetHeight
  );
  context.restore();
  // BUG FIX: the original read an imageDataSize × imageDataSize square,
  // which included blank rows below the drawn frame (e.g. 320×320 read for
  // a 320×240 drawing). Read exactly the drawn area.
  return context.getImageData(0, 0, imageDataSize, targetHeight);
}

// On "捕获": sample the current frame's pixels and mirror the canvas
// contents into the result <img> as a PNG data URL.
captureEl.addEventListener('click', () => {
  const frame = getImageDataFromVideoElement({
    context,
    videoElement: video,
    imageDataSize: 320,
    videoSize: { width: 640, height: 480 }
  });
  console.log('imageData', frame);
  captureImageEl.src = canvas.toDataURL('image/png');
});

run();
</script>

二、获取视频帧数据, 指定宽、高


<div class="operation">
<button id="capture">捕获</button>
</div>
<div class="video-container">
<video id="video" autoplay></video>
</div>
<div class="canvas-container">
<canvas id="canvas" width="640" height="480" hidden></canvas>
</div>
<div class="capture-container">
<img id="capture-image" />
</div>

<script>
// DOM handles for this demo: live preview video, capture button,
// result <img>, and the hidden canvas used to copy frames.
// NOTE: the original also looked up '#start-record' / '#stop-record',
// but those elements do not exist in this snippet's markup and the
// resulting null handles were never used — removed.
const video = document.getElementById('video');
const captureEl = document.getElementById('capture');
const captureImageEl = document.getElementById('capture-image');
const canvas = document.getElementById('canvas');
// 2D context used to draw the current video frame onto the canvas.
const context = canvas.getContext('2d');

// Request the camera/microphone stream and play it in the preview element.
async function prepareRecord() {
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: true,
    video: true
  });
  video.srcObject = stream;
  video.play();
}

/**
 * Query the Permissions API for a named permission.
 *
 * @param {string} name - Permission name, e.g. 'camera' or 'microphone'.
 * @returns {Promise<{state: string}>} The PermissionStatus, or a
 *   PermissionStatus-shaped fallback ({ state: 'prompt' }) when the
 *   Permissions API is unavailable or rejects the name.
 */
async function checkPermissions(name) {
  try {
    return await navigator.permissions.query({ name });
  } catch (error) {
    // BUG FIX: the original returned `false`, so callers read `.state`
    // off a boolean (yielding undefined). Return a consistent object.
    return { state: 'prompt' };
  }
}

// Only start the preview when camera AND microphone are already granted.
async function run() {
  const camera = await checkPermissions('camera');
  const microphone = await checkPermissions('microphone');
  const bothGranted =
    camera.state === 'granted' && microphone.state === 'granted';
  if (!bothGranted) {
    alert('请允许使用摄像头和麦克风');
    return;
  }
  prepareRecord();
}

/**
 * Render the current video frame onto the canvas — mirrored horizontally and
 * scaled to an explicit width/height — and return its pixel data.
 *
 * @param {Object} options
 * @param {CanvasRenderingContext2D} options.context - 2D canvas context.
 * @param {{width: number, height: number}} options.videoSize - Source frame size.
 * @param {HTMLVideoElement} options.videoElement - Video to sample.
 * @param {{width: number, height: number}} options.imageDataSize - Target size.
 * @returns {ImageData} Pixels of the mirrored, scaled frame.
 */
function getImageDataFromVideoElement({
  context,
  videoSize,
  videoElement,
  imageDataSize
}) {
  const { width: destWidth, height: destHeight } = imageDataSize;
  context.clearRect(0, 0, destWidth, destHeight);
  context.save();
  // Flip around the vertical axis so the output is mirrored (selfie view).
  context.translate(destWidth, 0);
  context.scale(-1, 1);
  context.drawImage(
    videoElement,
    0, 0, videoSize.width, videoSize.height,
    0, 0, destWidth, destHeight
  );
  context.restore();
  return context.getImageData(0, 0, destWidth, destHeight);
}

// On "捕获": grab the current frame at full 640×480 and show the canvas
// contents in the result <img> as a PNG data URL.
captureEl.addEventListener('click', () => {
  const frame = getImageDataFromVideoElement({
    context,
    videoElement: video,
    videoSize: { width: 640, height: 480 },
    imageDataSize: { width: 640, height: 480 }
  });
  console.log('imageData', frame);
  captureImageEl.src = canvas.toDataURL('image/png');
});

run();
</script>

三、实时获取视频帧数据, 指定宽度


<div class="operation">
<button id="start-record">录制</button>
<button id="stop-record">停止</button>
</div>
<div class="video-container">
<video id="video" autoplay></video>
</div>
<div class="canvas-container">
<canvas
id="canvasElement"
width="640"
height="480"
style="display: none"
></canvas>
</div>

<script>
// Active MediaRecorder instance; created in prepareRecord().
let mediaRecorder = null;
// Recorded media chunks collected from MediaRecorder 'dataavailable' events.
const recordedBlobs = [];
// DOM handles: live preview video, record/stop buttons, hidden capture canvas.
const video = document.getElementById('video');
const startRecordEl = document.getElementById('start-record');
const stopRecordEl = document.getElementById('stop-record');
const canvas = document.getElementById('canvasElement');
// 2D context used to copy video frames onto the hidden canvas.
const context = canvas.getContext('2d');

/**
 * Draw the current video frame onto the canvas — horizontally mirrored and
 * scaled to a target width — and return its pixel data.
 *
 * The target height is derived from the source aspect ratio so the frame is
 * not distorted.
 *
 * @param {Object} options
 * @param {CanvasRenderingContext2D} options.context - 2D canvas context.
 * @param {{width: number, height: number}} options.videoSize - Source frame size.
 * @param {HTMLVideoElement} options.videoElement - Video to sample.
 * @param {number} options.imageDataSize - Target width in pixels.
 * @returns {ImageData} Pixels of the mirrored, scaled frame.
 */
function getImageDataFromVideoElement({
  context,
  videoSize,
  videoElement,
  imageDataSize
}) {
  // Preserve aspect ratio: height follows from the requested width.
  const targetHeight = (imageDataSize * videoSize.height) / videoSize.width;
  context.clearRect(0, 0, imageDataSize, targetHeight);
  context.save();
  // Mirror horizontally (selfie view): flip around the vertical axis.
  context.translate(imageDataSize, 0);
  context.scale(-1, 1);
  context.drawImage(
    videoElement,
    0,
    0,
    videoSize.width,
    videoSize.height,
    0,
    0,
    imageDataSize,
    targetHeight
  );
  context.restore();
  // BUG FIX: the original read an imageDataSize × imageDataSize square,
  // which included blank rows below the drawn frame. Read only the drawn area.
  return context.getImageData(0, 0, imageDataSize, targetHeight);
}


// Sample the current video frame once per animation frame while recording.
// BUG FIX: the original rescheduled itself unconditionally, so the rAF loop
// kept running (and logging) forever after stopRecord() ended the stream.
// Stop the loop once the video no longer has a source stream.
function captureFrame() {
  if (!video.srcObject) {
    return;
  }
  const imageData = getImageDataFromVideoElement({
    context,
    imageDataSize: 320,
    videoElement: video,
    videoSize: { width: 640, height: 480 }
  });
  console.log('imageData', imageData);
  requestAnimationFrame(captureFrame);
}

// Begin recording (one data chunk every 5 s) and start the frame-capture loop.
function startRecord() {
  const TIME_SLICE_MS = 5000;
  mediaRecorder.start(TIME_SLICE_MS);
  captureFrame();
}

// Stop every track of the preview stream, detach it, and end the recording.
async function stopRecord() {
  const stream = video.srcObject;
  for (const track of stream.getTracks()) {
    track.stop();
  }
  video.srcObject = null;
  mediaRecorder.stop();
}

/**
 * Acquire the camera/microphone stream, start the live preview, and create a
 * MediaRecorder that accumulates recorded chunks into `recordedBlobs`.
 *
 * Falls back to the browser's default container when 'video/webm' is not
 * supported, instead of letting the MediaRecorder constructor throw.
 */
async function prepareRecord() {
  const stream = await navigator.mediaDevices.getUserMedia({
    video: true,
    audio: true
  });

  video.srcObject = stream;
  video.play();

  // BUG FIX: new MediaRecorder(stream, { mimeType: 'video/webm' }) throws
  // NotSupportedError on browsers without WebM support — probe first.
  const options = MediaRecorder.isTypeSupported('video/webm')
    ? { mimeType: 'video/webm' }
    : {};
  mediaRecorder = new MediaRecorder(stream, options);

  mediaRecorder.ondataavailable = event => {
    if (event.data && event.data.size > 0) {
      recordedBlobs.push(event.data);
    }
  };
}

/**
 * Query the Permissions API for a named permission.
 *
 * @param {string} name - Permission name, e.g. 'camera' or 'microphone'.
 * @returns {Promise<{state: string}>} The PermissionStatus, or a
 *   PermissionStatus-shaped fallback ({ state: 'prompt' }) when the
 *   Permissions API is unavailable or rejects the name.
 */
async function checkPermissions(name) {
  try {
    return await navigator.permissions.query({ name });
  } catch (error) {
    // BUG FIX: the original returned `false`, so callers read `.state`
    // off a boolean (yielding undefined). Return a consistent object.
    return { state: 'prompt' };
  }
}

// Wire up the record/stop buttons only when both permissions are granted.
async function run() {
  const camera = await checkPermissions('camera');
  const microphone = await checkPermissions('microphone');
  const bothGranted =
    camera.state === 'granted' && microphone.state === 'granted';
  if (!bothGranted) {
    alert('请允许使用摄像头和麦克风');
    return;
  }
  prepareRecord();
  startRecordEl.addEventListener('click', startRecord);
  stopRecordEl.addEventListener('click', stopRecord);
}

run();
</script>

四、实时获取视频帧数据, 指定宽、高


<div class="operation">
<button id="start-record">录制</button>
<button id="stop-record">停止</button>
</div>
<div class="video-container">
<video id="video" autoplay></video>
</div>
<div class="canvas-container">
<canvas
id="canvasElement"
width="640"
height="480"
style="display: none"
></canvas>
</div>

<script>
// Active MediaRecorder instance; created in prepareRecord().
let mediaRecorder = null;
// Recorded media chunks collected from MediaRecorder 'dataavailable' events.
const recordedBlobs = [];
// DOM handles: live preview video, record/stop buttons, hidden capture canvas.
const video = document.getElementById('video');
const startRecordEl = document.getElementById('start-record');
const stopRecordEl = document.getElementById('stop-record');
const canvas = document.getElementById('canvasElement');
// 2D context used to copy video frames onto the hidden canvas.
const context = canvas.getContext('2d');

/**
 * Render the current video frame onto the canvas — mirrored horizontally and
 * scaled to an explicit width/height — and return its pixel data.
 *
 * @param {Object} options
 * @param {CanvasRenderingContext2D} options.context - 2D canvas context.
 * @param {{width: number, height: number}} options.imageDataSize - Target size.
 * @param {HTMLVideoElement} options.videoElement - Video to sample.
 * @param {{width: number, height: number}} options.videoSize - Source frame size.
 * @returns {ImageData} Pixels of the mirrored, scaled frame.
 */
function getImageDataFromVideoElement({
  context,
  imageDataSize,
  videoElement,
  videoSize
}) {
  const { width: destWidth, height: destHeight } = imageDataSize;
  context.clearRect(0, 0, destWidth, destHeight);
  context.save();
  // Flip around the vertical axis so the output is mirrored (selfie view).
  context.translate(destWidth, 0);
  context.scale(-1, 1);
  context.drawImage(
    videoElement,
    0, 0, videoSize.width, videoSize.height,
    0, 0, destWidth, destHeight
  );
  context.restore();
  return context.getImageData(0, 0, destWidth, destHeight);
}

// Sample the current video frame once per animation frame while recording.
// BUG FIX: the original rescheduled itself unconditionally, so the rAF loop
// kept running (and logging) forever after stopRecord() ended the stream.
// Stop the loop once the video no longer has a source stream.
function captureFrame() {
  if (!video.srcObject) {
    return;
  }
  const imageData = getImageDataFromVideoElement({
    context,
    imageDataSize: { width: 640, height: 480 },
    videoElement: video,
    videoSize: { width: 640, height: 480 }
  });
  console.log('imageData', imageData);
  requestAnimationFrame(captureFrame);
}

// Begin recording (one data chunk every 5 s) and start the frame-capture loop.
function startRecord() {
  const TIME_SLICE_MS = 5000;
  mediaRecorder.start(TIME_SLICE_MS);
  captureFrame();
}

// Stop every track of the preview stream, detach it, and end the recording.
async function stopRecord() {
  const stream = video.srcObject;
  for (const track of stream.getTracks()) {
    track.stop();
  }
  video.srcObject = null;
  mediaRecorder.stop();
}

/**
 * Acquire the camera/microphone stream, start the live preview, and create a
 * MediaRecorder that accumulates recorded chunks into `recordedBlobs`.
 *
 * Falls back to the browser's default container when 'video/webm' is not
 * supported, instead of letting the MediaRecorder constructor throw.
 */
async function prepareRecord() {
  const stream = await navigator.mediaDevices.getUserMedia({
    video: true,
    audio: true
  });

  video.srcObject = stream;
  video.play();

  // BUG FIX: new MediaRecorder(stream, { mimeType: 'video/webm' }) throws
  // NotSupportedError on browsers without WebM support — probe first.
  const options = MediaRecorder.isTypeSupported('video/webm')
    ? { mimeType: 'video/webm' }
    : {};
  mediaRecorder = new MediaRecorder(stream, options);

  mediaRecorder.ondataavailable = event => {
    if (event.data && event.data.size > 0) {
      recordedBlobs.push(event.data);
    }
  };
}

/**
 * Query the Permissions API for a named permission.
 *
 * @param {string} name - Permission name, e.g. 'camera' or 'microphone'.
 * @returns {Promise<{state: string}>} The PermissionStatus, or a
 *   PermissionStatus-shaped fallback ({ state: 'prompt' }) when the
 *   Permissions API is unavailable or rejects the name.
 */
async function checkPermissions(name) {
  try {
    return await navigator.permissions.query({ name });
  } catch (error) {
    // BUG FIX: the original returned `false`, so callers read `.state`
    // off a boolean (yielding undefined). Return a consistent object.
    return { state: 'prompt' };
  }
}

// Wire up the record/stop buttons only when both permissions are granted.
async function run() {
  const camera = await checkPermissions('camera');
  const microphone = await checkPermissions('microphone');
  const bothGranted =
    camera.state === 'granted' && microphone.state === 'granted';
  if (!bothGranted) {
    alert('请允许使用摄像头和麦克风');
    return;
  }
  prepareRecord();
  startRecordEl.addEventListener('click', startRecord);
  stopRecordEl.addEventListener('click', stopRecord);
}

run();
</script>