How to play audio in Three.js

I have some problems with Three.js audio and everything related to it.
How do you make an .mp3 file play when you open the page in Three.js? I haven't found any examples or tutorials on this anywhere. I have the following code:
var listener = new THREE.AudioListener();
camera.add( listener );
// create a global audio source
var sound = new THREE.Audio( listener );
var audioLoader = new THREE.AudioLoader();
//Load a sound and set it as the Audio object's buffer
audioLoader.load( 'sounds/ambient.ogg', function( buffer ) {
    sound.setBuffer( buffer );
    sound.setLoop( true );
    sound.setVolume( 0.5 );
    sound.play();
});
How do I make it play?

Try this link: https://codepen.io/EllenProbst/pen/RQQmJK. Below, I have removed most of the JavaScript that was not necessary to make the audio play.
The example uses an .m4a file, but it should work for .mp3 and .ogg formats as well.
// Music by The War On Drugs - Thinking Of A Place
var camera, scene, renderer;
var stream = "https://cdn.rawgit.com/ellenprobst/web-audio-api-with-Threejs/57582104/lib/TheWarOnDrugs.m4a";

// init
function init() {
    // scene
    scene = new THREE.Scene();
    scene.fog = new THREE.FogExp2(0x01131e, 0.025);

    // camera
    camera = new THREE.PerspectiveCamera(55, window.innerWidth / window.innerHeight, 0.01, 1000);
    camera.position.set(0.1, -0.14, 0.8);

    // renderer
    renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true });
    renderer.setSize(window.innerWidth, window.innerHeight);
    renderer.setClearColor("#01131E");
    document.body.appendChild(renderer.domElement);

    // audio
    var audioLoader = new THREE.AudioLoader();
    var listener = new THREE.AudioListener();
    var audio = new THREE.Audio(listener);

    audioLoader.load(stream, function(buffer) {
        audio.setBuffer(buffer);
        audio.setLoop(true);
        audio.play();
    });
}

// animate
function animate() {
    requestAnimationFrame(animate);
    render();
}

// render
function render() {
    renderer.render(scene, camera);
}

init();
animate();
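One caveat that often explains "nothing plays": most browsers now block audio until the user has interacted with the page, so audio.play() inside the loader callback can be silently ignored while the AudioContext stays suspended. A minimal sketch of a workaround, assuming listener and audio are reachable from the handler (move their declarations out of init() if needed):
// Start (or resume) playback on the first user gesture.
// Browsers typically keep the AudioContext suspended until then.
document.body.addEventListener('click', function () {
    if (listener.context.state === 'suspended') {
        listener.context.resume();
    }
    if (!audio.isPlaying) {
        audio.play();
    }
}, { once: true });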

You should include the onProgress and onError callbacks:
// onProgress callback
function ( xhr ) {
    console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
},

// onError callback
function ( err ) {
    console.log( 'An error happened' );
}
So your code will be:
var listener = new THREE.AudioListener();
camera.add( listener );

// create a global audio source
var sound = new THREE.Audio( listener );

var audioLoader = new THREE.AudioLoader();

// load a sound and set it as the Audio object's buffer
audioLoader.load(
    'sounds/ambient.ogg',
    // onLoad callback
    function ( buffer ) {
        sound.setBuffer( buffer );
        sound.setLoop( true );
        sound.setVolume( 0.5 );
        sound.play();
    },
    // onProgress callback
    function ( xhr ) {
        console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
    },
    // onError callback
    function ( err ) {
        console.log( 'An error happened' );
    }
);
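One caveat with the onProgress callback: xhr.total can be 0 when the server does not send a Content-Length header (e.g. chunked or compressed responses), which makes the percentage come out as NaN or Infinity. A defensive variant:
// onProgress callback, guarded against unknown totals
function ( xhr ) {
    if ( xhr.lengthComputable ) {
        console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
    }
}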

Related

MediaRecorder: How to stop Video recording and play back in same video element?

I am using a <video> element with MediaRecorder:
function getUserMediaSuccess(stream) {
    $videoElement[0].srcObject = stream;
    $videoElement[0].autoplay = true;
    $videoElement[0].muted = true;
    $videoElement[0].controls = false;
    mediaRecorder = new MediaRecorder(stream, settings.recorderOptions);
}
Once recording is finished, I want to play the recorded chunks.
I tried with:
const blob = new Blob(chunks, { 'type' : settings.recorderOptions.mimeType});
$videoElement[0].src = window.URL.createObjectURL(blob);
and also with
$videoElement[0].pause();
$videoElement[0].removeAttribute('src');
$videoElement[0].load();
$videoElement[0].src = settings.filename;
$videoElement[0].controls = true;
I cannot stop the video element from showing the real-time webcam feed.
I can play back the recorded video in ANOTHER video element, but I want to use the SAME element that displays the webcam.
I also tried:
localStream.getTracks().forEach(function(track) {
    track.stop();
});
This gives a black screen, but then I am unable to play back the recorded video.
I think you have done most things correctly.
Based on the Mozilla MediaRecorder example and some more research, I think the trick is to switch between using srcObject and src:
srcObject for the MediaStream (the getUserMedia live preview)
src with window.URL.createObjectURL for the recorded blob
This snippet works if run on localhost or over HTTPS (an embedded snippet may not fulfill all the security requirements needed to allow access to getUserMedia):
const videoEl = document.getElementById('theVideoElement');
let mediaRecorder = null;
let mediaChunks = [];

function recordStart() {
    console.log('recordStart..');
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({
            audio: true,
            video: true,
        })
        .then(stream => {
            videoEl.srcObject = stream;
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.addEventListener('dataavailable', event => {
                mediaChunks.push(event.data);
            });
            mediaRecorder.addEventListener('stop', event => {
                console.log("recorder stopped");
                const blob = new Blob(mediaChunks, { 'type': 'video/webm' });
                // declare the URL locally instead of leaking a global
                const mediaBlobURL = window.URL.createObjectURL(blob);
                mediaChunks = [];
                videoEl.src = mediaBlobURL;
            });
            mediaRecorder.start();
            console.log("recorder started", mediaRecorder.state);
            window.setTimeout(event => {
                console.log("time is over.");
                mediaRecorder.stop();
                console.log(mediaRecorder.state);
                console.log("recorder stopped");
                // stop the getUserMedia stream - this way the cam and mic get released.
                for (const track of stream.getTracks()) {
                    track.stop();
                }
                videoEl.srcObject = null;
                console.log("stream stopped.");
            }, 3000);
        })
        .catch(err => {
            console.error(`The following getUserMedia error occurred:\n ${err}`);
        });
    } else {
        console.error('getUserMedia not supported on your browser!');
    }
}

console.info('******************************************');
window.addEventListener('load', (event) => {
    console.info('All resources finished loading.');
    const buttonEl = document.getElementById('button_start');
    buttonEl.addEventListener('click', (event) => {
        console.info('click!');
        recordStart();
    });
});
button {
    display: block;
}
video {
    display: block;
    border: solid 1px black;
}
<button id="button_start">
start
</button>
<video
id="theVideoElement"
autoplay
controls
>
</video>
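A small, optional refinement to the snippet above: every window.URL.createObjectURL call keeps its blob alive until the page unloads, so when recording repeatedly it is worth revoking the previous URL first. A sketch:
// Inside the 'stop' handler: release the previous recording's URL
// before pointing the element at the new one.
if (videoEl.src) {
    window.URL.revokeObjectURL(videoEl.src);
}
videoEl.src = mediaBlobURL;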

Upload audio blob after conversion from wav to mp3

function init() {
    var cfg = {};
    audio = new Recorder(cfg);
}

function toggle( btn ) { // audio && audio.record();
    if (audio instanceof Recorder) {
        var btnLabel = btn.firstChild.nodeValue;
        if (btnLabel === 'Record') {
            audio.record();
        } else {
            audio.stop();
            createPreview('recordings');
            audio.clear();
        }
        btn.firstChild.nodeValue = (btnLabel === 'Record') ? 'Stop' : 'Record';
        btn.setAttribute('class', (btn.getAttribute('class') === 'btn btn-primary') ? 'btn btn-danger' : 'btn btn-primary');
    } else {
        init();
        toggle(btn);
    }
}

function createPreview( containerId ) {
    // audio && audio.exportWAV( function(blob) {
    var targetContainer = document.getElementById(containerId);
    var timestamp = new Date().getTime();
    var filename = 'recording_' + timestamp;

    var div = document.createElement('div');

    var linkMP3 = document.createElement('a');
    linkMP3.setAttribute('id', 'MP3-' + timestamp);
    var iconMP3 = document.createElement('img');
    iconMP3.setAttribute('src', 'images/i-mp3.jpeg');

    var linkWAV = document.createElement('a');
    linkWAV.setAttribute('id', 'WAV-' + timestamp);
    var iconWAV = document.createElement('img');
    iconWAV.setAttribute('src', 'images/i-wav.jpeg');

    var player = document.createElement('audio');
    player.setAttribute('id', 'PLAYER-' + timestamp);
    player.controls = true;

    div.appendChild(player);
    div.appendChild(linkWAV);
    div.appendChild(linkMP3);
    targetContainer.appendChild(div);

    audio.export(function( mediaObj ) {
        if (mediaObj.blob.type == 'audio/mp3') {
            var url = mediaObj.url;
            targetLink = document.getElementById('MP3-' + timestamp);
            targetLink.href = url;
            targetLink.download = filename + '.mp3';
            targetLink.innerHTML = targetLink.download;
            saveAudio(url, filename);
        } else { // 'audio/wav'
            var url = URL.createObjectURL(mediaObj.blob);
            targetPlayer = document.getElementById('PLAYER-' + timestamp);
            targetLink = document.getElementById('WAV-' + timestamp);
            targetPlayer.src = url;
            targetLink.href = url;
            targetLink.download = filename + '.wav';
            targetLink.innerHTML = targetLink.download;
        }
    });
}

function saveAudio( url, filename ) {
    var firebaseUrl = 'your_firebase_url';
    if (firebaseUrl !== 'your_firebase_url') {
        console.info('>> saving audio: url');
        console.log(url);
        ref = new Firebase(firebaseUrl);
        ref.set({
            filetype: 'audio/mp3',
            base64Str: url,
            filename: filename + '.mp3'
        });
    } else {
        console.warn('Audio not saved to firebase because firebaseUrl is undefined.');
    }
}
I need to record audio in the browser (short clips, spoken voice, mono) and upload it in mp3 format. This script by Chris Geirman has almost everything that I need, except that instead of using Firebase, I'd like to use jQuery to upload the audio blobs to a folder on my server. I'm fairly new to all of this, but I'm guessing that I need to replace the saveAudio() function with my own uploadAudio() jQuery function (something like this), which will post to a script at /upload.php. So far so good(?), but I can't figure out from Chris's script exactly what it is that I should be uploading / passing to /upload.php. I'm planning to implement the script here.
OK, just in case it helps anyone: I managed to get it working using this approach from Soumen Basak.
function uploadAudio( blob ) {
    var reader = new FileReader();
    reader.onload = function(event) {
        var fd = {};
        fd["fname"] = "test.wav";
        fd["data"] = event.target.result;
        $.ajax({
            type: 'POST',
            url: 'upload.php',
            data: fd,
            dataType: 'text'
        }).done(function(data) {
            console.log(data);
        });
    };
    reader.readAsDataURL(blob);
}
Replace test.wav with whatever applies - in my case BlahBlah.mp3. Then, to pass the blob from Chris Geirman's script, change uploadAudio( blob ); to uploadAudio( mediaObj.blob );.
Be aware that with this setup on localhost, 2 minutes of audio took 1'40" to convert from wav to mp3 and move to the uploads directory. Next job: create progress bars, etc.!
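For the progress bars, one possible approach (a sketch only, reusing the same $.ajax call) is to hook into the underlying XMLHttpRequest's upload progress event:
// Sketch: report upload progress via the underlying XMLHttpRequest.
$.ajax({
    type: 'POST',
    url: 'upload.php',
    data: fd,
    dataType: 'text',
    xhr: function () {
        var xhr = $.ajaxSettings.xhr();
        xhr.upload.addEventListener('progress', function (e) {
            if (e.lengthComputable) {
                console.log(Math.round(e.loaded / e.total * 100) + '% uploaded');
            }
        });
        return xhr;
    }
}).done(function (data) {
    console.log(data);
});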
Upload.php (Thanks again Soumen Basak):
<?php
// pull the raw binary data from the POST array
$data = substr($_POST['data'], strpos($_POST['data'], ",") + 1);
// decode it
$decodedData = base64_decode($data);
// echo the filename back to the client
// NOTE: in production, sanitize this value (e.g. with basename()) before
// using it as a path - a client-supplied filename is not trustworthy.
$filename = $_POST['fname'];
echo $filename;
// write the data out to the file
$fp = fopen($filename, 'wb');
fwrite($fp, $decodedData);
fclose($fp);
?>
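Also be aware that readAsDataURL produces a base64 payload roughly 33% larger than the raw audio, so longer clips can exceed PHP's default POST limits; if uploads silently fail, check the post_max_size and upload_max_filesize directives in php.ini.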

new MediaRecorder(stream[, options]): can the stream be modified while recording?

new MediaRecorder(stream[, options]);
I want to record the user's camera and audio, mix song.mp3 into the audio track while recording, and export the result as a video file to download via a link.
Can the stream passed as MediaRecorder's first parameter be modified while it is live?
When I call recorder.stop(), it throws the error: Failed to execute 'stop' on 'MediaRecorder': The MediaRecorder's state is 'inactive'.
My code:
function getFileBuffer(filepath) {
    return fetch(filepath, { method: 'GET' }).then(response => response.arrayBuffer())
}

function mp3play() {
    getFileBuffer('song.mp3')
        .then(buffer => context.decodeAudioData(buffer))
        .then(buffer => {
            console.log(buffer)
            const source = context.createBufferSource()
            source.buffer = buffer
            let volume = context.createGain()
            volume.gain.value = 1
            source.connect(volume)
            dest = context.createMediaStreamDestination()
            volume.connect(dest)
            // volume.connect(context.destination)
            source.start(0)
            const _audioTrack = stream.getAudioTracks();
            if (_audioTrack.length > 0) {
                _audioTrack[0].stop();
                stream.removeTrack(_audioTrack[0]);
            }
            //
            // console.log(dest.stream)
            // console.log(dest.stream.getAudioTracks()[0])
            // stream.addTrack(dest.stream.getAudioTracks()[0])
        })
}

function startRecording() {
    recorder = new MediaRecorder(stream, {
        mimeType: 'video/webm'
    })
    recorder.start()
    stopBtn.removeAttribute('disabled')
    startBtn.disabled = true
}
No, we still can't record a MediaStream whose tracks are changed after recording has begun; doing so will stop the MediaRecorder. Here is a very related Q/A, which was more about recording video.
What can be done, though, is to create a kind of merger MediaStream.
It's much easier with audio, especially since you are already using the Web Audio API: all you need to do is create another MediaStreamDestination node and connect / disconnect the different sources.
const base = "https://upload.wikimedia.org/wikipedia/en/d/";
const urls = [
    "d3/Beach_Boys_-_Good_Vibrations.ogg",
    "dc/Strawberry_Fields_Forever_%28Beatles_song_-_sample%29.ogg"
].map( url => base + url );

const context = new AudioContext();
const button = document.querySelector( 'button' );
button.onclick = async () => {
    button.disabled = true;
    context.resume();
    const audiobuffers = await Promise.all( urls.map( fetchAsAudioBuffer ) );
    button.remove();

    const streamNode = context.createMediaStreamDestination();
    const stream = streamNode.stream;
    const recorder = new MediaRecorder( stream );
    const chunks = [];
    recorder.ondataavailable = evt => chunks.push( evt.data );
    recorder.onstop = evt => exportAudio( new Blob( chunks ) );
    document.getElementById( 'record-stopper' ).onclick = evt => {
        recorder.stop();
        current_source.stop( 0 );
    };

    let current_index = 0;
    let current_source = null;
    document.getElementById( 'switcher' ).onclick = switchAudioSource;
    switchAudioSource();
    recorder.start();

    function switchAudioSource() {
        if( current_source ) {
            current_source.stop( 0 );
        }
        current_index = (current_index + 1) % audiobuffers.length;
        current_source = context.createBufferSource();
        current_source.buffer = audiobuffers[ current_index ];
        current_source.loop = true;
        current_source.connect( streamNode );
        current_source.connect( context.destination );
        current_source.start( 0 );
    }
};

function exportAudio( blob ) {
    const aud = new Audio( URL.createObjectURL( blob ) );
    aud.controls = true;
    document.body.prepend( aud );
}

async function fetchAsAudioBuffer( url ) {
    const buf = await fetchAsBuffer( url );
    return context.decodeAudioData( buf );
}

async function fetchAsBuffer( url ) {
    const resp = await fetch( url );
    return resp.arrayBuffer();
}
button + .recording-controls,
audio + .recording-controls {
    display: none;
}
<button>begin</button>
<div class="recording-controls">
<label>Recording...</label>
<button id="switcher">Switch Audio Sources</button>
<button id="record-stopper">Stop Recording</button>
</div>
For video, that would imply recording a CanvasCaptureMediaStreamTrack and drawing the different video streams onto the source <canvas>, but we generally lose a lot of quality doing so...
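For completeness, a rough sketch of that canvas approach (element names are mine, and streamNode is the MediaStreamDestination node from the snippet above): paint whichever video element is currently active onto a <canvas> each frame, and record the canvas's own capture stream.
// Sketch: switch video sources by painting the active one onto a canvas,
// then record the canvas's own capture stream.
const canvas = document.createElement('canvas');
canvas.width = 640;   // assumed output size
canvas.height = 360;
const ctx = canvas.getContext('2d');
let activeVideo = videoElementA; // reassign this reference to "switch" sources

(function draw() {
    ctx.drawImage(activeVideo, 0, 0, canvas.width, canvas.height);
    requestAnimationFrame(draw);
})();

const mixedStream = canvas.captureStream(30); // 30 fps
// reuse the merged audio from the MediaStreamDestination node
mixedStream.addTrack(streamNode.stream.getAudioTracks()[0]);
const recorder = new MediaRecorder(mixedStream, { mimeType: 'video/webm' });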

How to detect more than 10 faces with the Google Vision API

Hi, I am new to the Google Vision APIs. I want to detect the faces in an image using Node.js. My local image contains more than 10 faces, but the Vision API returns only 10 face detections. Is there any way to detect all the faces using this Vision API? Please refer to the Vision Node API; you can take this image as a reference.
Here is my code:
function findFaceontheImage(req, res, next) {
    var vision = Vision();
    var inputfile = 'NASA_Astronaut_Group_15.jpg';
    var outputFile = 'out.png';
    vision.faceDetection({ source: { filename: inputfile } })
        .then(function (results) {
            const faces = results[0].faceAnnotations;
            console.log('Faces:');
            req.body['faces'] = results;
            var numFaces = faces.length;
            console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
            highlightFaces(inputfile, faces, outputFile, Canvas, function (err) {
                if (err) {
                    next()
                }
                console.log("Finished!");
                next()
            });
        })
        .catch(function (err) {
            console.error('ERROR:', err);
        });
}

function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
    fs.readFile(inputFile, function (err, image) {
        if (err) {
            return callback(err);
        }
        var Image = Canvas.Image;
        // Open the original image into a canvas
        var img = new Image();
        img.src = image;
        var canvas = new Canvas(img.width, img.height);
        var context = canvas.getContext("2d");
        context.drawImage(img, 0, 0, img.width, img.height);
        // Now draw boxes around all the faces
        context.strokeStyle = "rgba(0,255,0,0.8)";
        context.lineWidth = "5";
        faces.forEach(function (face) {
            context.beginPath();
            var origX = 0;
            var origY = 0;
            face.boundingPoly.vertices.forEach(function (bounds, i) {
                if (i === 0) {
                    origX = bounds.x;
                    origY = bounds.y;
                }
                context.lineTo(bounds.x, bounds.y);
            });
            context.lineTo(origX, origY);
            context.stroke();
        });
        // Write the result to a file
        console.log("Writing to file " + outputFile);
        var writeStream = fs.createWriteStream(outputFile);
        var pngStream = canvas.pngStream();
        pngStream.on("data", function (chunk) {
            writeStream.write(chunk);
        });
        pngStream.on("error", console.log);
        pngStream.on("end", callback);
    });
}
In case there are other people still struggling with this topic: with the Node.js client library, you can pass the full request object to the client.faceDetection(..) method instead of just a file path or image URI.
For example, in my case I wanted the API to process an image in my GCS bucket. So, instead of passing the image URI as a string, I do something like the following.
import { protos } from '@google-cloud/vision';

// BEFORE
const [result] = await CLIENT.faceDetection(`gs://${bucketName}/${filePath}`);

// AFTER
const [result] = await CLIENT.faceDetection({
    image: {
        source: { imageUri: `gs://${bucketName}/${filePath}` }
    },
    features: [
        {
            maxResults: 100,
            type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
        },
    ],
});
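For context on why only 10 faces come back: when you pass just a file path or URI, the service appears to fall back to a default maxResults of 10 for the requested feature, which matches the symptom in the question; spelling out the features array with maxResults: 100, as above, lifts that cap.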
Just in case no one comes up with a solution that forces the API to return more results, here is pseudocode:
def process(image)
    faces = detect_faces(image)   # single Vision API call
    return faces if faces.size < 10
    split image into two slightly overlapping halves, half1 and half2
    # we overlap because the split line may cut a face in two
    a = process(half1)
    b = process(half2)
    return a + b - intersection(a, b)
The intersection function should throw out those faces that sit at the same coordinates (allowing for an error of a few pixels), after accounting for the shift between half1 and half2 within the original image.
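A rough Node.js sketch of that idea, assuming the sharp library for cropping and a detectFaces(buffer) helper that wraps the Vision call (shiftFace and dedupeFaces are also placeholders, not real APIs):
const sharp = require('sharp'); // assumed dependency for cropping

// detectFaces(buffer) is a placeholder that wraps client.faceDetection(...)
// and resolves to an array of face annotations.
async function processImage(buffer) {
    const faces = await detectFaces(buffer);
    if (faces.length < 10) return faces;

    const { width, height } = await sharp(buffer).metadata();
    const overlap = Math.round(width * 0.1); // so a face on the split line shows up in both halves
    const mid = Math.round(width / 2);

    const leftHalf = await sharp(buffer)
        .extract({ left: 0, top: 0, width: mid + overlap, height })
        .toBuffer();
    const rightStart = mid - overlap;
    const rightHalf = await sharp(buffer)
        .extract({ left: rightStart, top: 0, width: width - rightStart, height })
        .toBuffer();

    const a = await processImage(leftHalf);
    // shiftFace moves right-half coordinates back into the original image's frame
    const b = (await processImage(rightHalf)).map(face => shiftFace(face, rightStart));
    return dedupeFaces(a, b); // drop faces whose boxes (nearly) coincide
}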

Chrome extension to capture video of last few minutes of active tab

I am trying to write a Chrome plugin to capture a video of the active tab. My code is based on this post.
When my page action is invoked, I start the recording:
var recordedChunks = null;
var captureOptions = { audio: false, video: true };
chrome.tabCapture.capture(captureOptions,
    function(stream) {
        if (stream) {
            recordedChunks = [];
            var options = { mimeType: "video/webm" };
            mediaRecorder = new MediaRecorder(stream, options);
            mediaRecorder.start();
            mediaRecorder.ondataavailable = function(event) {
                if (event.data.size > 0) {
                    recordedChunks.push(event.data);
                }
            }
        }
    }
);
When the page action is invoked again, I stop the recording and download a file as:
mediaRecorder.stop();
var blob = new Blob(recordedChunks, {
    type: 'video/webm'
});
var url = URL.createObjectURL(blob);
var a = document.createElement('a');
document.body.appendChild(a);
a.style = 'display: none';
a.href = url;
a.download = 'test.webm';
a.click();
window.URL.revokeObjectURL(url);
stream.getVideoTracks()[0].stop();
This works great - I am able to play the downloaded test.webm video.
But I only want to record the last few minutes of video of the active tab. I do not want the recordedChunks array to grow unbounded. So, I tried something like this in the start recording action:
chrome.tabCapture.capture(captureOptions,
    function(stream) {
        // ...
        mediaRecorder.ondataavailable = function(event) {
            if (event.data.size > 0) {
                recordedChunks.push(event.data);
                // CHANGE HERE: keep only the last 1000 blobs
                while (recordedChunks.length > 1000) {
                    recordedChunks.shift();
                }
            }
        }
    }
);
But with this modification, the downloaded test.webm video is not playable. How do I capture just the tail of the blob output from MediaRecorder?
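A likely cause: the first chunk a MediaRecorder emits contains the WebM initialization segment (the container header), so once recordedChunks.shift() discards it, the remaining chunks no longer form a decodable file. One common workaround, a sketch only with assumed names and timings, is to restart the recorder on a fixed interval and keep the last few completed recordings, each of which carries its own header:
// Sketch: keep a rolling window by restarting the recorder periodically.
// Each completed segment is independently playable because it starts
// with its own WebM header. Stop/cleanup handling is elided.
var SEGMENT_MS = 60 * 1000; // one-minute segments (assumed value)
var MAX_SEGMENTS = 5;       // keep roughly the last five minutes
var segments = [];

function startSegment(stream) {
    var recorder = new MediaRecorder(stream, { mimeType: 'video/webm' });
    var chunks = [];
    recorder.ondataavailable = function (event) {
        if (event.data.size > 0) chunks.push(event.data);
    };
    recorder.onstop = function () {
        segments.push(new Blob(chunks, { type: 'video/webm' }));
        while (segments.length > MAX_SEGMENTS) segments.shift();
    };
    recorder.start();
    setTimeout(function () {
        recorder.stop();
        startSegment(stream); // begin the next segment
    }, SEGMENT_MS);
}
The trade-off is that this yields several short files rather than one continuous recording; joining them into a single WebM afterwards requires remuxing, not just Blob concatenation.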
