AudioContext is not resuming

I was working with the Web Audio API, following the code from
https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/suspend
to suspend and resume an AudioContext. The suspending part works, but I can't resume after suspension.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
window.URL = window.URL || window.webkitURL;

// Store the instance of AudioContext globally
audio_context = new AudioContext();

if (audio_context.state === 'running') {
    audio_context.suspend();
} else if (audio_context.state === 'suspended') {
    alert("Resumed");
    //audio_context.resume();
    audio_context.resume().then(function() {
        // susresBtn.textContent = 'Suspend context';
    });
}

Figured it out: resume() has to be called from a user interaction, such as a button click.
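For reference, a minimal sketch of wiring the suspend/resume toggle to a click handler (assuming a button with id susresBtn, as hinted at in the commented-out line above, exists on the page):

var susresBtn = document.getElementById('susresBtn'); // hypothetical button id
susresBtn.addEventListener('click', function() {
    if (audio_context.state === 'running') {
        audio_context.suspend().then(function() {
            susresBtn.textContent = 'Resume context';
        });
    } else if (audio_context.state === 'suspended') {
        // resume() happens inside the click handler, so the user gesture requirement is satisfied
        audio_context.resume().then(function() {
            susresBtn.textContent = 'Suspend context';
        });
    }
});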

Related

Forge viewer isLayerVisible is always false

For some reason I always get false from viewer.isLayerVisible(layerNode).
I followed this tutorial: https://forge.autodesk.com/blog/toggle-sheet-layer-visibility
I have an event handler on LAYER_VISIBILITY_CHANGED_EVENT; here is my code snippet in TypeScript:
viewer.addEventListener(Autodesk.Viewing.LAYER_VISIBILITY_CHANGED_EVENT, (e) => {
    var root = viewer.model["myData"].layersRoot; // getLayersRoot() is not a function for some reason
    var overlayLayer = viewer["getSelectedLayer"]();
    if (viewer["layerRoot"] != undefined) {
        var layerNode = root.children.filter((e) => { return e.name === overlayLayer });
        var isLayerVisible = viewer.isLayerVisible(layerNode);
        // show layer
        if (isLayerVisible) {
            viewer.impl.addOverlay("Edit2D", viewer["savedPoints"].overlayLayer);
        }
        // hide layer
        else {
            viewer.impl.removeOverlayScene("Edit2D");
        }
    }
});
After switching some layers off from the layer manager, I also get viewer.areAllVisible() as true.
The Forge Viewer version is 7.*.
Do you have any advice? Thanks!
I found out that you can access the layers and their visibility from the viewer's indexToLayer property:
var visibleLayers = Array.from(viewer.impl.layers.indexToLayer.filter(e => e != null && e.visible));
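For example, a sketch (mine, not from the original answer) of checking whether a specific layer is currently visible by name, assuming indexToLayer is array-like as the snippet above implies and that each layer entry exposes name and visible:

// Hypothetical helper built on the indexToLayer property shown above
function isLayerVisibleByName(viewer, layerName) {
    var layers = viewer.impl.layers.indexToLayer.filter((e) => e != null);
    var layer = layers.find((e) => e.name === layerName);
    return layer ? layer.visible : false;
}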

Cannot read property 'activities' of undefined

My problem is that when a user goes live and has the live status on Discord, I get the error "TypeError: Cannot read property 'activities' of undefined" in the console and the bot crashes. I expect the bot to send a message with a link to the stream.
Discord.js - v12
Code:
client.on('presenceUpdate', (oldMember, newMember) => {
    const channel = newMember.guild.channels.cache.find(x => x.name === "test");
    if (!channel) return;
    let oldStreamingStatus = oldMember.presence.activities.type ? oldMember.presence.activities.streaming : false;
    let newStreamingStatus = newMember.presence.activities.type ? newMember.presence.activities.streaming : false;
    if (oldStreamingStatus == newStreamingStatus) {
        return;
    }
    if (newStreamingStatus) {
        if (message.member.roles.cache.find(r => r.name === "test")) {
            channel.send(`${newMember.user}, is live URL: ${newMember.presence.activities.url} ${newMember.presence.activities.name}`);
            return;
        } else
            return;
    }
});
oldMember and newMember are of type Presence, so you do not need to access the .presence property to get the member's activities; you can simply use oldMember.activities. For newMember.presence.activities.url and newMember.presence.activities.name, do this:
newMember.activities.find(activity => activity.type === 'STREAMING').name
newMember.activities.find(activity => activity.type === 'STREAMING').url
Either way, however, your code won't work as written, since activities is an array of Activity objects, so you can go about this in two ways.
(Recommended) Looking for an Activity of type STREAMING:
let oldStreamingStatus = oldMember.activities.find(activity => activity.type === 'STREAMING') ? true : false;
let newStreamingStatus = newMember.activities.find(activity => activity.type === 'STREAMING') ? true : false;
Getting the first Activity:
let oldStreamingStatus = oldMember.activities[0].type === 'STREAMING' ? true : false
let newStreamingStatus = newMember.activities[0].type === 'STREAMING' ? true : false
The problem is that oldMember and newMember are already Presences, so remove .presence.
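Putting the two answers together, a minimal sketch of a corrected handler for discord.js v12 (the "test" channel and role names come from the question; looking the member up via newPresence.member is my assumption):

client.on('presenceUpdate', (oldPresence, newPresence) => {
    const channel = newPresence.guild.channels.cache.find(x => x.name === "test");
    if (!channel) return;

    // oldPresence can be undefined the first time a presence is seen
    const wasStreaming = oldPresence ? oldPresence.activities.some(a => a.type === 'STREAMING') : false;
    const streamingActivity = newPresence.activities.find(a => a.type === 'STREAMING');

    // Only announce the transition from not streaming to streaming
    if (wasStreaming || !streamingActivity) return;

    // Assumption: only announce members that have the "test" role
    if (newPresence.member && newPresence.member.roles.cache.find(r => r.name === "test")) {
        channel.send(`${newPresence.user}, is live URL: ${streamingActivity.url} ${streamingActivity.name}`);
    }
});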

How can I call addTelemetryInitializer when using the latest Javascript snippet?

I am trying to customise the name attribute for page view events.
This has previously been asked, for example How to provide custom names for page view events in Azure App Insights?,
but this and all other solutions I've found (and the Microsoft documentation too) work with an old version of the JavaScript snippet, of the form:
window.appInsights = appInsights;
// …
appInsights.trackPageView();
The current snippet from the portal is very different, though:
var sdkInstance="appInsightsSDK";window[sdkInstance]="appInsights";var // ...
{
instrumentationKey:"key"
}); window[aiName] = aisdk,aisdk.queue && aisdk.queue.length ===0 && aisdk.trackPageView({});
I've tried this sort of thing:
var sdkInstance="appInsightsSDK";window[sdkInstance]="appInsights";var aiName=window[sdkInstance],aisdk=window[aiName]||function(e){function n(e){t[e]=function(){var n=arguments;t.queue.push(function(){t[e].apply(t,n)})}}var t={config:e};t.initialize=!0;var i=document,a=window;setTimeout(function(){var n=i.createElement("script");n.src=e.url||"https://az416426.vo.msecnd.net/scripts/b/ai.2.min.js",i.getElementsByTagName("script")[0].parentNode.appendChild(n)});try{t.cookie=i.cookie}catch(e){}t.queue=[],t.version=2;for(var r=["Event","PageView","Exception","Trace","DependencyData","Metric","PageViewPerformance"];r.length;)n("track"+r.pop());n("startTrackPage"),n("stopTrackPage");var s="Track"+r[0];if(n("start"+s),n("stop"+s),n("setAuthenticatedUserContext"),n("clearAuthenticatedUserContext"),n("flush"),!(!0===e.disableExceptionTracking||e.extensionConfig&&e.extensionConfig.ApplicationInsightsAnalytics&&!0===e.extensionConfig.ApplicationInsightsAnalytics.disableExceptionTracking)){n("_"+(r="onerror"));var o=a[r];a[r]=function(e,n,i,a,s){var c=o&&o(e,n,i,a,s);return!0!==c&&t["_"+r]({message:e,url:n,lineNumber:i,columnNumber:a,error:s}),c},e.autoExceptionInstrumented=!0}return t}(
{
instrumentationKey:"my-key"
}); window[aiName] = aisdk;
if (aisdk.queue && 0 !== aisdk.queue.length) {
    function adjustPageName(item) {
        var name = item.name.replace("AppName", "");
        if (name.indexOf("Order") !== -1)
            return "Order";
        if (name.indexOf("Product") !== -1)
            return "Shop";
        // And so on...
        return name;
    }
    // Add telemetry initializer
    aisdk.queue.push(function () {
        aisdk.context.addTelemetryInitializer(function (envelope) {
            var telemetryItem = envelope.data.baseData;
            // To check the telemetry item's type:
            if (envelope.name === Microsoft.ApplicationInsights.Telemetry.PageView.envelopeType || envelope.name === Microsoft.ApplicationInsights.Telemetry.PageViewPerformance.envelopeType) {
                // Do not track admin pages
                if (telemetryItem.name.indexOf("Admin") !== -1)
                    return false;
                telemetryItem.name = adjustPageName(telemetryItem);
            }
        });
    });
    aisdk.trackPageView();
};
But it doesn't work (no errors, but no effect on the telemetry either).
Has anyone managed to get anything like this working using the new snippet?
Please try the code below; I can add a custom property by using the latest JavaScript code snippet:
var sdkInstance="appInsightsSDK";window[sdkInstance]="appInsights";var aiName=window[sdkInstance],aisdk=window[aiName]||function(e){function n(e) { t[e] = function () { var n = arguments; t.queue.push(function () { t[e].apply(t, n) }) } }var t={config: e};t.initialize=!0;var i=document,a=window;setTimeout(function(){var n=i.createElement("script");n.src=e.url||"https://az416426.vo.msecnd.net/scripts/b/ai.2.min.js",i.getElementsByTagName("script")[0].parentNode.appendChild(n)});try{t.cookie = i.cookie}catch(e){}t.queue=[],t.version=2;for(var r=["Event","PageView","Exception","Trace","DependencyData","Metric","PageViewPerformance"];r.length;)n("track"+r.pop());n("startTrackPage"),n("stopTrackPage");var s="Track"+r[0];if(n("start"+s),n("stop"+s),n("setAuthenticatedUserContext"),n("clearAuthenticatedUserContext"),n("flush"),!(!0===e.disableExceptionTracking||e.extensionConfig&&e.extensionConfig.ApplicationInsightsAnalytics&&!0===e.extensionConfig.ApplicationInsightsAnalytics.disableExceptionTracking)){n("_" + (r = "onerror")); var o=a[r];a[r]=function(e,n,i,a,s){var c=o&&o(e,n,i,a,s);return!0!==c&&t["_"+r]({message: e,url:n,lineNumber:i,columnNumber:a,error:s}),c},e.autoExceptionInstrumented=!0}return t}(
{
instrumentationKey: "xxxxxxxxxx"
}
); window[aiName] = aisdk, aisdk.queue && 0 === aisdk.queue.length;
// Add telemetry initializer
aisdk.queue.push(function () {
    var telemetryInitializer = (envelope) => {
        // Add a custom property
        envelope.data.name = 'This item passed through my telemetry initializer';
    };
    appInsights.addTelemetryInitializer(telemetryInitializer);
});
aisdk.trackPageView({});
Then in the Azure portal, the custom property is added.
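Building on that pattern, a sketch of how the original goal (renaming page views) might look; the baseType values 'PageviewData' and 'PageviewPerformanceData' and the baseData.name field are my reading of the v2 SDK, so treat them as assumptions, and adjustPageName is the helper from the question:

aisdk.queue.push(function () {
    appInsights.addTelemetryInitializer(function (item) {
        // Only touch page view telemetry (assumed baseType values for the v2 SDK)
        if (item.baseType === 'PageviewData' || item.baseType === 'PageviewPerformanceData') {
            // Returning false drops the item, e.g. admin pages
            if (item.baseData.name.indexOf("Admin") !== -1) return false;
            item.baseData.name = adjustPageName(item.baseData);
        }
    });
});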

Socket.io with AudioContext send and receive audio Errors on receiving

I am trying to build something where a user can send audio instantly to many people, using socket.io, AudioContext, and JavaScript on the front end, and Node.js and socket.io on the server.
I can record the audio, send it to the server, and send it back to other users, but I cannot play the data back. I guess it must be a problem with how I send it or how I process the buffer that receives it.
I got the following error: "The buffer passed to decodeAudioData contains an unknown content type."
Update! Audio is passed fine and the buffer is created with no errors, but there is no sound feedback.
The user presses record and it starts recording/streaming with the following functions.
This is how it all starts:
navigator.getUserMedia({ audio: true, video: false }, initializeRecorder, errorCallback);

function initializeRecorder(stream) {
    var bufferSize = 2048;
    audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    var source = audioCtx.createMediaStreamSource(stream);
    var recorder = audioCtx.createScriptProcessor(bufferSize, 1, 1);
    recorder.onaudioprocess = recorderProcess;
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recording = true;
    initialized = true;
    play = false;
    stop = true;
}

function recorderProcess(e) {
    var left = e.inputBuffer.getChannelData(0);
    socket.emit('audio-blod-send', convertFloat32ToInt16(left));
}

function convertFloat32ToInt16(buffer) {
    var l = buffer.length;
    var buf = new Int16Array(l);
    while (l--) {
        buf[l] = Math.min(1, buffer[l]) * 0x7FFF;
    }
    return buf.buffer;
}
Then the server uses the socket to broadcast what the original sender sent:
socket.on('audio-blod-send', function(data) {
    socket.broadcast.to(roomName).emit('audio-blod-receive', data);
});
And then the data is played. Update!
I was using audioContext.decodeAudioData, which I found out is only meant to decode audio from complete MP3 or WAV files, not a raw stream. With the new code no errors appear; however, there is still no audio feedback.
socket.on('audio-blod-receive', function(data) {
    playAudio(data);
});

function playAudio(buffer) {
    var audioCtx;
    var started = false;
    if (!audioCtx) {
        audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    }
    source = audioCtx.createBufferSource();
    audioBuffer = audioCtx.createBuffer(1, 2048, audioCtx.sampleRate);
    audioBuffer.getChannelData(0).set(buffer);
    source.buffer = audioBuffer;
    source.connect(audioCtx.destination);
    source.start(0);
    console.log(buffer);
}
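One thing that stands out (my assumption, not something confirmed in the thread): the sender converts the samples to Int16 before emitting, but playAudio writes the received bytes straight into a Float32 channel buffer, which expects values in the -1..1 range. A sketch of converting back before playback, assuming the data arrives as an ArrayBuffer:

function convertInt16ToFloat32(arrayBuffer) {
    var int16 = new Int16Array(arrayBuffer);
    var float32 = new Float32Array(int16.length);
    for (var i = 0; i < int16.length; i++) {
        float32[i] = int16[i] / 0x7FFF; // scale back to the -1..1 range
    }
    return float32;
}

// then inside playAudio:
// audioBuffer.getChannelData(0).set(convertInt16ToFloat32(buffer));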
P.S. If anyone is interested further in what I am trying to do, feel free to contact me.

HTML5 Microphone capture stops after 5 seconds in Firefox

I'm capturing audio input from the microphone with getUserMedia(); it works fine in Chrome, but in Firefox the sound dies out after 5 seconds. If I request the microphone again (without reloading the page), the same thing happens. Here is the code (I used http://updates.html5rocks.com/2012/09/Live-Web-Audio-Input-Enabled as guidance):
// getting the function depending on browser
navigator.getMedia = (navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia ||
    navigator.msGetUserMedia);

// success callback when requesting audio input stream
function gotAudioStream(stream) {
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    var audioContext = new AudioContext();
    // Create an AudioNode from the stream.
    var mediaStreamSource = audioContext.createMediaStreamSource(stream);
    // Connect it to the destination to hear yourself (or any other node for processing!)
    mediaStreamSource.connect(audioContext.destination);
}

function gotError(err) {
    alert("An error occurred! " + err);
}

// when the button is clicked, the browser asks for permission to access the microphone
jQuery("#sound_on").click(function() {
    navigator.getMedia({ audio: true }, gotAudioStream, gotError);
});
Any ideas?
EDIT/UPDATE
Thank you, csch, for the reference. The workaround by Karoun Kasraie worked!
context = new AudioContext();
navigator.getUserMedia({ audio: true }, function(stream) {
    // the important thing is to save a reference to the MediaStreamAudioSourceNode
    // thus, *window*.source or any other object reference will do
    window.source = context.createMediaStreamSource(stream);
    source.connect(context.destination);
}, alert);
It's a bug in Firefox; it can be found here:
https://bugzilla.mozilla.org/show_bug.cgi?id=934512
There's also a workaround:
context = new AudioContext();
navigator.getUserMedia({ audio: true }, function(stream) {
    // the important thing is to save a reference to the MediaStreamAudioSourceNode
    // thus, *window*.source or any other object reference will do
    window.source = context.createMediaStreamSource(stream);
    source.connect(context.destination);
}, alert);
