Expo Audio is not working on iOS. It gets stuck on the record button itself.

github.com/expo/audio-recording-example
You can check out the code there.
I am using Audio from expo-av.
It works fine on Android devices and even on the emulator. On an Android device, it first asks for the audio permission, then starts recording when I tap record, and after I tap stop it provides playback of the recorded audio.
But when testing on iOS, it never asks for permission; it goes straight to the audio recording page, and tapping the record button doesn't start a recording.
I can't tell whether the problem is the iOS audio permission or the syntax of Audio.Recording.
I've tried setting the permission to true manually:
this.recordingSettings = JSON.parse(JSON.stringify(Audio.RECORDING_OPTIONS_PRESET_LOW_QUALITY));
// // UNCOMMENT THIS TO TEST maxFileSize:
// this.recordingSettings.android['maxFileSize'] = 12000;
}
_askForPermissions = async () => {
const response = await Permissions.askAsync(Permissions.AUDIO_RECORDING);
this.setState({
haveRecordingPermissions: response.status === 'granted',
});
};
async _stopPlaybackAndBeginRecording() {
this.setState({
isLoading: true,
});
if (this.sound !== null) {
await this.sound.unloadAsync();
this.sound.setOnPlaybackStatusUpdate(null);
this.sound = null;
}
await Audio.setAudioModeAsync({
allowsRecordingIOS: true,
interruptionModeIOS: Audio.INTERRUPTION_MODE_IOS_DO_NOT_MIX,
playsInSilentModeIOS: true,
shouldDuckAndroid: true,
interruptionModeAndroid: Audio.INTERRUPTION_MODE_ANDROID_DO_NOT_MIX,
playThroughEarpieceAndroid: false,
staysActiveInBackground: true,
});
if (this.recording !== null) {
this.recording.setOnRecordingStatusUpdate(null);
this.recording = null;
}
const recording = new Audio.Recording();
await recording.prepareToRecordAsync(this.recordingSettings);
recording.setOnRecordingStatusUpdate(this._updateScreenForRecordingStatus);
this.recording = recording;
await this.recording.startAsync(); // Will call this._updateScreenForRecordingStatus to update the screen.
this.setState({
isLoading: false,
});
}
_onRecordPressed = () => {
if (this.state.isRecording) {
this._stopRecordingAndEnablePlayback();
} else {
this._stopPlaybackAndBeginRecording();
}
};
I expect audio recording to work on iOS, but it gets stuck on isRecording.
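For what it's worth, iOS will silently refuse to record if the microphone permission was never granted, so one way to rule that out is to gate the record button on the permission state the example already tracks. This is only a sketch built on the question's own _askForPermissions and haveRecordingPermissions; the componentDidMount wiring is an assumption, not taken from the example:

componentDidMount() {
  // Assumption: ask for the microphone permission up front so iOS shows its
  // prompt before the first recording attempt.
  this._askForPermissions();
}

_onRecordPressed = () => {
  if (!this.state.haveRecordingPermissions) {
    // Re-ask instead of silently failing on iOS.
    this._askForPermissions();
    return;
  }
  if (this.state.isRecording) {
    this._stopRecordingAndEnablePlayback();
  } else {
    this._stopPlaybackAndBeginRecording();
  }
};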

I changed my recording settings and now everything works well on both Android and iOS devices.
My updated settings:
this.recordingSettings = {
android: {
extension: '.m4a',
outputFormat: Audio.RECORDING_OPTION_ANDROID_OUTPUT_FORMAT_MPEG_4,
audioEncoder: Audio.RECORDING_OPTION_ANDROID_AUDIO_ENCODER_AAC,
sampleRate: 44100,
numberOfChannels: 2,
bitRate: 128000,
},
ios: {
extension: '.m4a',
outputFormat: Audio.RECORDING_OPTION_IOS_OUTPUT_FORMAT_MPEG4AAC,
audioQuality: Audio.RECORDING_OPTION_IOS_AUDIO_QUALITY_MIN,
sampleRate: 44100,
numberOfChannels: 2,
bitRate: 128000,
linearPCMBitDepth: 16,
linearPCMIsBigEndian: false,
linearPCMIsFloat: false,
},
};
I used these settings for my requirements; you can use other option values too, but keep the same structure as Audio.RECORDING_OPTIONS_PRESET_HIGH_QUALITY.
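As a quick sanity check (a sketch, not part of the original answer), wrapping prepareToRecordAsync in a try/catch surfaces the actual iOS error if the chosen options are still rejected, instead of the UI just staying stuck on the record button:

async _startRecordingWithSettings() {
  await Audio.setAudioModeAsync({
    allowsRecordingIOS: true,
    interruptionModeIOS: Audio.INTERRUPTION_MODE_IOS_DO_NOT_MIX,
    playsInSilentModeIOS: true,
    shouldDuckAndroid: true,
    interruptionModeAndroid: Audio.INTERRUPTION_MODE_ANDROID_DO_NOT_MIX,
    playThroughEarpieceAndroid: false,
    staysActiveInBackground: true,
  });
  const recording = new Audio.Recording();
  try {
    await recording.prepareToRecordAsync(this.recordingSettings);
    await recording.startAsync();
    this.recording = recording;
  } catch (error) {
    // An incompatible option combination shows up here as a concrete error.
    console.log('Failed to start recording', error);
  }
}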

Related

getUserMedia NotAllowedError: permission denied

When I call getUserMedia on my phone, I get an error
from alert(e): NotAllowedError: permission denied
What should I do?
This is part of the index.js code (where the video and chatting happen):
async function getMedia(deviceId) {
const initialConstraints = {
audio: true,
video: { facingMode: 'user' },
};
const cameraConstraints = {
audio: true,
video: { deviceId: { exact: deviceId } },
};
try {
myStream = await navigator.mediaDevices.getUserMedia(
deviceId ? cameraConstraints : initialConstraints
);
myFace.srcObject = myStream;
if (!deviceId) {
await getCameras();
}
} catch (e) {
console.log(e);
alert(e);
}
}
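As an aside (not from the original post), inspecting e.name in that catch block distinguishes a denied permission from a missing device, which helps tell a WebView permission problem apart from a hardware one. A minimal sketch:

async function getMediaWithDiagnostics() {
  try {
    return await navigator.mediaDevices.getUserMedia({ audio: true, video: true });
  } catch (e) {
    if (e.name === 'NotAllowedError') {
      // Denied by the user, the browser, or the embedding WebView/iframe.
      console.log('Camera/microphone access was blocked:', e.message);
    } else if (e.name === 'NotFoundError') {
      // No matching camera or microphone device is available.
      console.log('No camera or microphone found:', e.message);
    } else {
      console.log(e);
    }
    return null;
  }
}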
I'm using a Rails app (web), React Native (WebView app), and Node.js (for real-time chatting and Zoom-like video calls via socket.io and WebRTC).
I ran it in the WebView (hybrid) app and it doesn't work, although it works well in the browser.
So I googled and added these options:
<video autoplay="" webkit-playsinline="webkit-playsinline" playsinline="playsinline" muted="true" id="myFace" width="350" height="400"></video>
The video tag's parent is an iframe, and it has attributes such as allow="camera;microphone;autoplay".
I also added the Expo WebView options like:
return (
  <WebView
    useWebkit
    allowInlineMediaPlayback={true}
    mediaPlaybackRequiresUserAction={false}
    javaScriptEnabled={true}
    javaScriptEnabledAndroid
    geolocationEnabled={true}
  />
);
I'm a beginner... can you help me out? Thanks!
If you get this error on localhost or any other site, go to the browser settings, search for microphone or webcam, and enable access for that site manually.
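In the WebView case specifically, the page inside the WebView can only be granted camera/microphone access if the native app itself holds those permissions. A minimal sketch of requesting them up front, assuming an Expo-managed app with expo-permissions (not part of the original answer):

import * as Permissions from 'expo-permissions';

async function ensureMediaPermissions() {
  // Ask for the native camera and microphone permissions before loading the WebView;
  // without them, getUserMedia inside the page is rejected with NotAllowedError.
  const { status } = await Permissions.askAsync(
    Permissions.CAMERA,
    Permissions.AUDIO_RECORDING
  );
  return status === 'granted';
}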

How to attach an Electron window to an application screen and resize dynamically

I am currently writing a program that uses a mix of Electron and React-Redux to create an overlay window on top of screens/applications. I managed to successfully create the transparent overlay window and list all the valid media streams. But I can't figure out how I can have this new overlay window match the size/location of the selected stream and dynamically resize. On top of that, I would like the overlay to be on top of the selected stream alone.
Any tips are welcome :)
// In MainProcess
ipcMain.on(ELECTRON_CREATE_OVERLAY_WINDOW, (event, windowSettings) => {
if (overlayWindow !== null) {
console.error('Trying to create an Overlay window when there is already one!')
return
}
console.log('Creating the Overlay window')
overlayWindow = new BrowserWindow({
width: windowSettings.width,
height: windowSettings.height,
webPreferences: {
nodeIntegration: true,
enableRemoteModule: true,
},
transparent: true,
frame: false,
alwaysOnTop: true,
});
overlayWindow.setIgnoreMouseEvents(true);
overlayWindow.loadURL("http://localhost:3000/overlay");
overlayWindow.on('closed', () => {
console.log('Overlay window closed')
overlayWindow = null
})
});
// In React page / RendererProcess
React.useEffect(() => {
desktopCapturer
.getSources({
types: ["window", "screen"],
})
.then((inputSources) => {
for (let i = 0; i < inputSources.length; i++) {
let source = inputSources[i];
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: "desktop",
chromeMediaSourceId: source.id,
},
},
};
navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
inputSources[i].stream = stream;
console.log('stream', stream)
// When we got all streams, update the state
if (i == inputSources.length - 1) {
setSources([...inputSources]);
}
});
}
});
}, []);
...
const launchOverlay = (source) => {
const streamSettings = source.stream.getVideoTracks()[0].getSettings();
console.log(source)
console.log(source.stream)
console.log(streamSettings)
createOverlayWindow({ width: streamSettings.width, height: streamSettings.height })
};
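For context, createOverlayWindow above is presumably just an ipcRenderer wrapper around the ELECTRON_CREATE_OVERLAY_WINDOW handler shown in the main process; a sketch under that assumption (the channel constant's value and the setBounds note are assumptions, not from the question):

// Renderer side (sketch): forward the selected stream's dimensions to the main process.
import { ipcRenderer } from 'electron';

// Assumption: the same channel constant the main-process handler listens on.
const ELECTRON_CREATE_OVERLAY_WINDOW = 'ELECTRON_CREATE_OVERLAY_WINDOW';

function createOverlayWindow(windowSettings) {
  ipcRenderer.send(ELECTRON_CREATE_OVERLAY_WINDOW, windowSettings);
}

// To resize later, the main process could expose a similar channel whose handler
// calls overlayWindow.setBounds({ x, y, width, height }) with updated values.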
You can use the electron-overlay-window package for this.
The readme says it supports Windows and Linux.
It tracks the target window by its title and keeps your app window right over it. It also re-attaches itself if you restart the target app/game. The only downside is that it's not very well documented, but the basic demo is simple.
// ...
import { overlayWindow as OW } from 'electron-overlay-window'
// ...
const win = new BrowserWindow({
...OW.WINDOW_OPTS, // pay attention here
width: 800,
height: 600,
resizable: false,
webPreferences: {
nodeIntegration: process.env.ELECTRON_NODE_INTEGRATION,
},
})
// ... when ready
OW.attachTo(win, 'Untitled - Notepad')
// listen for lifecycle events
OW.on('attach', ev => { console.log('WO: attach', ev) })
OW.on('detach', ev => { console.log('WO: detach', ev) })
OW.on('blur', ev => { console.log('WO: blur', ev)})
OW.on('focus', ev => { console.log('WO: focus', ev)})
OW.on('fullscreen', ev => console.log('WO: fullscreen', ev))
OW.on('moveresize', ev => console.log('WO: moveresize', ev))
You can look up more examples here:
Ender-O for Elite Dangerous written in JS (demo video)
Awaken PoE Trade for Path of Exile written in TS
Simple demo from author written in TS (I recommend starting with this example)

Basic broadcast example using agora.io not working

I'm trying to get agora.io's audio broadcasting working on a webpage, following this example.
Everything works if I also broadcast video. If I broadcast audio only, nothing shows and nothing is heard, but no errors appear in the console.
Here's my HTML:
<div class="video-grid" id="video">
<div class="video-view">
<div id="local_stream" class="video-placeholder"></div>
<div id="local_video_info" class="video-profile hide"></div>
<div id="video_autoplay_local" class="autoplay-fallback hide"></div>
</div>
</div>
Here's my Agora code:
var rtc = {
client: null,
joined: false,
published: false,
localStream: null,
remoteStreams: [],
params: {}
};
// Options for joining a channel
//Self-generated token
var option = {
appID: "{{myappid}}",
channel: "event-2123",
uid: "1",
token: "{{mytoken}}"
}
rtc.client = AgoraRTC.createClient({mode: "live", codec: "h264"});
// Initialize the client
rtc.client.init(option.appID, function () {
console.log("init success");
// Join a channel
rtc.client.join(option.token ? option.token : null, option.channel, option.uid ? +option.uid : null, function (uid) {
console.log("join channel: " + option.channel + " success, uid: " + uid);
rtc.params.uid = uid;
rtc.client.setClientRole("host");
rtc.localStream = AgoraRTC.createStream({
streamID: rtc.params.uid,
audio: true,
video: true,
screen: false,
})
// Initialize the local stream
rtc.localStream.init(function () {
console.log("--------");
console.log("init local stream success");
// play stream with html element id "local_stream"
rtc.localStream.play("local_stream");
// Publish the local stream
rtc.client.publish(rtc.localStream, function (err) {
console.log("publish failed");
console.error(err);
})
}, function (err) {
console.error("init local stream failed ", err);
});
}, function(err) {
console.error("client join failed", err)
})
}, (err) => {
console.error(err);
});
This works (though not, it seems, on Safari). But if I change the stream parameters to the following, nothing works:
rtc.localStream = AgoraRTC.createStream({
streamID: rtc.params.uid,
audio: true,
video: false,
screen: false,
})
I've noticed that in some browsers the video is muted by default. So if no interface elements are showing and muting is on, perhaps that is the source of the problem?
How can I make this work?
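One way to test the autoplay/mute hypothesis (a sketch, not a confirmed fix) is to create and play the audio-only stream from an explicit user gesture, since most browsers only allow audible playback after a click. This reuses only the calls already shown above; the button id is assumed:

// Assumes a <button id="start_audio">Start broadcasting</button> on the page.
document.getElementById('start_audio').addEventListener('click', function () {
  rtc.localStream = AgoraRTC.createStream({
    streamID: rtc.params.uid,
    audio: true,
    video: false,
    screen: false,
  });
  rtc.localStream.init(function () {
    // An audio-only local stream renders no video element, so "nothing shows"
    // is expected; check the publish error callback instead.
    rtc.localStream.play('local_stream');
    rtc.client.publish(rtc.localStream, function (err) {
      console.error('publish failed', err);
    });
  }, function (err) {
    console.error('init local stream failed', err);
  });
});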

CPU usage gets higher as more WebRTC peers are added?

I'm streaming video/audio from a server with an Electron app to capture the desktop. When one user is connected, CPU usage on both cores is 30-50%. As more users join, the usage gets higher; with ~6 users it was a constant 100% on both cores and the video quality became laggy and poor.
It's as if it's encoding the video separately for each user that joins? How can I make it encode once and send that stream to everyone? That's my only guess as to why CPU usage would climb so much, though maybe I'm wrong about the cause. Thank you for any help you can give! I'm open to other ways of doing this as well, since only the server needs to send video out.
Getting the video and audio:
function getAudio(audioID){
navigator.mediaDevices.getUserMedia( { video: false, audio: {deviceId: {exact: audioID},
autoGainControl: false, channelCount: 2, echoCancellation: false, noiseSuppression: false, sampleRate: 44100, sampleSize: 16 } } )
.then(function(stream) {
console.log("audio got??");
var audio = stream.getAudioTracks()[0];
mediaStream.addTrack(audio);
})
.catch(function(err) {
console.log(err.message);
});
}
desktopCapturer.getSources({ types: ['screen'] })
.then(async sources => {
console.log(sources);
let constraints2 = {
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
maxWidth: 1280,
maxHeight: 720
}
}
}
let constraints3 = {
frameRate: {max: 24}
}
navigator.mediaDevices.getUserMedia(constraints2)
.then(function(stream){
mediaStream = stream;
let track = stream.getVideoTracks()[0];
track.applyConstraints(constraints3);
setTimeout(function(){
getAudio(audioID, 0);
}, 2000);
})
.catch(console.error);
})
.catch(console.error);
Calling the peers that join:
peer = new Peer(myPeerID, {host: 'selfhostedpeerjs.com', port: 9000, path: '/', key: 'peerjs', config: {'iceServers': [{ url: 'stun:stun.l.google.com:19302' },
{url:'stun:stun1.l.google.com:19302'},
{url:'stun:stun2.l.google.com:19302'},
{url:'stun:stun3.l.google.com:19302'},
{url:'stun:stun4.l.google.com:19302'}
]}
});
peer.on('open', function(id){
console.log("My peer id is: " + id);
});
peer.on('connection', function(conn)
{
conn.on('open', function(){
console.log("connection opened");
var id = conn.peer;
//conn.send('Hello!');
console.log("Trying to call now");
var call = peer.call(id, mediaStream);
call.on('error', function(err){
console.log('calling error');
console.log(err);
})
});
});
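On the per-user encoding question: in a PeerJS/WebRTC mesh like this, each RTCPeerConnection typically encodes its own copy of the stream, so CPU cost grows with every viewer; a single shared encode requires an SFU (such as mediasoup or Janus) instead of direct calls from the server. As a stopgap, capping the per-connection encoding can reduce the load. A sketch, assuming PeerJS exposes the underlying connection as call.peerConnection:

function capOutgoingVideo(call) {
  // Assumption: the PeerJS MediaConnection exposes its RTCPeerConnection as .peerConnection.
  const pc = call.peerConnection;
  pc.getSenders()
    .filter((sender) => sender.track && sender.track.kind === 'video')
    .forEach((sender) => {
      const params = sender.getParameters();
      if (!params.encodings || params.encodings.length === 0) {
        // Encodings may be empty before negotiation completes.
        params.encodings = [{}];
      }
      // Limit bitrate and resolution for this peer to keep encoding cost down.
      params.encodings[0].maxBitrate = 1000000; // ~1 Mbps
      params.encodings[0].scaleResolutionDownBy = 2;
      sender.setParameters(params).catch(console.error);
    });
}

// Usage, after calling a peer:
// var call = peer.call(id, mediaStream);
// capOutgoingVideo(call);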

Get camera stream on embedded system

I have an embedded system with a camera and GStreamer, and I'm trying to get the stream from my camera. I have a web application built with Aurelia and Electron.
I tried mediaDevices.getUserMedia but I get a NotFoundError, yet using enumerateDevices I do get the devices I need.
Could the problem be that getUserMedia doesn't work properly with GStreamer? If I run the same project on my PC it works perfectly.
Here it is my HTML:
<video ref="videoPlayer" hide.bind="screenSharing" id="videoPlayer" autoplay muted></video>
And this is my js:
let j = 0;
navigator.mediaDevices.enumerateDevices()
.then((deviceInfos) => {
for (var i = 0; i !== deviceInfos.length; ++i) {
console.log(deviceInfos[i]);
if (deviceInfos[i].kind === 'videoinput') {
this.deviceInfo[j] = deviceInfos[i];
j++;
}
}
if (this.deviceInfo.length > 1) {
console.log(this.deviceInfo.length);
this.constraints = {
audio: true,
video: {
deviceId: { exact: this.deviceInfo[1].deviceId }
}
};
}
else {
console.log("Only one camera");
this.constraints = {
video: {
deviceId: { exact: this.deviceInfo[0].deviceId }
},
audio: true
};
console.log(this.constraints);
}
})
.then(() => {
navigator.mediaDevices.getUserMedia(this.constraints)
.then((stream) => {
console.log('Got mic+video stream', stream);
this.localStream = stream;
this.videoPlayer.srcObject = this.localStream;
})
.catch((err) => {
console.error(err);
});
})
}
I've seen on the internet that there are some packages like livecam, but I have no idea how to use them.
I attach the output of mediaDevices.enumerateDevices:
console.log(navigator.mediaDevices.enumerateDevices())
Promise { <resolved>: Array(5) }
0: MediaDeviceInfo { deviceId: "default", groupId: "6dbae3b74e14f5e239133b5feea86e5ae7a9741a3e3fd21a86eab9273fe135aa", kind: "audioinput", label: "Default" }
1: MediaDeviceInfo { deviceId: "d415346fe3db142f8daa611ad3dedb298b5d94b70f4221c38e7e6582f45c3008", groupId: "8d82cc2495eebb4c40bb77a5e0287d4b365ac1de8205684eae39cb605a703f11", kind: "audioinput", label: "Built-in Audio Stereo" }
2: MediaDeviceInfo { deviceId: "82378e03eff67ac471305e50ac95e629ebf441c1ab1819d6a36aca137e37e89d", groupId: "", kind: "videoinput", label: "" }
3: MediaDeviceInfo { deviceId: "default", groupId: "default", kind: "audiooutput", label: "Default" }
4: MediaDeviceInfo { deviceId: "31a7efff94b610d3fce02b21a319cc43e2541d56d98b4138b6e3fe854b0df38c", groupId: "391b1de381c11ab437d507abc0543f288dd29d999717dbb0e949c006ef120935", kind: "audiooutput", label: "Built-in Audio Stereo" }
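One thing worth checking (a sketch, not a confirmed fix): the videoinput entry above has an empty label, which usually means no camera permission has been granted yet, and an exact deviceId constraint against such a device can fail with NotFoundError or OverconstrainedError. Requesting a generic stream first and then re-enumerating helps tell the two cases apart:

async function probeCamera() {
  try {
    // Relaxed request: lets the browser pick any camera and triggers the permission prompt.
    const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
    // After permission is granted, labels and stable deviceIds become visible.
    const devices = await navigator.mediaDevices.enumerateDevices();
    console.log(devices.filter((d) => d.kind === 'videoinput'));
    return stream;
  } catch (err) {
    // A NotFoundError here means Chromium really sees no usable camera,
    // which points at the camera driver/GStreamer layer rather than the constraints.
    console.error(err.name, err.message);
    return null;
  }
}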
