How to change the sampling rate from 48000 (the UE4 default) to 16000 samples, and stereo to mono, in a WAV recording in UE4? I have searched in BPs but had no luck. The image below shows what I have done with BPs. In order for this to work I had to change the audio settings in WindowsEngine.ini to XAudio (see this: https://www.youtube.com/watch?v=BpP1SxxwYIE)
Therefore I assume that this should be only possible with C++.
I did it with C++
File -> New C++ class -> VoiceCharacter -> Public
Change your character to have "VoiceCharacter" as parent. It can be found in Class settings of your character BP.
Add this method to the VoiceCharacter C++ class, then build and play.
// Converts an in-memory stereo WAV (16-bit PCM, 48 kHz) to mono 16 kHz by
// keeping the left channel of every third frame, and patches the canonical
// 44-byte WAV header accordingly. The result is appended to monoWavBytes.
//
// Fixes over the original:
//  * data-chunk size (offset 40) is divided by 6, not 2 — the payload
//    shrinks 2x (stereo->mono) times 3x (48 kHz -> 16 kHz);
//  * the RIFF ChunkSize (offset 4) is recomputed instead of copied verbatim;
//  * the sample loop no longer reads one byte past the end of the input;
//  * inputs shorter than a 44-byte header are rejected.
//
// NOTE: this is plain decimation with no low-pass filter, so it can alias;
// acceptable for voice capture.
void AVoiceCharacter::StereoToMono(TArray<uint8> stereoWavBytes, TArray<uint8>& monoWavBytes)
{
    // A canonical PCM WAV header is 44 bytes; anything shorter can't be patched.
    if (stereoWavBytes.Num() < 44)
    {
        GEngine->AddOnScreenDebugMessage(-1, 5.f, FColor::Red, "Stereo Bytes is empty");
        return;
    }

    // Each 12-byte stereo group (3 frames x 4 bytes) yields one 2-byte mono
    // sample; the formula mirrors the copy loop below, which requires two
    // readable bytes per emitted sample.
    const int StereoDataSize = stereoWavBytes.Num() - 44;
    const int MonoDataSize = (StereoDataSize >= 2) ? 2 * ((StereoDataSize - 2) / 12 + 1) : 0;

    //Change wav headers
    for (int i = 0; i < 44; i++)
    {
        //ChunkSize (RIFF size) occupies bytes 4 to 7: 36 + data size
        if (i == 4)
        {
            int ChunkSize = 36 + MonoDataSize;
            monoWavBytes.Append((uint8*)&ChunkSize, sizeof(ChunkSize));
            i += 3;
        }
        //NumChannels occupies bytes 22 to 23: 2 -> 1
        else if (i == 22)
        {
            short NumChannels = (*(short*)&stereoWavBytes[i]) / 2;
            monoWavBytes.Append((uint8*)&NumChannels, sizeof(NumChannels));
            i++;
        }
        //SampleRate occupies bytes 24 to 27: 48000 -> 16000
        else if (i == 24)
        {
            int SamplingRate = (*(int*)&stereoWavBytes[i]) / 3;
            monoWavBytes.Append((uint8*)&SamplingRate, sizeof(SamplingRate));
            i += 3;
        }
        //ByteRate occupies bytes 28 to 31: /2 for channels, /3 for rate = /6
        else if (i == 28)
        {
            int ByteRate = (*(int*)&stereoWavBytes[i]) / 6;
            monoWavBytes.Append((uint8*)&ByteRate, sizeof(ByteRate));
            i += 3;
        }
        //BlockAlign occupies bytes 32 to 33: 4 -> 2
        else if (i == 32)
        {
            short BlockAlign = (*(short*)&stereoWavBytes[i]) / 2;
            monoWavBytes.Append((uint8*)&BlockAlign, sizeof(BlockAlign));
            i++;
        }
        //Subchunk2Size (data byte count) occupies bytes 40 to 43.
        //BUG FIX: was divided by 2; the data actually shrinks by 6x.
        else if (i == 40)
        {
            monoWavBytes.Append((uint8*)&MonoDataSize, sizeof(MonoDataSize));
            i += 3;
        }
        else
        {
            monoWavBytes.Add(stereoWavBytes[i]);
        }
    }

    //Keeps the left channel of every third stereo frame: mono + downsample
    //by 3 (48000 -> 16000). 'i + 1 < Num()' guards the 2-byte read so a
    //ragged tail can't run past the end of the array.
    for (int i = 44; i + 1 < stereoWavBytes.Num(); i += 12)
    {
        monoWavBytes.Add(stereoWavBytes[i]);
        monoWavBytes.Add(stereoWavBytes[i + 1]);
    }
}
Do not forget to add this in VoiceCharacter.h so that you can use it in BPs.
UFUNCTION(BlueprintCallable, Category="Audio")
static void StereoToMono(TArray<uint8> stereoWavBytes, TArray<uint8>& monoWavBytes);
and here is how to use it in BPs
Some useful plugins for (CUFile)
https://github.com/getnamo/nodejs-ue4
https://github.com/getnamo/socketio-client-ue4
Related
I get a cracking at the beginning and end of a core audio output queue. The code should simply generate a tone.
Edit: created a sample project
https://github.com/MrMatthias/CoreAudioCrackle
Here is the setup:
// Primes and starts the playback queue; no-op until setupOutputQueue has
// created userData.outputQueue.
-(void) startOutputQueue {
    if(userData.outputQueue != NULL) {
        // Prepare the already-enqueued buffers before starting the hardware.
        if(!checkError(AudioQueuePrime(userData.outputQueue, 0, NULL), "AudioQueuePrime")) {
            // BUG FIX: '#"..."' is not valid Objective-C — NSString literals
            // use '@'. (Also fixed the "QutputQueue" typo in the message.)
            NSLog(@"Error priming OutputQueue");
        }
        if(!checkError(AudioQueueStart(userData.outputQueue, NULL), "AudioQueueStart Output")) {
            NSLog(@"Error starting OutputQueue");
        }
    }
}
// Creates the AudioQueue output (once) and pre-fills three buffers with
// tone data so playback can start immediately.
-(void) setupOutputQueue {
// Stream format: 44.1 kHz, mono, 16-bit signed, packed, BIG-endian PCM.
// NOTE(review): big-endian output is unusual on Apple hardware; it only
// lines up because the fill code byte-swaps each sample with
// CFSwapInt16HostToBig — confirm this pairing is intentional.
memset(&userData.outputDesc, 0, sizeof(userData.outputDesc));
userData.outputDesc.mFormatID = kAudioFormatLinearPCM;
userData.outputDesc.mFramesPerPacket = 1;
userData.outputDesc.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
userData.outputDesc.mChannelsPerFrame = 1;
userData.outputDesc.mSampleRate = 44100;
userData.outputDesc.mBitsPerChannel = 16;
userData.outputDesc.mBytesPerFrame = userData.outputDesc.mBytesPerPacket = 2;
// Restart the tone phase from the beginning on every (re)setup.
userData.outputSamplePosition = 0;
// Create the queue only on the first call; later calls just reset position.
if (userData.outputQueue == NULL) {
if(!checkError(AudioQueueNewOutput(&userData.outputDesc, outputCallback, &userData, NULL, NULL, 0, &userData.outputQueue), "AudioQueueNewOutput")) {
return;
}
// One buffer holds DURATION seconds of audio (bytes/frame * rate * s).
UInt32 bufferSize = userData.outputDesc.mBytesPerFrame * userData.outputDesc.mSampleRate * DURATION;
// Allocate and pre-fill all three buffers by invoking the callback
// directly, so the queue has data queued before it ever starts.
for (int i=0; i<3; ++i) {
if(!checkError(AudioQueueAllocateBuffer(userData.outputQueue, bufferSize, &userData.outputBuffers[i]), "AudioQueueAllocateBuffer")) {
return;
}
outputCallback(&userData, userData.outputQueue, userData.outputBuffers[i]);
}
}
}
In the output callback i call a block that fills the buffers:
userData->outputBlock(userData, inAQ, inBuffer);
AudioQueueEnqueueBuffer(userData->outputQueue, inBuffer, 0, NULL);
Filling of the Buffer looks like this:
UInt32 sampleCount = DURATION * userData->outputDesc.mSampleRate;
double f1 = userData->outputDesc.mSampleRate / 10000.0f;
for (int i=0; i<sampleCount; ++i) {
SInt16 sample = CFSwapInt16HostToBig(SHRT_MAX * ( sin((userData->outputSamplePosition + i) * 2 * M_PI / f1) ));
((SInt16*)inBuffer->mAudioData)[i] = sample;
}
userData->outputSamplePosition += sampleCount;
inBuffer->mAudioDataByteSize = sampleCount * 2;
The recording looks like this:
Try adding (SInt16) just before SHRT_MAX ( sin((userData->...
such that your entire code block looks like this
SInt16 sample = CFSwapInt16HostToBig((SInt16)SHRT_MAX * ( sin((userData->outputSamplePosition + i) * 2 * M_PI / f1) ));
Can I somehow record the output to a WAV file, using trackPosition and offset? It works fine when played in the browser; I just want to output it to a WAV file.
// Schedules one Web Audio BufferSource per (sub)clip so a clip that spans
// loop boundaries plays as several back-to-back segments.
// NOTE(review): this fragment relies on outer-scope variables (that, clip,
// gainNode, node, loop, startTime, endTime, duration, offset, cursor,
// trackPosition, currentTime, inClipStart) set up by the enclosing method —
// confirm their meaning against the caller.
for (var i = 0; i <= loop; ++i) {
node = that.context.createBufferSource();
that.nodes.push(node);
node.buffer = clip.get('buffer');
node.connect(gainNode);
// clip offset and duration times
if (loop > 0) {
if (i === 0) { // first subclip: start partway in, play to the buffer end
offset = startTime;
duration = duration - offset;
} else if (i === loop) { // last subclip: play from the top up to endTime
offset = 0;
duration = endTime;
} else {
// middle subclips play the entire buffer
offset = 0;
duration = clip.get('buffer').duration;
}
} else { // loop === 0: the clip fits in a single segment
offset = startTime;
if (inClipStart)
duration = endTime - startTime;
else
duration = clip.clipLength();
}
// sets the clip's playback start time (when, offset, duration)
node.start(
currentTime + trackPosition - cursor,
offset,
duration
);
// advance the timeline cursor past the segment just scheduled
trackPosition += duration;
}
Check out https://github.com/mattdiamond/Recorderjs - it lets you record/save the output of your Web Audio app as a .wav, which sounds like what you're looking for!
I have just started with electronics, and doing a project using the Spark Photon, which is based on Arduino. The project website is here: http://hackster.io/middleca/sending-sound-over-the-internet
I uploaded the following two files (.ino and .js) to the Photon, which should then capture and transmit sound (directly I assume). I expected a test.wav would be created. However, where should I find this file so I can check if everything worked?
main.ino file:
#define MICROPHONE_PIN A5
#define AUDIO_BUFFER_MAX 8192
int audioStartIdx = 0, audioEndIdx = 0;
uint16_t audioBuffer[AUDIO_BUFFER_MAX];
uint16_t txBuffer[AUDIO_BUFFER_MAX];
// version without timers
unsigned long lastRead = micros();
char myIpAddress[24];
TCPClient audioClient;
TCPClient checkClient;
TCPServer audioServer = TCPServer(3443);
// One-time Photon initialization: debug serial, microphone pin, cloud
// IP variable, and the TCP server the recorder connects to.
void setup() {
// USB serial used for the CSV debug dump in sendAudio().
Serial.begin(115200);
pinMode(MICROPHONE_PIN, INPUT);
// Publish this device's LAN IP as a cloud variable so we know where to
// connect, try:
// particle get MY_DEVICE_NAME ipAddress
Spark.variable("ipAddress", myIpAddress, STRING);
IPAddress myIp = WiFi.localIP();
sprintf(myIpAddress, "%d.%d.%d.%d", myIp[0], myIp[1], myIp[2], myIp[3]);
// 1/8000th of a second is 125 microseconds (target sampling interval)
// Start listening for the audio client on TCP port 3443.
audioServer.begin();
lastRead = micros();
}
// Main loop: adopt the most recent incoming TCP connection as the audio
// client, then alternate between sampling and shipping the audio.
void loop() {
checkClient = audioServer.available();
if (checkClient.connected()) {
audioClient = checkClient;
}
//listen for 100ms, taking a sample every 125us,
//and then send that chunk over the network.
listenAndSend(100);
}
// Samples the microphone at ~8 kHz for 'listenMs' milliseconds, then pushes
// the collected chunk via sendAudio() (TCP if a client is attached, serial
// otherwise).
// FIX: the parameter was named 'delay', which shadowed Arduino's global
// delay() function — a classic footgun; renamed to 'listenMs'.
void listenAndSend(int listenMs) {
unsigned long startedListening = millis();
while ((millis() - startedListening) < listenMs) {
unsigned long now = micros();
if (lastRead > now) {
// micros() wrapped around (roughly every 71 minutes) —
// resync and skip a beat rather than stall.
lastRead = now;
}
//125 microseconds is 1/8000th of a second
// NOTE(review): '> 125' waits at least 126 us, so the effective rate is
// slightly below 8 kHz; '>= 125' would hit 8 kHz exactly — confirm which
// the receiver expects.
if ((now - lastRead) > 125) {
lastRead = now;
readMic();
}
}
sendAudio();
}
// Callback for Timer 1
void readMic(void) {
uint16_t value = analogRead(MICROPHONE_PIN);
if (audioEndIdx >= AUDIO_BUFFER_MAX) {
audioEndIdx = 0;
}
audioBuffer[audioEndIdx++] = value;
}
// Copies the samples recorded since the last call out of the circular
// buffer into bufferPtr, then advances the read cursor. When fewer than
// AUDIO_BUFFER_MAX samples were copied, a 0xFFFF terminator is appended
// so the consumers (sendAudio / write_socket) know where the data ends.
void copyAudio(uint16_t *bufferPtr) {
//if end is after start, read from start->end
//if end is before start, then we wrapped, read from start->max, 0->end
// Snapshot the write cursor once; readMic() may keep advancing it.
int endSnapshotIdx = audioEndIdx;
bool wrapped = endSnapshotIdx < audioStartIdx;
int endIdx = (wrapped) ? AUDIO_BUFFER_MAX : endSnapshotIdx;
int c = 0;
// First span: from the read cursor up to either the write cursor or the
// physical end of the ring buffer.
for(int i=audioStartIdx;i<endIdx;i++) {
// do a thing
bufferPtr[c++] = audioBuffer[i];
}
if (wrapped) {
//we have extra: the portion that wrapped to the front of the ring
for(int i=0;i<endSnapshotIdx;i++) {
// do more of a thing.
bufferPtr[c++] = audioBuffer[i];
}
}
//and we're done.
audioStartIdx = audioEndIdx;
// -1 stored into a uint16_t becomes 65535 (0xFFFF) — the end-of-data
// sentinel scanned for by sendAudio() and write_socket().
if (c < AUDIO_BUFFER_MAX) {
bufferPtr[c] = -1;
}
}
// Callback for Timer 1
void sendAudio(void) {
copyAudio(txBuffer);
int i=0;
uint16_t val = 0;
if (audioClient.connected()) {
write_socket(audioClient, txBuffer);
}
else {
while( (val = txBuffer[i++]) < 65535 ) {
Serial.print(val);
Serial.print(',');
}
Serial.println("DONE");
}
}
// an audio sample is 16bit, we need to convert it to bytes for sending over the network
// an audio sample is 16bit, we need to convert it to bytes for sending over
// the network. Samples are emitted little-endian and flushed in chunks of
// up to 1024 bytes; the sample stream ends at the 0xFFFF sentinel written
// by copyAudio().
void write_socket(TCPClient socket, uint16_t *buffer) {
  int i=0;
  uint16_t val = 0;
  int tcpIdx = 0;
  uint8_t tcpBuffer[1024];
  while( (val = buffer[i++]) < 65535 ) {
    // Each sample needs 2 bytes — flush before we would overflow.
    // FIX: the original checked 'tcpIdx + 1 >= 1024', which only avoided
    // an overflow because tcpIdx happens to always be even; this form is
    // correct regardless of increment or buffer size.
    if (tcpIdx + 2 > (int)sizeof(tcpBuffer)) {
      socket.write(tcpBuffer, tcpIdx);
      tcpIdx = 0;
    }
    tcpBuffer[tcpIdx] = val & 0xff;     // low byte first (little-endian)
    tcpBuffer[tcpIdx+1] = (val >> 8);   // high byte
    tcpIdx += 2;
  }
  // any leftovers?
  if (tcpIdx > 0) {
    socket.write(tcpBuffer, tcpIdx);
  }
}
and the waveRecorder.js file:
// make sure you have Node.js Installed!
// Get the IP address of your photon, and put it here:
// CLI command to get your photon's IP address
//
// particle get MY_DEVICE_NAME ipAddress
// Put your IP here!
var settings = {
ip: "192.168.0.54",
port: 3443
};
/**
* Created by middleca on 7/18/15.
*/
//based on a sample from here
// http://stackoverflow.com/questions/19548755/nodejs-write-binary-data-into-writablestream-with-buffer
var fs = require("fs");
var samplesLength = 1000;
var sampleRate = 8000;
var outStream = fs.createWriteStream("test.wav");
// Writes the canonical 44-byte PCM WAV header (RIFF / fmt / data) to
// outStream. Sizes are best-effort: the audio is streamed in afterwards,
// so the true data length is unknown at header-writing time.
var writeHeader = function() {
    // FIX: new Buffer() memory is uninitialized — size it exactly and
    // zero it so no garbage can leak into the file.
    var b = new Buffer(44);
    b.fill(0);
    b.write('RIFF', 0);
    /* RIFF chunk size = 36 + data length.
       FIX: was '32 +'; still only an estimate since data is streamed. */
    b.writeUInt32LE(36 + samplesLength * 2, 4);
    b.write('WAVE', 8);
    /* format chunk identifier */
    b.write('fmt ', 12);
    /* format chunk length */
    b.writeUInt32LE(16, 16);
    /* sample format (1 = uncompressed PCM) */
    b.writeUInt16LE(1, 20);
    /* channel count */
    b.writeUInt16LE(1, 22);
    /* sample rate */
    b.writeUInt32LE(sampleRate, 24);
    /* byte rate (sample rate * block align) */
    b.writeUInt32LE(sampleRate * 2, 28);
    /* block align (channel count * bytes per sample) */
    b.writeUInt16LE(2, 32);
    /* bits per sample */
    b.writeUInt16LE(16, 34);
    /* data chunk identifier */
    b.write('data', 36);
    /* data chunk length (0 placeholder; most decoders read to EOF) */
    b.writeUInt32LE(0, 40);
    // BUG FIX: the header is exactly 44 bytes; slicing 50 used to emit six
    // garbage bytes between the header and the first audio sample.
    outStream.write(b.slice(0, 44));
};
// Write the WAV header first so the streamed PCM lands right after it.
// (writeHeader ignores its argument; it writes to the outStream closure.)
writeHeader(outStream);
var net = require('net');
console.log("connecting...");
// Connect to the Photon and append every received PCM chunk to test.wav.
client = net.connect(settings.port, settings.ip, function () {
client.setNoDelay(true);
client.on("data", function (data) {
try {
console.log("GOT DATA");
outStream.write(data);
//outStream.flush();
console.log("got chunk of " + data.toString('hex'));
}
catch (ex) {
console.error("Er!" + ex);
}
});
});
// Record for a fixed 10 seconds, then close the socket and the file.
setTimeout(function() {
console.log('recorded for 10 seconds');
client.end();
outStream.end();
process.exit(0);
}, 10 * 1000);
Thieme! Such a beginner's question... SO unworthy!
Anyway, I will iron my heart and tell you the answer.
First of all, you misunderstood: the .ino file should go to the Photon and the waveRecorder.js file should be stored on your computer (or server) and called whenever you want to retrieve the audio. As you can read in the code, the .ino file makes sure that every millisecond it will check if something wants to connect, and if so, it will stream the sound to the wav.file stored in the same location as your waveRecorder.js file. "Something wants to connect" happens when you launch waveRecorder.js. Make sure you have node installed.
So, to sum it up:
Download the two files (main.ino and waveRecorder.js) to your computer in a folder ../xx/folderName
Then configure the IPAddress in both files using your photon's IPAddress
Upload main.ino to the photon (type 'particle flash abcdefgh123456578 "xx/../folderName/main.ino"' in the terminal)
Then run waveRecorder.js by typing 'node "xx/../folderName/waveRecorder.js"' in your terminal.
That should do it.. Even I got it working :)
I am trying to concatenate buffers which are saved in memory streams. Then, when I try to play the whole buffer, it throws an exception:
An exception of type 'System.ArgumentException' occurred in
Microsoft.Xna.Framework.ni.dll but was not handled in user code
Additional information: Ensure that the buffer length is non-zero and
meets the block alignment requirements for the audio format.
When I debug, the length of mStrm still remains 0, and I can't find out why.
// Builds two short sine bursts into separate streams, concatenates them,
// and plays the result as a 16 kHz mono SoundEffect.
private void mySendClick(object sender, RoutedEventArgs e)
{
    var mStrmStartDelimiter = new MemoryStream();
    var mStrmEndDelimiter = new MemoryStream();

    BinaryWriter writer1 = new BinaryWriter(mStrmStartDelimiter);
    Sinus(6500, 200, writer1, 32767);
    BinaryWriter writer2 = new BinaryWriter(mStrmEndDelimiter);
    Sinus(6800, 200, writer2, 32767);

    var mStrm = new MemoryStream();
    // BUG FIX: Stream.CopyTo copies from the CURRENT position, which sits
    // at end-of-stream right after writing — rewind both sources first,
    // otherwise mStrm stays empty and SoundEffect throws ArgumentException.
    mStrmStartDelimiter.Position = 0;
    mStrmEndDelimiter.Position = 0;
    mStrmStartDelimiter.CopyTo(mStrm);
    mStrmEndDelimiter.CopyTo(mStrm);

    mStrm.Seek(0, SeekOrigin.Begin);
    SoundEffect mySoundPlay = new SoundEffect(mStrm.ToArray(), 16000, AudioChannels.Mono);
    mySoundPlay.Play();
}
// Writes msDuration milliseconds of a sine tone at 'frequency' Hz as
// 16-bit mono PCM samples (16 kHz sample rate) to the given writer.
// 'volume' is in the UInt16 range (0..65535); the amplitude actually
// written is halved so it fits the Int16 sample range.
public static void Sinus(double frequency, int msDuration, BinaryWriter writer, int volume)
{
    double TAU = 2 * Math.PI;
    double samplesPerSecond = 16000;
    double theta = frequency * TAU / (double)samplesPerSecond;
    int samples = (int)((decimal)samplesPerSecond * msDuration / 1000);

    // 'volume' is UInt16 with range 0 thru Uint16.MaxValue ( = 65 535)
    // we need 'amp' to have the range of 0 thru Int16.MaxValue ( = 32 767)
    // BUG FIX: '>> 2' divides by 4, contradicting the comment's intent of
    // "volume / 2" and halving the tone's amplitude; '>> 1' halves.
    double amp = volume >> 1;

    for (int step = 0; step < samples; step++)
    {
        short s = (short)(amp * Math.Sin(theta * (double)step));
        writer.Write(s);
    }
}
I'm targeting windows phone 8.1 silverlight platform
I got the solution for the problem: do the following before calling CopyTo()
mStrmStartDelimiter.Position = 0;
mStrmEndDelimiter.Position = 0;
I'm having this little problem with some easy code, basically what I'm doing is sending information via the serial port via a program I wrote in Java. The information is getting their for basic statements (IE, can turn on lights and stuff) but I'm having errors getting it to decode strings with number values send to it.
So for example, I'm sending strings that look like this
BS//:+000/+000/+000
and the decoding method I'm using looks like this.
After adding the string via this:
if (inputString.startsWith("BS//:")) //**fixed
{
inputInfoToBaseStepper(inputString);
baseStepperRunAction(baseStepperRotCount, baseStepperRotStepSize, baseStepperTime);
}
Sends it too...
// Parses "BS//:+000/+000/+000" into rotation count, step size and delay.
// Index layout:  B S / / : + 0 0 0 / +  0  0  0  /  +  0  0  0
//                0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
// BUG FIX: String::substring(from, to) EXCLUDES 'to'. The original calls
// (6,9)/(10,13)/(15,18) dropped the sign of field 1 and the last digit of
// fields 2 and 3 — hence "it doesn't decode". Each signed field is 4 chars.
void inputInfoToBaseStepper(String baseStepper)
{
  baseStepperRotCount = baseStepper.substring(5, 9).toInt();      // "+000" at 5..8
  baseStepperRotStepSize = baseStepper.substring(10, 14).toInt(); // "+000" at 10..13
  baseStepperTime = baseStepper.substring(15, 19).toInt();        // "+000" at 15..18
}
Which should decode and run
// Performs 'rotations' stepper moves of StepSize steps each, pausing
// delayTime ms between moves so the motor can settle.
void baseStepperRunAction (int rotations, int StepSize, int delayTime)
{
  // BUG FIX: 'for (int rotations; ...)' declared a NEW, uninitialized
  // variable shadowing the parameter, so the loop counted down from an
  // indeterminate value (undefined behavior). Count on the parameter.
  for (; rotations > 0; rotations--)
  {
    baseStepper.step(StepSize);
    delay(delayTime);
  }
}
Problem seems to be that it doesn't decode... ideas I'm sort of lost at this stage. :/
(Full paste of the code below. I know the information is getting there; it's just not decoding like it should.)
#include <Stepper.h>
//#include <HardwareSerial.h>
// int intensity = 0; // led intensity this is needed just as example for this sketch
String inputString = ""; // a string to hold incoming data (this is general code you can reuse)
boolean stringComplete = false; // whether the string is complete (this is general code you can reuse)
int stepsPerRevolution = 64; //at 5.625 degrees a step
// initialize the stepper library on pins 8 through 11:
Stepper baseStepper(stepsPerRevolution, 2,3,4,5); // protocols start with //BS:
Stepper shoulderStepper(stepsPerRevolution, 6,7,8,9); // protocols start with //SS:
Stepper armStepper(stepsPerRevolution, 10,11,12,13); // protocols start with //AS:
//--------baseStepper--------//
int baseStepperRotCount = 0; //how many rotations in the for loop is needed
int baseStepperRotStepSize = 0; // how large should the steps be...
int baseStepperTime = 0; //delay time needed between each step (delay); so the stepper can do it's work.
//--------shoulderStepper--------//
int shoulderStepperRotCount =0;
// One-time board initialization.
void setup() {
// initialize serial: (this is general code you can reuse)
// 115200 baud must match the Java sender's serial configuration.
Serial.begin(115200);
}
// Processes one complete serial line (assembled by serialEvent) per pass:
// test commands ("alpha"/"beta"), stepper commands, and Ardulink-style
// "alp://" messages with optional reply ids.
void loop() {
// when a newline arrives:
if (stringComplete) {
//these are test if statements, they serve no purpose after the intial boot, but must be included to test the connectivity;
if (inputString.startsWith("alpha"))
{
boolean msgRecognized = true;
pinMode(13, OUTPUT);
digitalWrite(13, HIGH);
inputString = "";
stringComplete = false;
}
else if (inputString.startsWith("beta"))
{
boolean msgRecognized = true;
pinMode(13, OUTPUT);
digitalWrite(13, LOW);
inputString = "";
stringComplete = false;
}
//---------------------///
//these statements set the engines and prepare for running of the program.
// NOTE(review): the sender transmits "BS//:+000/+000/+000", but this
// branch tests for the prefix "//BS:", so it never matches — compare the
// corrected check startsWith("BS//:") shown earlier in the question.
if (inputString.startsWith("//BS:")) // "BS//:+000/+000/+000"
{
inputInfoToBaseStepper(inputString);
baseStepperRunAction(baseStepperRotCount, baseStepperRotStepSize, baseStepperTime);
}
else if (inputString.startsWith("//SS:"))
{
//inputInfoToShoulderStepper();
//outputConfirmed();
}
else if (inputString.startsWith("//AS:"))
{
//inputInfoToArmStepper();
// outputConfirmed();
}
if(inputString.startsWith("alp://")) { // OK is a message I know (this is general code you can reuse)
boolean msgRecognized = true;
// NOTE(review): the closing '}' on the next line sits INSIDE the '//'
// comment, so the "kprs" if-block is NOT empty — it swallows everything
// down to the matching '}' below. The reply is therefore only sent for
// "kprs" messages, and msgRecognized is set false for exactly those.
// Almost certainly unintended; move the '}' outside the comment.
if(inputString.substring(6,10) == "kprs") { // KeyPressed }
msgRecognized = false; // this sketch doesn't know other messages in this case command is ko (not ok)
// Prepare reply message if caller supply a message id (this is general code you can reuse)
int idPosition = inputString.indexOf("?id=");
if(idPosition != -1) {
String id = inputString.substring(idPosition + 4);
// print the reply
Serial.print("alp://rply/");
if(msgRecognized) { // this sketch doesn't know other messages in this case command is ko (not ok)
Serial.print("ok?id=");
} else {
Serial.print("ko?id=");
}
Serial.print(id);
Serial.write(255); // End of Message
Serial.flush();
}
}
// NOTE(review): because of the stray brace above, this cleanup only runs
// for "alp://" messages — "//BS:"-style commands never clear inputString,
// so subsequent lines accumulate onto the old one.
// clear the string:
inputString = "";
stringComplete = false;
}
}
}
/*
Send listen messages
int index = 0;
for (index = 0; index < digitalPinListeningNum; index++) {
if(digitalPinListening[index] == true) {
int value = digitalRead(index);
if(value != digitalPinListenedValue[index]) {
digitalPinListenedValue[index] = value;
Serial.print("alp://dred/");
Serial.print(index);
Serial.print("/");
Serial.print(value);
Serial.write(255);
Serial.flush();
}
}
}
for (index = 0; index < analogPinListeningNum; index++) {
if(analogPinListening[index] == true) {
int value = analogRead(index);
if(value != analogPinListenedValue[index]) {
analogPinListenedValue[index] = value;
Serial.print("alp://ared/");
Serial.print(index);
Serial.print("/");
Serial.print(value);
Serial.write(255); // End of Message
Serial.flush();
}
}
}
} */
//this method decodes and stores inputs
// Message layout: "BS//:+000/+000/+000"
//   B S / / : + 0 0 0 / +  0  0  0  /  +  0  0  0
//   0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
// BUG FIX: String::substring(from, to) EXCLUDES 'to'. The original calls
// (6,9)/(10,13)/(15,18) dropped the sign of field 1 and the last digit of
// fields 2 and 3. Each signed field spans 4 characters.
void inputInfoToBaseStepper(String baseStepper)
{
  baseStepperRotCount = baseStepper.substring(5, 9).toInt();      // "+000" at 5..8
  baseStepperRotStepSize = baseStepper.substring(10, 14).toInt(); // "+000" at 10..13
  baseStepperTime = baseStepper.substring(15, 19).toInt();        // "+000" at 15..18
}
//this method runs the base stepper off the decoded actions.
// Performs 'rotations' moves of StepSize steps each, pausing delayTime ms
// between moves so the motor can settle.
void baseStepperRunAction (int rotations, int StepSize, int delayTime)
{
  // BUG FIX: 'for (int rotations; ...)' declared a NEW, uninitialized
  // variable shadowing the parameter, so the loop counted down from an
  // indeterminate value (undefined behavior). Count on the parameter.
  for (; rotations > 0; rotations--)
  {
    baseStepper.step(StepSize);
    delay(delayTime);
  }
}
/*
  serialEvent() is invoked by the Arduino runtime between iterations of
  loop() whenever bytes are waiting on the hardware serial RX, so using
  delay inside loop can delay response. Multiple bytes of data may be
  available per call. This is general code you can reuse.
*/
void serialEvent() {
  // Accumulate incoming bytes into inputString until a newline terminates
  // the command; loop() consumes the finished line and resets the flag.
  while (Serial.available() && !stringComplete) {
    char incoming = (char)Serial.read();
    inputString += incoming;
    if (incoming == '\n') {
      stringComplete = true; // hand the completed line to loop()
    }
  }
}