Read Shader Storage Buffer Object data after compute shader execution in OpenSceneGraph

I'm trying to read back data, a simple float array, from a shader storage buffer object after a compute shader has executed. I need this data in my application code, which uses the osg library. Following the osgssbo example I set up a shader storage buffer object callback, but when I read the data in it, the array elements appear unchanged at their initial values. The SSBO and shader seem to be linked correctly and the shader executes.
Thanks.
Here is my basic code:
#include <osgViewer/Viewer>
#include <osgGA/TrackballManipulator>
#include <osg/BufferIndexBinding>
#include <osg/Geometry>
#include <osg/GLExtensions>

using namespace osg;

class ShaderStorageBufferCBK : public osg::StateAttributeCallback
{
public:
    void operator() (osg::StateAttribute* attr, osg::NodeVisitor* nv)
    {
        // If you need to process the data in your app code, it is better to leave it
        // on the GPU and process it there; downloading it every frame will be slow.
        osg::ShaderStorageBufferBinding* ssbb = static_cast<osg::ShaderStorageBufferBinding*>(attr);
        osg::ShaderStorageBufferObject* ssbo =
            static_cast<osg::ShaderStorageBufferObject*>(ssbb->getBufferObject());
        osg::FloatArray* array = static_cast<osg::FloatArray*>(ssbo->getBufferData(0));
        for (unsigned int i = 0; i < array->size(); ++i)
        {
            osg::notify(osg::INFO) << array->at(i) << " ";
        }
        osg::notify(osg::INFO) << std::endl;
    }
};

int mymain()
{
    FloatArray* dati = new FloatArray;
    dati->push_back(1.2);
    dati->push_back(1.2);
    dati->push_back(1.2);
    dati->push_back(1.2);

    ShaderStorageBufferObject* ssbo = new osg::ShaderStorageBufferObject;
    dati->setBufferObject(ssbo);

    ShaderStorageBufferBinding* ssbb =
        new ShaderStorageBufferBinding(0, ssbo, 0, dati->size() * sizeof(GLfloat));
    ssbb->setUpdateCallback(new ShaderStorageBufferCBK);

    Shader* shader = new osg::Shader(osg::Shader::COMPUTE);
    shader->loadShaderSourceFromFile("shaders/lightpixels.cs");
    osg::ref_ptr<osg::Program> computeProg = new osg::Program;
    computeProg->setComputeGroups(1, 1, 1);
    computeProg->addShader(shader);

    osg::Geometry* geom = createTexturedQuadGeometry(Vec3(-0.5, 0, -0.5), Vec3(1.0, 0, 0.0), Vec3(0, 0, 1.0));
    geom->getOrCreateStateSet()->setAttributeAndModes(computeProg);
    geom->getOrCreateStateSet()->setAttributeAndModes(ssbb, osg::StateAttribute::ON);

    osgViewer::Viewer viewer;
    viewer.setSceneData(geom);
    viewer.setUpViewInWindow(100, 100, 640, 480);
    viewer.realize();
    viewer.setCameraManipulator(new osgGA::TrackballManipulator());
    while (!viewer.done())
    {
        viewer.frame();
    }
    return 0;
}
And the trivial shader code is:
#version 430

layout(local_size_x = 1) in;

layout(std430, binding = 0) buffer destBuffer
{
    float data[];
};

void main()
{
    data[0] = 348.0;
    data[2] = 23.0;
}
The output written by the callback function is:
1.2 1.2 1.2 1.2
when I expect to see:
348 1.2 23 1.2

You can use the osg GLExtensions to read the buffer data back; see the AtomicCounterBufferBinding::readData function for reference.
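For illustration, here is a minimal sketch of that approach, modeled on what AtomicCounterBufferBinding::readData does internally. This is my own sketch, not code from the thread: the ReadbackCallback class is made up, it assumes OSG 3.4+ (where osg::GLExtensions is unified), and it must run where a GL context is current, for example as a camera post-draw callback, rather than in the state attribute update callback, which runs during the update traversal without a context.
// Hypothetical readback callback: runs on the draw thread after the compute
// shader has been dispatched. Modeled on AtomicCounterBufferBinding::readData.
struct ReadbackCallback : public osg::Camera::DrawCallback
{
    osg::ref_ptr<osg::ShaderStorageBufferBinding> _ssbb;

    ReadbackCallback(osg::ShaderStorageBufferBinding* ssbb) : _ssbb(ssbb) {}

    virtual void operator()(osg::RenderInfo& renderInfo) const
    {
        osg::State& state = *renderInfo.getState();
        osg::GLExtensions* ext = state.get<osg::GLExtensions>();

        // Look up the GL buffer object that OSG created for this context.
        osg::GLBufferObject* glObj =
            _ssbb->getBufferObject()->getOrCreateGLBufferObject(state.getContextID());
        if (!glObj) return;

        // Map the buffer and copy the floats out, as readData does for
        // GL_ATOMIC_COUNTER_BUFFER.
        ext->glBindBuffer(GL_SHADER_STORAGE_BUFFER, glObj->getGLObjectID());
        GLfloat* src = static_cast<GLfloat*>(
            ext->glMapBuffer(GL_SHADER_STORAGE_BUFFER, GL_READ_ONLY_ARB));
        if (src)
        {
            for (unsigned int i = 0; i < 4; ++i)
                osg::notify(osg::INFO) << src[i] << " ";
            osg::notify(osg::INFO) << std::endl;
            ext->glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
        }
        ext->glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
    }
};
It would be hooked up in mymain() with something like viewer.getCamera()->setPostDrawCallback(new ReadbackCallback(ssbb));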

Related

D3D12: converting 1 descriptor table (2 CBVs) to 2 descriptor tables (one CBV each) causes screen flicker

I have been learning D3D development recently.
For the triangle example, I added code for setting the model and view matrices.
Here I set model and view separately.
There are three ways to do this:
1. root constants
2. a root constant buffer view
3. CBVs in a descriptor table
They all work for me except the descriptor-table approach.
For descriptor tables there are again three layouts, and I want to practice them all to get a good understanding of D3D12 coding:
1. put 2 float4x4 in one constant buffer, bound as 1 descriptor table.
2. put 1 float4x4 in each constant buffer, then bound as 2 descriptor tables with 1 descriptor range each.
3. put 1 float4x4 in each constant buffer, then bound as 1 descriptor table with 2 descriptor ranges.
I have tried methods 1 & 3, but when I try method 2 I get screen flicker.
Here is my code for methods 2 & 3 to compare. Please help me find the root cause.
create root signature
#ifdef TABLE_SEPERATE
/* 2 descriptor table, 2 cbvs*/
CD3DX12_ROOT_PARAMETER slotRootParameter[2];
CD3DX12_DESCRIPTOR_RANGE dscRange[2];
dscRange[0].Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0);
dscRange[1].Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
slotRootParameter[0].InitAsDescriptorTable(1, &dscRange[0]);
slotRootParameter[1].InitAsDescriptorTable(1, &dscRange[1]);
CD3DX12_ROOT_SIGNATURE_DESC rootSignatureDesc;
rootSignatureDesc.Init(2, slotRootParameter, 0, nullptr, D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT);
#else
/* 1 descriptor table, 2 cbvs*/
CD3DX12_ROOT_PARAMETER slotRootParameter[1];
CD3DX12_DESCRIPTOR_RANGE dscRange[2];
dscRange[0].Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0);
dscRange[1].Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
slotRootParameter[0].InitAsDescriptorTable(2, &dscRange[0]);
CD3DX12_ROOT_SIGNATURE_DESC rootSignatureDesc;
rootSignatureDesc.Init(1, slotRootParameter, 0, nullptr, D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT);
#endif
ComPtr<ID3DBlob> signature;
ComPtr<ID3DBlob> error;
ThrowIfFailed(D3D12SerializeRootSignature(&rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error));
ThrowIfFailed(m_d3d12Device->CreateRootSignature(0, signature->GetBufferPointer(), signature->GetBufferSize(), IID_PPV_ARGS(&m_rootSignature)));
create descriptor heaps & constant view
ModelCB model;
ViewCB view;
DirectX::XMStoreFloat4x4(&model.model, XMMatrixTranspose(XMMatrixScaling(0.1, 0.1, 1.0)));
DirectX::XMStoreFloat4x4(&view.view, XMMatrixTranspose(XMMatrixScaling(0.5, 0.5, 1.0))); // For debugging: use scaling first, so the effect is easy to see.
m_pModelBuffer = new CUploadBufferResource<ModelCB>(m_d3d12Device.Get(), 1, true);
m_pViewBuffer = new CUploadBufferResource<ViewCB>(m_d3d12Device.Get(), 1, true);
m_pViewBuffer->CopyData(0, view);
m_pModelBuffer->CopyData(0, model);
D3D12_DESCRIPTOR_HEAP_DESC cbvHeapDesc;
cbvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
cbvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
cbvHeapDesc.NumDescriptors = 2;
cbvHeapDesc.NodeMask = 0;
ThrowIfFailed(m_d3d12Device->CreateDescriptorHeap(&cbvHeapDesc, IID_PPV_ARGS(&m_cbvHeap)));
m_ModelBufferView.BufferLocation = m_pModelBuffer->Resouce()->GetGPUVirtualAddress();
m_ModelBufferView.SizeInBytes = m_pModelBuffer->GetElementByteSize();
m_ViewBufferView.BufferLocation = m_pViewBuffer->Resouce()->GetGPUVirtualAddress();
m_ViewBufferView.SizeInBytes = m_pViewBuffer->GetElementByteSize();
UINT cbv_srv_uavDescriptorSize = m_d3d12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
auto handle = CD3DX12_CPU_DESCRIPTOR_HANDLE(m_cbvHeap->GetCPUDescriptorHandleForHeapStart());
handle.Offset(0, cbv_srv_uavDescriptorSize);
m_d3d12Device->CreateConstantBufferView(&m_ModelBufferView, handle);
cbv_srv_uavDescriptorSize = m_d3d12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
handle = CD3DX12_CPU_DESCRIPTOR_HANDLE(m_cbvHeap->GetCPUDescriptorHandleForHeapStart());
handle.Offset(1, cbv_srv_uavDescriptorSize);
m_d3d12Device->CreateConstantBufferView(&m_ViewBufferView, handle);
shader hlsl
cbuffer V : register(b1)
{
    float4x4 view;
};

cbuffer M : register(b0)
{
    float4x4 model;
};

struct PSInput
{
    float4 position : SV_POSITION;
    float4 color : COLOR;
};

PSInput VSMain(float4 position : POSITION, float4 color : COLOR)
{
    PSInput result;
    result.position = mul(mul(position, model), view);
    result.color = color;
    return result;
}

float4 PSMain(PSInput input) : SV_TARGET
{
    return input.color;
}
code in OnRender
1 descriptor table, 2 CBVs [WORKS]
ID3D12DescriptorHeap* descHeaps[] = { m_cbvHeap.Get() };
m_commandList->SetDescriptorHeaps(_countof(descHeaps), descHeaps);
m_commandList->SetPipelineState(m_pipelineState.Get());
m_commandList->SetGraphicsRootSignature(m_rootSignature.Get());
UINT cbv_srv_uavDescriptorSize = m_d3d12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
auto handle = CD3DX12_GPU_DESCRIPTOR_HANDLE(m_cbvHeap->GetGPUDescriptorHandleForHeapStart());
handle.Offset(0, cbv_srv_uavDescriptorSize);
m_commandList->SetGraphicsRootDescriptorTable(0, handle);
2 descriptor tables, 2 CBVs [DOES NOT WORK]
ID3D12DescriptorHeap* descHeaps[] = { m_cbvHeap.Get() };
m_commandList->SetDescriptorHeaps(_countof(descHeaps), descHeaps);
m_commandList->SetPipelineState(m_pipelineState.Get());
m_commandList->SetGraphicsRootSignature(m_rootSignature.Get());
UINT cbv_srv_uavDescriptorSize = m_d3d12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
auto handle = CD3DX12_GPU_DESCRIPTOR_HANDLE(m_cbvHeap->GetGPUDescriptorHandleForHeapStart());
handle.Offset(0, cbv_srv_uavDescriptorSize);
m_commandList->SetGraphicsRootDescriptorTable(0, handle);
handle = CD3DX12_GPU_DESCRIPTOR_HANDLE(m_cbvHeap->GetGPUDescriptorHandleForHeapStart());
handle.Offset(1, cbv_srv_uavDescriptorSize);
m_commandList->SetGraphicsRootDescriptorTable(1, handle);
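As a general debugging aid, my addition rather than anything from this code: the D3D12 debug layer will usually report root-signature or descriptor mismatches that cause this kind of rendering artifact. A minimal sketch (the helper name is made up); call it before creating the device:
// Hypothetical helper: enable the D3D12 debug layer so binding and
// root-signature mismatches are reported in the debugger output.
// Must be called before D3D12CreateDevice.
#include <d3d12.h>
#include <wrl/client.h>

void EnableDebugLayer()
{
    Microsoft::WRL::ComPtr<ID3D12Debug> debugController;
    if (SUCCEEDED(D3D12GetDebugInterface(IID_PPV_ARGS(&debugController))))
    {
        debugController->EnableDebugLayer();
    }
}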

How to get vtkBoxWidget coordinates

I create image data from a 3D array, render and show it, and add a vtkBoxWidget to it.
I want to get the coordinates of each corner of the vtkBoxWidget once the user rotates or scales it. How can I do this? Here is my code.
#include "myrender.h"
#include <vtkInteractorStyleTrackballCamera.h>
// For vtkBoxWidget:
#include "vtkPlanes.h"
#include "vtkBoxWidget.h"
#include "vtkTransform.h"
#include "vtkCommand.h"
#include "vtkProperty.h"
#include "vtkPlane.h"
#include "vtkImageData.h"
#include "vtkExtractVOI.h"
#include "vtkBorderRepresentation.h"
#include "vtkProp3D.h"
class vtkMyCallback : public vtkCommand{
public:
static vtkMyCallback *New() {
return new vtkMyCallback;
}
virtual void Execute(
vtkObject *caller, unsigned long, void* ) {
// Here we use the vtkBoxWidget to transform the underlying coneActor
// (by manipulating its transformation matrix).
vtkSmartPointer<vtkTransform> t =
vtkSmartPointer<vtkTransform>::New();
vtkBoxWidget *widget = reinterpret_cast<vtkBoxWidget*>(caller);
widget->GetTransform( t );
//widget->GetProp3D()->SetUserTransform( t );
double *pos = t->GetPosition();
t->GetMatrix();
std::cout<<"position: "<<pos[0]<<" "<<pos[1]<<" "<<pos[2]<<std::endl;
}};
MyRender::MyRender()
{
    ROIdata = NULL;
    ROI_sz0 = 0;
    ROI_sz1 = 0;
    ROI_sz2 = 0;
}

MyRender::~MyRender()
{
}

void MyRender::setData(unsigned char *ROIdata, int ROI_sz0, int ROI_sz1, int ROI_sz2)
{
    this->ROIdata = ROIdata;
    this->ROI_sz0 = ROI_sz0;
    this->ROI_sz1 = ROI_sz1;
    this->ROI_sz2 = ROI_sz2;
}

void MyRender::render(vtkSmartPointer<vtkRenderWindow> renWin)
{
    int width = ROI_sz0;
    int height = ROI_sz1;
    int depth = ROI_sz2;

    /**
     * RENDER WHOLE BRAIN DATA AND SHOW.
     */
    // Convert the C-style image to a vtkImageData
    vtkSmartPointer<vtkImageImport> imageImport = vtkSmartPointer<vtkImageImport>::New();
    imageImport->SetImportVoidPointer(ROIdata);
    imageImport->SetDataScalarTypeToUnsignedChar();
    imageImport->SetNumberOfScalarComponents(1);
    imageImport->SetDataSpacing(1.0, 1.0, 1.0);
    imageImport->SetDataOrigin(0, 0, 0);
    imageImport->SetDataExtent(0, width-1, 0, height-1, 0, depth-1);
    imageImport->SetWholeExtent(0, width-1, 0, height-1, 0, depth-1);
    imageImport->Update();

    int *arr = imageImport->GetOutput()->GetDimensions();
    std::cout << "x size: " << arr[0] << " y size: " << arr[1] << " z size: " << arr[2] << std::endl;

    // Create the standard renderer
    vtkSmartPointer<vtkRenderer> ren = vtkSmartPointer<vtkRenderer>::New();

    // Create transfer mapping from scalar value to opacity
    vtkSmartPointer<vtkPiecewiseFunction> opacityFunc = vtkSmartPointer<vtkPiecewiseFunction>::New();
    opacityFunc->AddPoint(0, 0.0);
    opacityFunc->AddPoint(512, 1.0);
    opacityFunc->ClampingOff();

    // Create transfer mapping from scalar value to color
    vtkSmartPointer<vtkColorTransferFunction> colorFunc =
        vtkSmartPointer<vtkColorTransferFunction>::New();
    colorFunc->AddRGBPoint(150, 1.0, 1.0, 1.0);

    // The property describes how the data will look
    vtkSmartPointer<vtkVolumeProperty> volumeProperty = vtkSmartPointer<vtkVolumeProperty>::New();
    volumeProperty->SetColor(colorFunc);
    volumeProperty->SetScalarOpacity(opacityFunc);
    volumeProperty->ShadeOn();
    volumeProperty->SetInterpolationTypeToLinear();

    vtkSmartPointer<vtkSmartVolumeMapper> volumeMapper = vtkSmartPointer<vtkSmartVolumeMapper>::New();
    volumeMapper->SetInputConnection(imageImport->GetOutputPort());

    // The volume holds the mapper and the property and can be used to position/orient the volume
    vtkSmartPointer<vtkVolume> volume = vtkSmartPointer<vtkVolume>::New();
    volume->SetMapper(volumeMapper);
    volume->SetProperty(volumeProperty);

    ren->AddVolume(volume);
    ren->SetBackground(0, 0, 0);

    vtkSmartPointer<vtkBoxWidget> box = vtkSmartPointer<vtkBoxWidget>::New();
    box->SetInteractor(renWin->GetInteractor());
    box->SetPlaceFactor(1);
    box->SetInputConnection(imageImport->GetOutputPort());
    box->PlaceWidget();
    box->InsideOutOn();
    box->SetProp3D(volume);
    box->GetOutlineProperty()->SetRepresentationToSurface();
    box->GetOutlineProperty()->SetOpacity(1.0);
    box->RotationEnabledOff();

    vtkSmartPointer<vtkMyCallback> callback = vtkSmartPointer<vtkMyCallback>::New();
    box->AddObserver(vtkCommand::InteractionEvent, callback);
    box->On();

    renWin->AddRenderer(ren);
    renWin->Render();
}
How can I get the coordinates of the vtkBoxWidget in the vtkCommand class in real time? Thanks a lot.
From the doc:
void vtkBoxWidget::GetPolyData ( vtkPolyData * pd )
Grab the polydata (including points) that define the box widget.
The polydata consists of 6 quadrilateral faces and 15 points. The first eight points define the eight corner vertices; the next six define the -x,+x, -y,+y, -z,+z face points; and the final point (the 15th out of 15 points) defines the center of the hexahedron. These point values are guaranteed to be up-to-date when either the InteractionEvent or EndInteractionEvent events are invoked. The user provides the vtkPolyData and the points and cells are added to it.
It looks like this is what you are looking for, isn't it?
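A minimal sketch of using it inside the existing vtkMyCallback::Execute, my addition based on the documentation quoted above; the first eight points are the corner vertices, and they are guaranteed up to date during InteractionEvent:
// Grab the widget's polydata and print the eight corner vertices.
vtkSmartPointer<vtkPolyData> pd = vtkSmartPointer<vtkPolyData>::New();
widget->GetPolyData(pd);
for (vtkIdType i = 0; i < 8; ++i)
{
    double p[3];
    pd->GetPoint(i, p);
    std::cout << "corner " << i << ": " << p[0] << " " << p[1] << " " << p[2] << std::endl;
}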

Arduino + Processing and string information in an "if" statement

I am using an Arduino to read values from a scale into Processing.
Now I want certain things to happen depending on the weight registered. The weight is read as a String. I have found out that >= does not work for Strings. I have tried if (val >= str("20.00")), but that doesn't work either.
I have also tried to convert it into a float using float scale = Float.parseFloat(val);, but that doesn't work either. Does anyone have an idea? That would be great!
import processing.serial.*;
import cc.arduino.*;
import org.firmata.*;

PImage pictureday;
Serial myPort;
String val;
color textcolor = color(0, 50, 50, 240);
color textcolor2 = color(255, 0, 0);

void setup()
{
  fullScreen();
  background(189, 215, 239);
  String portName = Serial.list()[0];
  myPort = new Serial(this, portName, 9600);
}

void draw()
{
  if (myPort.available() > 0)
  { // If data is available,
    val = myPort.readStringUntil('\n');
  }
  if (scale >= 20.00) // 'scale' should be the float parsed from val; this is the part I can't get to work
  {
    image(pictureday, 0, 0);
    textSize(50);
    fill(textcolor);
    text(val, 900, 360);
    text("KG ", 1030, 360);
    println(val);
  }
}
Like you've discovered, you can't compare Strings like that. Processing isn't smart enough to know the String contains a number.
Instead, use the float() function:
String val = "123.456";
float f = float(val);
if(f > 100){
println("here!");
}

TarsosDSP pitch detection from a .wav file: the resulting frequency is always less than half

I'm trying to use the TarsosDSP library to detect pitch from a .wav file, and the resulting frequency is always less than half of the expected value.
Here is my code.
// Imports assume the be.tarsos.dsp package layout of current TarsosDSP releases.
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URL;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.UnsupportedAudioFileException;
import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.io.jvm.JVMAudioInputStream;
import be.tarsos.dsp.pitch.PitchDetectionHandler;
import be.tarsos.dsp.pitch.PitchDetectionResult;
import be.tarsos.dsp.pitch.PitchProcessor;
import be.tarsos.dsp.pitch.PitchProcessor.PitchEstimationAlgorithm;

public class Main {
    public static void main(String[] args) {
        try {
            float sampleRate = 44100;
            int audioBufferSize = 2048;
            int bufferOverlap = 0;
            // Create an AudioInputStream from my .wav file
            URL soundURL = Main.class.getResource("/DetectPicthFromWav/329.wav");
            AudioInputStream stream = AudioSystem.getAudioInputStream(soundURL);
            // Convert into the TarsosDSP API
            JVMAudioInputStream audioStream = new JVMAudioInputStream(stream);
            AudioDispatcher dispatcher = new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
            MyPitchDetector myPitchDetector = new MyPitchDetector();
            dispatcher.addAudioProcessor(new PitchProcessor(PitchEstimationAlgorithm.YIN, sampleRate, audioBufferSize, myPitchDetector));
            dispatcher.run();
        }
        catch (FileNotFoundException fne) { fne.printStackTrace(); }
        catch (UnsupportedAudioFileException uafe) { uafe.printStackTrace(); }
        catch (IOException ie) { ie.printStackTrace(); }
    }
}

class MyPitchDetector implements PitchDetectionHandler {
    // Here the resulting pitch is always less than half.
    @Override
    public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent audioEvent) {
        if (pitchDetectionResult.getPitch() != -1) {
            double timeStamp = audioEvent.getTimeStamp();
            float pitch = pitchDetectionResult.getPitch();
            float probability = pitchDetectionResult.getProbability();
            double rms = audioEvent.getRMS() * 100;
            String message = String.format("Pitch detected at %.2fs: %.2fHz ( %.2f probability, RMS: %.5f )\n", timeStamp, pitch, probability, rms);
            System.out.println(message);
        }
    }
}
The 329.wav file was generated with the http://onlinetonegenerator.com/ website at 329Hz.
I don't know why the resulting pitch is always 164.5Hz. Is there any problem in my code?
Well, I don't know what methods you are using, but given that the frequency is exactly halved, it could be a problem of the wrong sample rate being set.
Most operations assume the sample rate at which the signal was originally sampled; maybe you've passed it as an argument (or its default value is) half that?
I just had the same problem with TarsosDSP on Android. For me the answer was that the file from http://onlinetonegenerator.com/ has 32-bit samples instead of 16-bit, which appears to be the default. Reading 32-bit samples as 16-bit yields twice as many samples, so the waveform appears stretched to twice its period and the detected pitch is exactly half (329Hz becomes 164.5Hz). Relevant code:
AssetFileDescriptor afd = getAssets().openFd("440.wav"); // 440Hz sine wave
InputStream is = afd.createInputStream();
TarsosDSPAudioFormat audioFormat = new TarsosDSPAudioFormat(
        /* sample rate */ 44100,
        /* HERE: sample size in bits */ 32,
        /* number of channels */ 1,
        /* signed/unsigned data */ true,
        /* big-endian byte order */ false
);
AudioDispatcher dispatcher = new AudioDispatcher(new UniversalAudioInputStream(is, audioFormat), 2048, 0);
PitchDetectionHandler pdh = ...
AudioProcessor p = new PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 44100, 2048, pdh);
dispatcher.addAudioProcessor(p);
new Thread(dispatcher, "Audio Dispatcher").start();

MonoTouch - WebRequest memory leak and crash?

I've got a MonoTouch app that does an HTTP POST with a 3.5MB file, and it is very unstable on the primary platforms that I test on (iPhone 3G with OS 3.1.2 and iPhone 4 with OS 4.2.1). I'll describe what I'm doing here and maybe someone can tell me if I'm doing something wrong.
In order to rule out the rest of my app, I've whittled this down to a tiny sample app. The app is an iPhone OpenGL project and it does only this:
1. At startup, allocate 6MB of memory in 30k chunks. This simulates my app's memory usage.
2. Read a 3.5MB file into memory.
3. Create a thread to post the data. (Make a WebRequest object, use GetRequestStream(), and write the 3.5MB of data in.)
4. When the main thread detects that the posting thread is done, go to step 2 and repeat.
Also, each frame, I allocate 0-100k to simulate the app doing something. I don't keep any references to this data, so it should be getting garbage collected.
iPhone 3G Result: The app gets through 6 to 8 uploads and then the OS kills it. There is no crash log, but there is a LowMemory log showing that the app was jettisoned.
iPhone 4 Result: It gets an Mprotect error around the 11th upload.
A few data points:
Instruments does NOT show the memory increasing as the app continues to upload.
Instruments doesn't show any significant leaks (maybe 1 kilobyte total).
It doesn't matter whether I write the post data in 64k chunks or all at once with one Stream.Write() call.
It doesn't matter whether I wait for a response (HttpWebRequest.HaveResponse) or not before starting the next upload.
It doesn't matter if the POST data is even valid. I've tried using valid POST data and I've tried sending 3MB of zeros.
If the app is not allocating any data each frame, then it takes longer to run out of memory (but as mentioned before, the memory that I'm allocating each frame is not referenced after the frame it was allocated on, so it should be scooped up by the GC).
If nobody has any ideas, I'll file a bug with Novell, but I wanted to see if I'm doing something wrong here first.
If anyone wants the full sample app, I can provide it, but I've pasted the contents of my EAGLView.cs below.
using System;
using System.Net;
using System.Threading;
using System.Collections.Generic;
using System.IO;
using OpenTK.Platform.iPhoneOS;
using MonoTouch.CoreAnimation;
using OpenTK;
using OpenTK.Graphics.ES11;
using MonoTouch.Foundation;
using MonoTouch.ObjCRuntime;
using MonoTouch.OpenGLES;

namespace CrashTest
{
    public partial class EAGLView : iPhoneOSGameView
    {
        [Export("layerClass")]
        static Class LayerClass()
        {
            return iPhoneOSGameView.GetLayerClass();
        }

        [Export("initWithCoder:")]
        public EAGLView(NSCoder coder) : base(coder)
        {
            LayerRetainsBacking = false;
            LayerColorFormat = EAGLColorFormat.RGBA8;
            ContextRenderingApi = EAGLRenderingAPI.OpenGLES1;
        }

        protected override void ConfigureLayer(CAEAGLLayer eaglLayer)
        {
            eaglLayer.Opaque = true;
        }

        protected override void OnRenderFrame(FrameEventArgs e)
        {
            SimulateAppAllocations();
            UpdatePost();

            base.OnRenderFrame(e);
            float[] squareVertices = { -0.5f, -0.5f, 0.5f, -0.5f, -0.5f, 0.5f, 0.5f, 0.5f };
            byte[] squareColors = { 255, 255, 0, 255, 0, 255, 255, 255, 0, 0,
                0, 0, 255, 0, 255, 255 };

            MakeCurrent();
            GL.Viewport(0, 0, Size.Width, Size.Height);
            GL.MatrixMode(All.Projection);
            GL.LoadIdentity();
            GL.Ortho(-1.0f, 1.0f, -1.5f, 1.5f, -1.0f, 1.0f);
            GL.MatrixMode(All.Modelview);
            GL.Rotate(3.0f, 0.0f, 0.0f, 1.0f);
            GL.ClearColor(0.5f, 0.5f, 0.5f, 1.0f);
            GL.Clear((uint)All.ColorBufferBit);
            GL.VertexPointer(2, All.Float, 0, squareVertices);
            GL.EnableClientState(All.VertexArray);
            GL.ColorPointer(4, All.UnsignedByte, 0, squareColors);
            GL.EnableClientState(All.ColorArray);
            GL.DrawArrays(All.TriangleStrip, 0, 4);
            SwapBuffers();
        }

        AsyncHttpPost m_Post;
        int m_nPosts = 1;

        byte[] LoadPostData()
        {
            // Just return 3MB of zeros. It doesn't matter whether this is valid POST data or not.
            return new byte[1024 * 1024 * 3];
        }

        void UpdatePost()
        {
            if (m_Post == null || m_Post.PostStatus != AsyncHttpPostStatus.InProgress)
            {
                System.Console.WriteLine(string.Format("Starting post {0}", m_nPosts++));
                byte[] postData = LoadPostData();
                m_Post = new AsyncHttpPost(
                    "https://api-video.facebook.com/restserver.php",
                    "multipart/form-data; boundary=" + "8cdbcdf18ab6640",
                    postData);
            }
        }

        Random m_Random = new Random(0);
        List<byte[]> m_Allocations;
        List<byte[]> m_InitialAllocations;

        void SimulateAppAllocations()
        {
            // First time through, allocate a bunch of data that the app would allocate.
            if (m_InitialAllocations == null)
            {
                m_InitialAllocations = new List<byte[]>();
                int nInitialBytes = 6 * 1024 * 1024;
                int nBlockSize = 30000;
                for (int nCurBytes = 0; nCurBytes < nInitialBytes; nCurBytes += nBlockSize)
                {
                    m_InitialAllocations.Add(new byte[nBlockSize]);
                }
            }
            m_Allocations = new List<byte[]>();
            for (int i = 0; i < 10; i++)
            {
                int nAllocationSize = m_Random.Next(10000) + 10;
                m_Allocations.Add(new byte[nAllocationSize]);
            }
        }
    }

    public enum AsyncHttpPostStatus
    {
        InProgress,
        Success,
        Fail
    }

    public class AsyncHttpPost
    {
        public AsyncHttpPost(string sURL, string sContentType, byte[] postData)
        {
            m_PostData = postData;
            m_PostStatus = AsyncHttpPostStatus.InProgress;
            m_sContentType = sContentType;
            m_sURL = sURL;
            //UploadThread();
            m_UploadThread = new Thread(new ThreadStart(UploadThread));
            m_UploadThread.Start();
        }

        void UploadThread()
        {
            using (MonoTouch.Foundation.NSAutoreleasePool pool = new MonoTouch.Foundation.NSAutoreleasePool())
            {
                try
                {
                    HttpWebRequest request = WebRequest.Create(m_sURL) as HttpWebRequest;
                    request.Method = "POST";
                    request.ContentType = m_sContentType;
                    request.ContentLength = m_PostData.Length;
                    // Write the post data.
                    using (Stream stream = request.GetRequestStream())
                    {
                        stream.Write(m_PostData, 0, m_PostData.Length);
                        stream.Close();
                    }
                    System.Console.WriteLine("Finished!");
                    // We're done with the data now. Let it be garbage collected.
                    m_PostData = null;
                    // Finished!
                    m_PostStatus = AsyncHttpPostStatus.Success;
                }
                catch (System.Exception e)
                {
                    System.Console.WriteLine("Error in AsyncHttpPost.UploadThread:\n" + e.Message);
                    m_PostStatus = AsyncHttpPostStatus.Fail;
                }
            }
        }

        public AsyncHttpPostStatus PostStatus
        {
            get
            {
                return m_PostStatus;
            }
        }

        Thread m_UploadThread;
        // Queued to be handled in the main thread.
        byte[] m_PostData;
        AsyncHttpPostStatus m_PostStatus;
        string m_sContentType;
        string m_sURL;
    }
}
I think you should read in your file 1 KB (or some arbitrary size) at a time and write it to the web request.
Code similar to this:
byte[] buffer = new byte[1024];
int bytesRead = 0;
using (FileStream fileStream = File.OpenRead("YourFile.txt"))
{
    while ((bytesRead = fileStream.Read(buffer, 0, buffer.Length)) != 0)
    {
        httpPostStream.Write(buffer, 0, bytesRead);
    }
}
This is off the top of my head, but I think it's right.
This way you don't have an extra 3MB floating around in memory when you don't really need it. I think tricks like this are even more important on iDevices (and other mobile devices) than on the desktop.
Test the buffer size too; a larger buffer will get you better speeds up to a point (I remember 8KB being pretty good).
