This is a small project for testing the pixel-level manipulation performance of NME across different builds (Windows C++, Flash).
It uses BitmapData.setPixel to modify the pixels one by one (320x240 every frame). The C++ build runs at 22 FPS, while the Flash build runs at around 100 FPS. What is the reason for the huge performance drop of the C++ build compared to Flash? How could I improve the code to get a higher FPS with the C++ build?
Mandelbrot.hx
import nme.display.Sprite;
import nme.display.Bitmap;
import nme.display.BitmapData;
import nme.text.TextField;
import nme.events.Event;
import nme.events.TimerEvent;
import nme.utils.Timer;
import nme.geom.Matrix;
import nme.geom.Rectangle;
import nme.utils.ByteArray;
class Mandelbrot
{
public static function main() : Void
{
new Mandelbrot();
}
public var pixels:Array<Array<Int>>;
public var colorModifier:Int;
private var bitmapData:BitmapData;
private var bigBitmapData:BitmapData;
private var fps:TextField;
private var width:Int;
private var height:Int;
private var matrix:Matrix;
public function new()
{
width = 320; //Std.int(flash.Lib.current.stage.stageWidth/2);
height = 240; //Std.int(flash.Lib.current.stage.stageHeight/2);
var scale:Float = 2;//flash.Lib.current.stage.stageWidth/width;
matrix = new Matrix();
matrix.scale(scale, scale);
var setBitmap:Bitmap = new Bitmap();
bitmapData = new BitmapData( width , height , false , 0x000000 );
bigBitmapData = new BitmapData( nme.Lib.current.stage.stageWidth , nme.Lib.current.stage.stageHeight , false , 0x000000 );
setBitmap.bitmapData = bigBitmapData;
nme.Lib.current.addChild( setBitmap );
var maxIterations:Int = 128;
pixels = new Array();
var beforeTime = nme.Lib.getTimer();
var xtemp;
var iteration;
var x0:Float = 0;
var y0:Float = 0;
for(ix in 0...width) {
pixels[ix] = new Array();
for(iy in 0...height) {
x0 = 0;
y0 = 0;
iteration = 128;
while ( x0*x0 + y0*y0 <= 4 && iteration > 0 )
{
xtemp = x0*x0 - y0*y0 + (ix-14*5000)/50000;
y0 = 2*x0*y0 + (iy-(height/0.6))/50000;
x0 = xtemp;
iteration--;
}
pixels[ix][iy] = iteration;
}
}
var afterTime = nme.Lib.getTimer();
var tf = new TextField();
tf.width = 400;
tf.text = "Generating fractal took "+(afterTime-beforeTime)+" ms";
nme.Lib.current.addChild(tf);
fps = new TextField();
fps.width = 400;
fps.y = 10;
fps.text = "FPS: ";
nme.Lib.current.addChild(fps);
colorModifier = 2;
var timer:haxe.Timer = new haxe.Timer(10);
runLoop();
timer.run = runLoop;
}
public function runLoop() {
var r:Int=0, b:Int=0, g:Int=0;
var pixel:Int = 0;
var beforeTime = nme.Lib.getTimer();
for(iy in 0...height) {
for(ix in 0...width) {
pixel = pixels[ix][iy];
r = pixel + colorModifier;
g = pixel + colorModifier + r;
b = pixel + colorModifier + g;
bitmapData.setPixel(ix, iy, (r<<16 | g<<8 | b));
}
}
bigBitmapData.draw(bitmapData, matrix, null, null, null, false);
var afterTime = nme.Lib.getTimer();
fps.text = "FPS: "+Math.round(1000/(afterTime-beforeTime));
colorModifier += 2;
if(colorModifier > 65530)
colorModifier = 0;
}
}
Mandelbrot.nmml
<?xml version="1.0" encoding="utf-8"?>
<project>
<app
file="Mandelbrot.hx"
title="Mandelbrot sample"
package="org.haxe.nme.mandelbrot"
version="1.0.0"
company="nme"
main="Mandelbrot"
/>
<window
width="640"
height="480"
orientation="landscape"
fps="60"
background="0xffffff"
resizeable="true"
hardware="true"
/>
<classpath name="." />
<haxelib name="nme" />
<ndll name="std" />
<ndll name="regexp" />
<ndll name="zlib" />
<ndll name="nme" haxelib="nme" />
<setenv name="SHOW_CONSOLE"/>
</project>
Look into the nme.Memory API. The idea is to create a ByteArray with the correct size (or get it from a BitmapData), select it as the current virtual memory space and manipulate its bytes directly.
You'll get an approximately 10x speed boost with Flash and it should be way faster with the CPP target too. Don't forget to compile in Release mode or method inlining will be disabled and performance will suffer a lot.
Basic usage example (untested code):
var rect:Rectangle = bitmapData.rect;
// 32bits integer = 4 bytes
var size:Int = bitmapData.width * bitmapData.height * 4;
// The virtual memory space we'll use
var pixels:ByteArray = new ByteArray();
// CPP does not support setting the length property directly
#if (cpp) pixels.setLength(size);
#else pixels.length = size; #end
// Select the memory space (call it once, not every frame)
Memory.select(pixels);
// And in your loop set your color
// Color is in BGRA mode, nme.Memory can only be used in little endian mode.
Memory.setI32((y * width + x) * 4, color);
// When you're done, render the BitmapData
// (don't forget to reset the ByteArray position)
pixels.position = 0;
bitmapData.setPixels(rect, pixels);
Keep in mind this is a very basic code example. In your case, you'd need to adapt it and actually use a double-sized ByteArray, because you need to store the iteration count too. The nested loops in your main loop can also be flattened, which avoids a lot of extra index/address computations:
// Note the size * 2 !
// First part of the ByteArray will be used to store the iteration count,
// the second part to draw the pixels.
#if (cpp) pixels.setLength(size * 2);
#else pixels.length = size * 2; #end
Memory.select(pixels);
// First loop storing iteration count
for (iy in 0...height)
{
for (ix in 0...width)
{
// ... do some stuff ...
Memory.setI32((iy * width + ix) << 2, iteration);
}
}
// In your runLoop :
for (i in 0...(height * width))
{
// Get the iteration count
var pixel:Int = Memory.getI32(i << 2);
r = pixel + colorModifier;
g = pixel + colorModifier + r;
b = pixel + colorModifier + g;
// Note that we're writing the pixel in the second part of our ByteArray
Memory.setI32(size + (i << 2), r | g << 8 | b << 16);
}
// Sets the position to the second part of our ByteArray
pixels.position = size;
bitmapData.setPixels(rect, pixels);
And that's it. If you really don't want to use Alchemy opcodes on the Flash target, the next fastest way to blit pixels is to use getVector()/setVector() from the BitmapData class, but it's really not as fast.
Array itself is not a true linear array in Flash; it behaves more like a map.
For per-pixel manipulation I can recommend the getVector/setVector API of the BitmapData class, which can retrieve (and assign) a rectangular area of the image as flat pixel data. In that case you can access individual pixels in the vector as:
pixels[ix + image_width*iy] = <argb32>
Also, instead of constructing an intermediate Array of Arrays it would be faster to assign pixels directly.
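A rough sketch of what runLoop could look like with this approach (untested; it assumes getVector/setVector are exposed by the BitmapData class in your NME version, as they are in the Flash API):
var rect = bitmapData.rect;
// fetch the whole image as a flat vector of ARGB32 pixels
var vec = bitmapData.getVector(rect);
for (iy in 0...height) {
var row = iy * width;
for (ix in 0...width) {
var pixel = pixels[ix][iy];
var r = pixel + colorModifier;
var g = pixel + colorModifier + r;
var b = pixel + colorModifier + g;
// one flat index and a plain write instead of a setPixel() call per pixel
vec[row + ix] = (r << 16) | (g << 8) | b;
}
}
// push the whole frame back in a single call
bitmapData.setVector(rect, vec);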
Try to use ByteArray. It would be faster in Flash and C++, I think.
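For example, a rough sketch (untested; assumes ByteArray.writeInt and BitmapData.setPixels behave as in the Flash API): write the whole frame into one ByteArray and push it with a single setPixels call instead of one setPixel call per pixel.
var rect = bitmapData.rect;
// illustrative only; in practice create the ByteArray once and reuse it each frame
var bytes:ByteArray = new ByteArray();
for (iy in 0...height) {
for (ix in 0...width) {
var pixel = pixels[ix][iy];
var r = pixel + colorModifier;
var g = pixel + colorModifier + r;
var b = pixel + colorModifier + g;
// setPixels expects 32-bit ARGB values
bytes.writeInt((0xFF << 24) | (r << 16) | (g << 8) | b);
}
}
bytes.position = 0;
bitmapData.setPixels(rect, bytes);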
Related
I am new to Direct3D11 and I am currently trying to create a texture programmatically within my code, using this code I found online:
// Some Constants
int w = 256;
int h = 256;
int bpp = 4;
int *buf = new int[w*h];
//declarations
ID3D11Texture2D* tex;
D3D11_TEXTURE2D_DESC sTexDesc;
D3D11_SUBRESOURCE_DATA tbsd;
// filling the image
for (int i = 0; i<h; i++)
for (int j = 0; j<w; j++)
{
if ((i & 32) == (j & 32))
buf[i*w + j] = 0x00000000;
else
buf[i*w + j] = 0xffffffff;
}
// setting up D3D11_SUBRESOURCE_DATA
tbsd.pSysMem = (void *)buf;
tbsd.SysMemPitch = w*bpp;
tbsd.SysMemSlicePitch = w*h*bpp; // Not needed since this is a 2d texture
// initializing sTexDesc
sTexDesc.Width = w;
sTexDesc.Height = h;
sTexDesc.MipLevels = 1;
sTexDesc.ArraySize = 1;
sTexDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
sTexDesc.SampleDesc.Count = 1;
sTexDesc.SampleDesc.Quality = 0;
sTexDesc.Usage = D3D11_USAGE_DEFAULT;
sTexDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
sTexDesc.CPUAccessFlags = 0;
sTexDesc.MiscFlags = 0;
hr = m_pd3dDevice->CreateTexture2D(&sTexDesc, &tbsd, &tex);
and that's all fine and dandy, but I am a bit confused about how to actually load this into the shader. Below I initialized this ID3D11ShaderResourceView:
ID3D11ShaderResourceView* m_pTextureRV = nullptr;
I found in the Microsoft tutorials that I need to use CreateShaderResourceView. But how exactly do I use it? I tried this:
hr = m_pd3dDevice->CreateShaderResourceView(tex, NULL , m_pTextureRV);
but it gives me an error, telling me that m_pTextureRV is not a valid argument for the function. What am I doing wrong here?
The correct way to call that function is:
hr = m_pd3dDevice->CreateShaderResourceView(tex, nullptr, &m_pTextureRV);
Remember that ID3D11ShaderResourceView* is a pointer to an interface. You need a pointer-to-a-pointer to get a new instance of one back.
You should really consider using a COM smart-pointer like Microsoft::WRL::ComPtr instead of raw pointers for these interfaces.
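A minimal sketch of the same calls using ComPtr (assuming <wrl/client.h> and the sTexDesc/tbsd variables from your question):
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;
// Both interfaces are released automatically when the ComPtrs go out of scope.
ComPtr<ID3D11Texture2D> tex;
ComPtr<ID3D11ShaderResourceView> textureRV;
hr = m_pd3dDevice->CreateTexture2D(&sTexDesc, &tbsd, tex.GetAddressOf());
if (SUCCEEDED(hr))
hr = m_pd3dDevice->CreateShaderResourceView(tex.Get(), nullptr, textureRV.GetAddressOf());
// Later, pass textureRV.GetAddressOf() (an ID3D11ShaderResourceView* array of one) to PSSetShaderResources.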
Once you have created the shader resource view for your texture object, then you need to associate it with whatever slot the HLSL expects to find it in. So, for example, if you were to write an HLSL source file as:
Texture2D texture : register( t0 );
SamplerState sampler: register( s0 );
float4 PS(float2 tex : TEXCOORD0) : SV_Target
{
return texture.Sample( sampler, tex );
}
Compile that as a pixel shader and bind it to the render pipeline via PSSetShader. Then you'd need to call:
ID3D11ShaderResourceView* srv[1] = { m_pTextureRV };
m_pImmediateContext->PSSetShaderResources( 0, 1, srv );
Of course you also need an ID3D11SamplerState* sampler bound as well:
ID3D11SamplerState* m_pSamplerLinear = nullptr;
D3D11_SAMPLER_DESC sampDesc = {};
sampDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sampDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
sampDesc.MinLOD = 0;
sampDesc.MaxLOD = D3D11_FLOAT32_MAX;
hr = m_pd3dDevice->CreateSamplerState( &sampDesc, &m_pSamplerLinear );
Then when you are about to draw:
m_pImmediateContext->PSSetSamplers( 0, 1, &m_pSamplerLinear );
I strongly recommend you check out the DirectX Tool Kit and the tutorials there.
So I am using this npm package: node-stl
And it's working great. However, the regexp syntax, mathematics and geometrical calculations are somewhat confusing to me, especially all at the same time.
Basically what I want to achieve is to extend the script to calculate the bounding box of the STL.
Here is the main file that calculates the volume and weight of the STL being parsed/read.
var fs = require('fs');
// Vertex
function Vertex (v1,v2,v3) {
this.v1 = Number(v1);
this.v2 = Number(v2);
this.v3 = Number(v3);
}
// Vertex Holder
function VertexHolder (vertex1,vertex2,vertex3) {
this.vert1 = vertex1;
this.vert2 = vertex2;
this.vert3 = vertex3;
}
// transforming a Node.js Buffer into a V8 array buffer
function _toArrayBuffer (buffer) {
var
ab = new ArrayBuffer(buffer.length),
view = new Uint8Array(ab);
for (var i = 0; i < buffer.length; ++i) {
view[i] = buffer[i];
}
return ab;
}
// calculation of the triangle volume
// source: http://stackoverflow.com/questions/6518404/how-do-i-calculate-the-volume-of-an-object-stored-in-stl-files
function _triangleVolume (vertexHolder) {
var
v321 = Number(vertexHolder.vert3.v1 * vertexHolder.vert2.v2 * vertexHolder.vert1.v3),
v231 = Number(vertexHolder.vert2.v1 * vertexHolder.vert3.v2 * vertexHolder.vert1.v3),
v312 = Number(vertexHolder.vert3.v1 * vertexHolder.vert1.v2 * vertexHolder.vert2.v3),
v132 = Number(vertexHolder.vert1.v1 * vertexHolder.vert3.v2 * vertexHolder.vert2.v3),
v213 = Number(vertexHolder.vert2.v1 * vertexHolder.vert1.v2 * vertexHolder.vert3.v3),
v123 = Number(vertexHolder.vert1.v1 * vertexHolder.vert2.v2 * vertexHolder.vert3.v3);
return Number(1.0/6.0)*(-v321 + v231 + v312 - v132 - v213 + v123);
}
// parsing an STL ASCII string
function _parseSTLString (stl) {
var totalVol = 0;
// yes, this is the regular expression, matching the vertexes
// it was kind of tricky but it is fast and does the job
var vertexes = stl.match(/facet\s+normal\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+outer\s+loop\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+endloop\s+endfacet/g);
vertexes.forEach(function (vert) {
var preVertexHolder = new VertexHolder();
vert.match(/vertex\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s+([-+]?\b(?:[0-9]*\.)?[0-9]+(?:[eE][-+]?[0-9]+)?\b)\s/g).forEach(function (vertex, i) {
var tempVertex = vertex.replace('vertex', '').match(/[-+]?[0-9]*\.?[0-9]+/g);
var preVertex = new Vertex(tempVertex[0],tempVertex[1],tempVertex[2]);
preVertexHolder['vert'+(i+1)] = preVertex;
});
var partVolume = _triangleVolume(preVertexHolder);
totalVol += Number(partVolume);
})
var volumeTotal = Math.abs(totalVol)/1000;
return {
volume: volumeTotal, // cubic cm
weight: volumeTotal * 1.04 // gm
}
}
// parsing an STL Binary File
// (borrowed some code from here: https://github.com/mrdoob/three.js/blob/master/examples/js/loaders/STLLoader.js)
function _parseSTLBinary (buf) {
buf = _toArrayBuffer(buf);
var
headerLength = 80,
dataOffset = 84,
faceLength = 12*4 + 2,
le = true; // is little-endian
var
dvTriangleCount = new DataView(buf, headerLength, 4),
numTriangles = dvTriangleCount.getUint32(0, le),
totalVol = 0;
for (var i = 0; i < numTriangles; i++) {
var
dv = new DataView(buf, dataOffset + i*faceLength, faceLength),
normal = new Vertex(dv.getFloat32(0, le), dv.getFloat32(4, le), dv.getFloat32(8, le)),
vertHolder = new VertexHolder();
for(var v = 3; v < 12; v+=3) {
var vert = new Vertex(dv.getFloat32(v*4, le), dv.getFloat32((v+1)*4, le), dv.getFloat32( (v+2)*4, le ) );
vertHolder['vert'+(v/3)] = vert;
}
totalVol += _triangleVolume(vertHolder);
}
var volumeTotal = Math.abs(totalVol)/1000;
return {
volume: volumeTotal, // cubic cm
weight: volumeTotal * 1.04 // gm
}
}
// NodeStl
// =======
// > var stl = NodeStl(__dirname + '/myCool.stl');
// > console.log(stl.volume + 'cm^3');
// > console.log(stl.weight + 'gm');
function NodeStl (stlPath) {
var
buf = fs.readFileSync(stlPath),
isAscii = true;
for (var i=0, len=buf.length; i<len; i++) {
if (buf[i] > 127) { isAscii=false; break; }
}
if (isAscii)
return _parseSTLString(buf.toString());
else
return _parseSTLBinary(buf);
}
module.exports = NodeStl;
If anyone could help me with this it would be great. I know it feels like it should be simple: I just need the max/min in each of the directions (x, y, z) and could then calculate the bounding box. But I do not understand what the max/min for x, y and z is here. Please answer if you have an idea.
I've made a new branch https://github.com/johannesboyne/node-stl/tree/boundingbox could you please verify whether the applied algorithm works?
Best,
Johannes
Edit: If the branch is stable (i.e. works), I'll push it into v0.1.0 (don't know why it is still 0.0.1).
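For anyone reading along, the core idea is just tracking the per-axis min/max while visiting each vertex. A hypothetical sketch of that (not the actual branch code; names are illustrative):
// Track min/max per axis while visiting vertices, then derive the box size.
function _boundingBoxTracker () {
var min = [ Infinity, Infinity, Infinity];
var max = [-Infinity, -Infinity, -Infinity];
return {
add: function (vertex) {
var coords = [vertex.v1, vertex.v2, vertex.v3];
for (var i = 0; i < 3; i++) {
if (coords[i] < min[i]) min[i] = coords[i];
if (coords[i] > max[i]) max[i] = coords[i];
}
},
// dimensions of the axis-aligned bounding box
box: function () {
return [max[0] - min[0], max[1] - min[1], max[2] - min[2]];
}
};
}
// Usage: create one tracker per parse, call tracker.add(vert) for every Vertex
// built in _parseSTLString / _parseSTLBinary, and include tracker.box() in the
// returned object alongside volume and weight.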
I am trying to concatenate buffers that are saved in memory streams. Then, when I try to play the whole buffer, it throws an exception:
An exception of type 'System.ArgumentException' occurred in
Microsoft.Xna.Framework.ni.dll but was not handled in user code
Additional information: Ensure that the buffer length is non-zero and
meets the block alignment requirements for the audio format.
When I debug, mStrm still remains empty (length 0), and I can't find out why.
private void mySendClick(object sender, RoutedEventArgs e)
{
var mStrmStartDelimiter = new MemoryStream();
var mStrmEndDelimiter = new MemoryStream();
BinaryWriter writer1 = new BinaryWriter(mStrmStartDelimiter);
Sinus(6500, 200, writer1, 32767);
BinaryWriter writer2 = new BinaryWriter(mStrmEndDelimiter);
Sinus(6800, 200, writer2, 32767);
var mStrm = new MemoryStream();
mStrmStartDelimiter.CopyTo(mStrm);
//ToDO
mStrmEndDelimiter.CopyTo(mStrm);
mStrm.Seek(0, SeekOrigin.Begin);
SoundEffect mySoundPlay = new SoundEffect(mStrm.ToArray(), 16000, AudioChannels.Mono);
mySoundPlay.Play();
}
public static void Sinus(double frequency, int msDuration, BinaryWriter writer, int volume)
{
double TAU = 2 * Math.PI;
double samplesPerSecond = 16000;
double theta = frequency * TAU / (double)samplesPerSecond;
int samples = (int)((decimal)samplesPerSecond * msDuration / 1000);
// 'volume' is UInt16 with range 0 thru Uint16.MaxValue ( = 65 535)
// we need 'amp' to have the range of 0 thru Int16.MaxValue ( = 32 767)
double amp = volume >> 2; // so we simply set amp = volume / 2
for (int step = 0; step < samples; step++)
{
short s = (short)(amp * Math.Sin(theta * (double)step));
writer.Write(s);
}
}
I'm targeting the Windows Phone 8.1 Silverlight platform.
I got the solution for the problem: CopyTo() copies from the stream's current position, and after writing the sine data both source streams are positioned at their end, so nothing gets copied. Do the following before calling CopyTo():
mStrmStartDelimiter.Position = 0;
mStrmEndDelimiter.Position = 0;
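A sketch of the relevant part of mySendClick with the fix applied:
// Rewind the source streams first: CopyTo copies from the current position,
// and after the Sinus() calls both streams sit at their end.
mStrmStartDelimiter.Position = 0;
mStrmEndDelimiter.Position = 0;
var mStrm = new MemoryStream();
mStrmStartDelimiter.CopyTo(mStrm);
mStrmEndDelimiter.CopyTo(mStrm);
SoundEffect mySoundPlay = new SoundEffect(mStrm.ToArray(), 16000, AudioChannels.Mono);
mySoundPlay.Play();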
I am new to posting in forums, as I can usually help myself through searches, but I am really stuck here...
I have written an ASP.NET C# web app, which also needs to incorporate a client-side serial COM interface to a device that does live scanning and streams the scanned images through the serial (USB) interface. (These images need to be refreshed continuously, approximately every 100 ms, basically creating an "animation" effect.)
To get this to run client-side, I have written a small Silverlight app running in-browser with the following code, using a BackgroundWorker to try to separate the serial comms from the UI, and refreshing the Silverlight Image control from the byte arrays received.
I have VERY SIMILAR code working 100% in WinForms; my only issue in Silverlight is that no image ever gets displayed in my image control.
Below is the relevant WinForms code, followed by the corresponding Silverlight code.
My current suspicion is that the image is never rendered because Silverlight only allows a PixelFormat of 32bppArgb, whereas I need to use PixelFormat.Format8bppIndexed, as per my WinForms CreateBitmap() method below.
If this is indeed the issue, I cannot find any way to create this format of Bitmap in Silverlight.
/////////////// WINFORMS CODE (Timer1 Interval = 100ms) //////////////////
private void timer1_Tick(object sender, EventArgs e)
{
BackgroundWorker bw = new BackgroundWorker();
bw.WorkerReportsProgress = true;
bw.DoWork += new DoWorkEventHandler(
delegate(object o, DoWorkEventArgs args)
{
BackgroundWorker b = o as BackgroundWorker;
int BytesToRead = COMport.Read(ReceiveBuffer);
for (int i = 0; i < BytesToRead; i++)
{
//Code that copies ReceiveBuffer to byte[] LiveImgArr
Bitmap liveBMP = CreateBitmap(LiveImgArr, imgWidth, imgHeight);
bw.ReportProgress(i, liveBMP);
}
});
bw.ProgressChanged += new ProgressChangedEventHandler(
delegate(object o, ProgressChangedEventArgs args)
{
pictureBox1.Image = (Bitmap)args.UserState;
});
bw.RunWorkerAsync();
}
private Bitmap CreateBitmap(byte[] buffer, int width, int height)
{
Bitmap bmp = new Bitmap(width, height, System.Drawing.Imaging.PixelFormat.Format8bppIndexed);
BitmapData bmpData = bmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, PixelFormat.Format8bppIndexed);
System.Runtime.InteropServices.Marshal.Copy(buffer, 0, bmpData.Scan0, width * height);
bmp.UnlockBits(bmpData);
ColorPalette pal = bmp.Palette;
for (int i = 0; i < 256; i++)
{
pal.Entries[i] = Color.FromArgb(i, i, i);
}
bmp.Palette = pal;
return bmp;
}
//////////////////////////////////////// END /////////////////////////////////////////
//////////////////////////////// SilverLight Code ///////////////////////////////////
public void StartTimer()//object o, RoutedEventArgs sender)
{
System.Windows.Threading.DispatcherTimer CommsTimer = new System.Windows.Threading.DispatcherTimer();
CommsTimer.Interval = new TimeSpan(0, 0, 0, 0, 100); // 100 Milliseconds
CommsTimer.Tick += new EventHandler(CommsTimer_Tick);
CommsTimer.Start();
}
public void CommsTimer_Tick(object o, EventArgs sender)
{
BackgroundWorker bw = new BackgroundWorker();
bw.WorkerReportsProgress = true;
bw.DoWork += new DoWorkEventHandler(
delegate(object b, DoWorkEventArgs args)
{
BackgroundWorker obw = b as BackgroundWorker;
int BytesToRead = COMport.Read(ReceiveBuffer);
for (int i = 0; i < BytesToRead; i++)
{
//Code that copies ReceiveBuffer to byte[] LiveImgArr
bw.ReportProgress(i, LiveImgArr);
}
});
bw.ProgressChanged += new ProgressChangedEventHandler(
delegate(object j, ProgressChangedEventArgs args)
{
byte[] imgByte = (byte[])args.UserState;
using (MemoryStream ms = new MemoryStream(imgByte, 0, imgByte.Length))
{
BitmapImage bmp = new BitmapImage();
bmp.SetSource(ms);
this.image1.Source = bmp;
}
});
//////////////////////////////////////// END //////////////////////////////////////////
///////////////////////////////// NEW CODE AS PER CLEMENTS/////////////////////////////
bw.ProgressChanged += new ProgressChangedEventHandler(
delegate(object j, ProgressChangedEventArgs args)
{
byte[] imgByte = (byte[])args.UserState;
WriteableBitmap wbmp = new WriteableBitmap(208, 208);
int[] wbmpArray = wbmp.Pixels;
for (int pixelIndex = 0; pixelIndex < imgByte.Length; pixelIndex++)
{
byte alpha = 128;
byte red = 255;
byte green = 255;
byte blue = 255;
double scaleAlpha = alpha / 255.0;
// we are not using scaleAlpha here
//wbmp.Pixels[pixelIndex] =
// (alpha << 24)
// | (red << 16)
// | (green << 8)
// | blue;
// notice the alpha value is NOT scaled
// it’s also very important to scale BEFORE
// shifting the values
wbmp.Pixels[pixelIndex] =
(alpha << 24)
| ((byte)(red * scaleAlpha) << 16)
| ((byte)(green * scaleAlpha) << 8)
| (byte)(blue * scaleAlpha);
}
wbmp.Invalidate();
this.image1.Source = wbmp;
//ImageBrush imgBrush = new ImageBrush();
//imgBrush.ImageSource = wbmp;
//imgRect.Fill = imgBrush;
});
BitmapImage.SetSource only accepts streams that contain an encoded image buffer (PNG or JPEG).
In order to set pixel data directly, you would have to use WriteableBitmap instead of BitmapImage. From the Remarks section in the class documentation:
• Construct an initially empty but dimensioned WriteableBitmap using
WriteableBitmap(Int32, Int32).
• Get the pixel array from Pixels.
• Loop through the array, setting the individual pixel values as
integer values that are evaluated as premultiplied ARGB32.
• Call Invalidate.
• To display the image in UI, use the WriteableBitmap as the source
for an imaging control such as Image, or as the source image for an
ImageBrush.
The following code fills a WriteableBitmap with a blue color and an opacity gradient from 0 to 100% from left to right by using only integer arithmetics:
var bitmap = new WriteableBitmap(500, 500);
int red = 0;
int green = 0;
int blue = 255;
for (int i = 0; i < bitmap.PixelWidth * bitmap.PixelHeight; i++)
{
int x = i % bitmap.PixelWidth;
int alpha = x * 256 / bitmap.PixelWidth;
bitmap.Pixels[i] =
(alpha << 24) |
((red * alpha / 256) << 16) |
((green * alpha / 256) << 8) |
(blue * alpha / 256);
}
bitmap.Invalidate();
image.Source = bitmap;
So basically I have a soundboard made in Flash CS5. Is it possible to alter the sound of the library's audio files using Flash only? Like making the clips sound deeper or faster, that's the point. But if it's not possible...
You cannot do it in AS2. But yes, you can change the pitch and playback speed of a sound in AS3. In fact you can do more than that (search for Audiotool) but this demonstrates how it is done:
http://plasticsturgeon.com/2012/05/changing-the-pitch-of-a-looped-sound-at-runtime-with-actionscript/
And in case my blog goes down or something, here is the relevant class that changes the pitch of a sound:
package sound
{
import flash.events.Event;
import flash.events.SampleDataEvent;
import flash.media.Sound;
import flash.media.SoundChannel;
import flash.media.SoundTransform;
import flash.net.URLRequest;
import flash.utils.ByteArray;
/**
* @author Andre Michelle (andr...@gmail.com)
* Modified by Zach Foley aka The Plastic Sturgeon
*/
public class MP3Pitch
{
private const BLOCK_SIZE: int = 3072;
private var _mp3: Sound;
private var _sound: Sound;
private var _target: ByteArray;
private var _position: Number;
private var _rate: Number;
private var repeat:SoundChannel;
private var _volume:Number = 1;
private var byteArray:ByteArray;
// Pass in your looped Sound
public function MP3Pitch( pitchedSound: Sound)
{
_target = new ByteArray();
_mp3 = pitchedSound;
_position = 0.0;
_rate = 0.0;
_sound = new Sound();
_sound.addEventListener( SampleDataEvent.SAMPLE_DATA, sampleData );
repeat = _sound.play();
}
public function get rate(): Number
{
return _rate;
}
// Also added a handy volume setter
public function set volume( value: Number ): void
{
_volume = value;
repeat.soundTransform = new SoundTransform(_volume);
}
// use this to set the pitch of your sound
public function set rate( value: Number ): void
{
if( value < 0.0 )
value = 0;
_rate = value;
}
private function sampleData( event: SampleDataEvent ): void
{
//-- REUSE INSTEAD OF RECREATION
_target.position = 0;
//-- SHORTCUT
var data: ByteArray = event.data;
var scaledBlockSize: Number = BLOCK_SIZE * _rate;
var positionInt: int = _position;
var alpha: Number = _position - positionInt;
var positionTargetNum: Number = alpha;
var positionTargetInt: int = -1;
//-- COMPUTE NUMBER OF SAMPLES NEED TO PROCESS BLOCK (+2 FOR INTERPOLATION)
var need: int = Math.ceil( scaledBlockSize ) + 2;
//-- EXTRACT SAMPLES
var read: int = _mp3.extract( _target, need, positionInt );
var n: int = read == need ? BLOCK_SIZE : read / _rate;
var l0: Number;
var r0: Number;
var l1: Number;
var r1: Number;
for( var i: int = 0 ; i < n ; ++i )
{
//-- AVOID READING EQUAL SAMPLES, IF RATE < 1.0
if( int( positionTargetNum ) != positionTargetInt )
{
positionTargetInt = positionTargetNum;
//-- SET TARGET READ POSITION
_target.position = positionTargetInt << 3;
//-- READ TWO STEREO SAMPLES FOR LINEAR INTERPOLATION
l0 = _target.readFloat();
r0 = _target.readFloat();
l1 = _target.readFloat();
r1 = _target.readFloat();
}
//-- WRITE INTERPOLATED AMPLITUDES INTO STREAM
data.writeFloat( l0 + alpha * ( l1 - l0 ) );
data.writeFloat( r0 + alpha * ( r1 - r0 ) );
//-- INCREASE TARGET POSITION
positionTargetNum += _rate;
//-- INCREASE FRACTION AND CLAMP BETWEEN 0 AND 1
alpha += _rate;
while( alpha >= 1.0 ) --alpha;
}
//-- FILL REST OF STREAM WITH ZEROs
if( i < BLOCK_SIZE )
{
while( i < BLOCK_SIZE )
{
data.writeFloat( 0.0 );
data.writeFloat( 0.0 );
++i;
}
}
//-- INCREASE SOUND POSITION
_position += scaledBlockSize;
// My little addition here:
if (_position > _mp3.length * 44.1) {
_position = 0;
_target.position = 0;
}
}
}
}