I have a WinJS app that is a working launcher for a Steam game. I'd like to get it to cycle through 5 images even while the app is not running.
It uses only the small tile — there are no wide tile images for this app.
Here's the code:
(function () {
    "use strict";

    WinJS.Namespace.define("Steam", {
        launch: function launch(url) {
            var uri = new Windows.Foundation.Uri(url);
            Windows.System.Launcher.launchUriAsync(uri).then(
                function (success) {
                    if (success) {
                        // File launched
                        window.close();
                    } else {
                        // File launch failed
                    }
                }
            );
        }
    });

    WinJS.Namespace.define("Tile", {
        enqueue: function initialize() {
            var updaterHandle = Windows.UI.Notifications.TileUpdateManager.createTileUpdaterForApplication();
            updaterHandle.enableNotificationQueue(true);
            return updaterHandle;
        },
        update: function update() {
            var template = Windows.UI.Notifications.TileTemplateType.tileSquareImage;
            var tileXml = Windows.UI.Notifications.TileUpdateManager.getTemplateContent(template);
            var randIndx = Math.floor(Math.random() * 5);
            var randUpdatetime = 1000 * 3 * (((randIndx == 0) ? 1 : 0) + 1); // let the base image stay longer
            var tileImageAttributes = tileXml.getElementsByTagName("image");
            tileImageAttributes[0].setAttribute("src", "ms-appx:///images/Borderlands2/borderlands_2_" + randIndx + "_sidyseven.png");
            tileImageAttributes[0].setAttribute("alt", "Borderlands 2");
            var tileNotification = new Windows.UI.Notifications.TileNotification(tileXml);
            var currentTime = new Date();
            tileNotification.expirationTime = new Date(currentTime.getTime() + randUpdatetime);
            tileNotification.tag = "newTile";
            var updater = Tile.enqueue();
            updater.update(tileNotification);
            setTimeout('Tile.update();', randUpdatetime);
        }
    });

    WinJS.Binding.optimizeBindingReferences = true;

    var app = WinJS.Application;
    var activation = Windows.ApplicationModel.Activation;

    app.onactivated = function (args) {
        if (args.detail.kind === activation.ActivationKind.launch) {
            setTimeout('Steam.launch("steam://rungameid/49520");', 800);
            args.setPromise(WinJS.UI.processAll().then(function () {
                return WinJS.Navigation.navigate("/default.html", args).then(function () {
                    Tile.update();
                });
            }));
        }
    };

    app.start();
})();
Notes:
The code currently does not cycle the images. The tile either never appears to change, or, after launch, the application name text is replaced by a tiny view of the default image. This reverts to the text after a short time, and that cycle may repeat. A different image is never shown, neither in the small image it erroneously displays nor on the main tile.
When I run in debug and set a breakpoint at the
TileUpdater.update(TileNotification) stage, I can verify in the
console that the image src attribute is set to a random image
just as I wanted:
>>>>tileNotification.content.getElementsByTagName("image")[0].getAttribute("src")
"ms-appx:///images/Borderlands2/borderlands_2_4_sidyseven.png"
But this never actually displays on the tile.
These image files are included in the solution, and they appear in the proper directory in the Solution Explorer.
If the image src attribute is set properly in debug, then the images may not have the proper "Build Action".
In the 'Properties' of each image, set "Build Action" to "Resource".
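As a side note on the original goal (cycling while the app is not running): since the updater already calls enableNotificationQueue(true), one approach is to queue up to five tagged notifications in a single pass and let Windows rotate them on its own after the app exits, instead of rescheduling with setTimeout. A minimal sketch, assuming the same image naming scheme as above:

    var notifications = Windows.UI.Notifications;
    var updater = notifications.TileUpdateManager.createTileUpdaterForApplication();
    updater.enableNotificationQueue(true); // the queue holds up to five notifications

    for (var i = 0; i < 5; i++) {
        var xml = notifications.TileUpdateManager.getTemplateContent(notifications.TileTemplateType.tileSquareImage);
        var img = xml.getElementsByTagName("image")[0];
        img.setAttribute("src", "ms-appx:///images/Borderlands2/borderlands_2_" + i + "_sidyseven.png");
        img.setAttribute("alt", "Borderlands 2");
        var notification = new notifications.TileNotification(xml);
        notification.tag = "img" + i; // a unique tag keeps each image in the queue instead of replacing the last one
        updater.update(notification);
    }

Once queued, Windows cycles through the queued notifications on its own schedule, so no timer in the app is needed.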
Related
I am working on the Ionic Angular platform. I get a shapefile that has been converted to a Base64 string. I am converting it to a file and then trying to convert it to an ArrayBuffer using 'FileReader' so the data matches what L.shapefile expects:
https://github.com/calvinmetcalf/leaflet.shapefile
EDIT: I fixed the previous problem; the current problem is that the data (ArrayBuffer) passed to leaflet.shapefile returns the error.
Or if you got the zip some other way (like the File API) then with the arrayBuffer you can call
const geojson = await shp(buffer);
Source: https://www.npmjs.com/package/shpjs
I tried working in this direction to fit the shapefile into L.shapefile as follows:
import * as L from "leaflet";
import * as shp from "shpjs";

const l1 = require('../../assets/leaflet.shpfile.js');

export class Tab7Page {
    map: L.Map;

    async ngOnInit() {
        this.map = L.map("map", {
            center: [49.7, 8.12],
            zoom: 15,
            renderer: L.canvas(),
        });

        L.tileLayer(
            "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
            {
                // maxZoom: 12,
                attribution:
                    "Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community",
            }
        ).addTo(this.map);

        // this.map.setView(layer.getBounds()['_northEast'], 14);
        setTimeout(() => {
            this.map.invalidateSize();
        }, 1);
var shape_data = "data:application/zip;base64,UEsDBAoAAAAAAHZwplQAAAAAAAAAAAAAAAAHAAAAbGF5ZXJzL1BLAwQKAAAAAAB2cKZUD7eKbDwBAAA8AQAAEgAAAGxheWVycy9QT0xZR09OLnNocAAAJwoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJ7oAwAABQAAAAAAAFCyGiBAeNAJLjeZSEAAAACYlB0gQMAkzNp mUhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAaAUAAAAAAABQshogQHjQCS43mUhAAAAAmJQdIEDAJMzafplIQAEAAAAKAAAAAAAAAAAAAJjEGiBAwCTM2n6ZSEAAAABQshogQKOtBVtUmUhAAAAAeLYbIEC8k8/CPplIQAAAABBpHCBAsUVS7DyZSED/// v9RwgQMXhbjg/mUhAAAAA6IAdIEB40AkuN5lIQAAAAJiUHSBAeeTrB2mZSEAAAACo7hwgQFKQcddwmUhAAAAAcNwbIEDbe339cZlIQAAAAJjEGiBAwCTM2n6ZSEBQSwMECgAAAAAAdnCmVObu5cZsAAAAbAAAABIAAABsYXllcnMvUE9MWUdPTi5zaHgAACcKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA26AMAAAUAAAAAAABQshogQHjQCS43mUhAAAAAmJQdIEDAJMzafplIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMgAAAGhQSwMECgAAAAAAdnCmVGt60SP0AgAA9AIAABIAAABsYXllcnMvUE9MWUdPTi5kYmYDegQGAQAAAMEAMwIAAAAAAAAAAAAAAAAAAAAAAAAAAHN0cm9rZQAAAAAAQwAAAAD AAAAAAAAAAAAAAAAAAAAc3Ryb2tlLXcAAABOAAAAABIAAAAAAAAAAAAAAAAAAABzdHJva2UtbwAAAE4AAAAAEgAAAAAAAAAAAAAAAAAAAGZpbGwAAAAAAAAAQwAAAAD AAAAAAAAAAAAAAAAAAAAZmlsbC1vcGEAAABOAAAAABIAAAAAAAAAAAAAAAAAAAAAICNjNWQ1NzMgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAyICAgICAgICAgICAgICAgICAxI2RiZDc5MCAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAwLhpQSwMECgAAAAAAdnCmVOPBUWqPAAAAjwAAABIAAABsYXllcnMvUE9MWUdPTi5wcmpHRU9HQ1NbIkdDU19XR1NfMTk4NCIsREFUVU1bIkRfV0dTXzE5ODQiLFNQSEVST0lEWyJXR1NfMTk4NCIsNjM3ODEzNywyOTguMjU3MjIzNTYzXV0sUFJJTUVNWyJHcmVlbndpY2giLDBdLFVOSVRbIkRlZ3JlZSIsMC4wMTc0NTMyOTI1MTk5NDMyOTVdXVBLAQIUAAoAAAAAAHZwplQAAAAAAAAAAAAAAAAHAAAAAAAAAAAAEAAAAAAAAABsYXllcnMvUEsBAhQACgAAAAAAdnCmVA 3imw8AQAAPAEAABIAAAAAAAAAAAAAAAAAJQAAAGxheWVycy9QT0xZR09OLnNocFBLAQIUAAoAAAAAAHZwplTm7uXGbAAAAGwAAAASAAAAAAAAAAAAAAAAAJEBAABsYXllcnMvUE9MWUdPTi5zaHhQSwECFAAKAAAAAAB2cKZUa3rRI/QCAAD0AgAAEgAAAAAAAAAAAAAAAAAtAgAAbGF5ZXJzL1BPTFlHT04uZGJmUEsBAhQACgAAAAAAdnCmVOPBUWqPAAAAjwAAABIAAAAAAAAAAAAAAAAAUQUAAGxheWVycy9QT0xZR09OLnByalBLBQYAAAAABQAFADUBAAAQBgAAAAA="
        var shape_fileName = "TestFile";
        var file = this.dataURLtoFile(shape_data, shape_fileName);
        // console.log("Blob retrieved successfully..", blob);
        // this.handleZipFile(file);

        var reader = new FileReader();
        // arrow functions keep `this` bound to the component, so this.map stays valid inside the callbacks
        reader.onload = () => {
            if (reader.readyState != 2 || reader.error) {
                console.error("thats the error side");
                return;
            } else {
                shp(reader.result).then((geojson) => { // More info: https://github.com/calvinmetcalf/shapefile-js
                    L.geoJSON(geojson).addTo(this.map); // More info: https://github.com/calvinmetcalf/leaflet.shapefile
                });
            }
        };
        reader.readAsArrayBuffer(file);
    }
    dataURLtoFile(dataurl, filename) {
        // split off the "data:application/zip;base64," header before decoding
        var arr = dataurl.split(','),
            mime = arr[0].match(/:(.*?);/)[1],
            bstr = atob(arr[1]),
            n = bstr.length,
            u8arr = new Uint8Array(n);
        while (n--) {
            u8arr[n] = bstr.charCodeAt(n);
        }
        return new File([u8arr], filename, { type: mime });
    }
}
I just get a map with the error shown below. I am not sure which Leaflet package to use: shpjs or L.shapefile?
EDIT: As per comments from the GIS platform, I installed buffer, updated polyfill.ts, and now I get the following error.
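For reference, a minimal sketch (not the asker's final solution) that skips the File/FileReader round trip by decoding the Base64 payload straight into an ArrayBuffer for shpjs. It reuses the shape_data and this.map names from the component above; the dataURLtoArrayBuffer helper is hypothetical:

    // Hypothetical helper: decode the data URL payload directly to an ArrayBuffer.
    function dataURLtoArrayBuffer(dataurl) {
        var bstr = atob(dataurl.split(',')[1]); // drop the "data:...;base64," header
        var u8arr = new Uint8Array(bstr.length);
        for (var i = 0; i < bstr.length; i++) {
            u8arr[i] = bstr.charCodeAt(i);
        }
        return u8arr.buffer;
    }

    // Usage, with shape_data and this.map as defined in the component above:
    shp(dataURLtoArrayBuffer(shape_data)).then((geojson) => {
        L.geoJSON(geojson).addTo(this.map);
    });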
I am using node-webkit v0.45 and need to open a new window (with a pre-determined HTML layout) with dynamic parameters.
The only documentation I can find suggests using the "loaded" event to emit data to the new window, and then handling the received data with an on "data" listener in the new window's HTML, as below:
Main window:
var ngui = require('nw.gui');

var newWindow = ngui.Window.open('newwindow.html', {
    width: 1120,
    height: 360
});

newWindow.on('loaded', function () {
    var data = { msg: "test" };
    newWindow.emit("data", data);
});
New window:
var gui = require('nw.gui');
var win = gui.Window.get();

win.on("data", function (data) {
    console.log(data.msg);
});
However, it seems this was deprecated after v0.13, and the Window class no longer inherits from EventEmitter. Are there any other options for opening windows with parameters?
*** Update ***
The following code will work when the new window is opened:
var param = 'param';

ngui.Window.open('newWindow.html', {
    width: 1120,
    height: 360,
}, function (newWindow) {
    newWindow.on('loaded', () => {
        newWindow.window.param = param;
    });
});
And in newWindow.html:
<script>
    function getParam() {
        if (!window.param) {
            setTimeout(function () {
                getParam();
            }, 50);
        } else {
            console.log("Successfully loaded param: '" + window.param + "'");
        }
    }
    getParam();
</script>
The Node context can be used to share data, as the Node context is shared across windows.
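To illustrate that, a minimal sketch (assuming Node support is enabled for both windows so they share one Node context; the sharedParams name is arbitrary): the opening window can stash values on global, and the new window reads them back without any event plumbing.

    // Opening window: put the parameters in the shared Node context.
    global.sharedParams = { msg: "test" };
    ngui.Window.open('newwindow.html', { width: 1120, height: 360 });

    // newwindow.html can then read them back directly:
    // <script>
    //     console.log(global.sharedParams.msg); // "test"
    // </script>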
I am using Node.js and canvas to create an API that writes text on a certain image. I successfully created a route such as /:sometext/:fontsize/:color/:position-x/:position-y that sends a static image with the text in the given font size, color, and position on the canvas.
What I am unable to achieve is sending the font family in the route and having that rendered on the image. Also, is there any way to load at least Google fonts without having to download the .ttf file?
What I have tried:
GetGoogleFonts npm package (which was a bad idea, since it was stuck at the installation)
WebFontLoader (Gives "Window is not defined" error)
Steps to Reproduce
Currently, I am using a ttf file to load the font
router.get('/thumbnail/:name/:fontsize/:color/:posx/:posy', function (req, res, next) {
    let name = req.params.name;
    let fontsize = req.params.fontsize;
    let positionx = req.params.posx;
    let positiony = req.params.posy;
    let color = req.params.color;
    let myimage = 'static/image1.jpg';

    const canvas = createCanvas(2000, 1000);
    const ctx = canvas.getContext('2d');
    var str = "hi " + name;

    registerFont('AvenirNext.ttf', { family: 'Avenir Next' });

    loadImage(myimage).then((image) => {
        ctx.drawImage(image, 0, 0, 2000, 1000);
        ctx.font = fontsize + "px Avenir Next";
        ctx.fillStyle = color;
        ctx.fillText(str, positionx, positiony);

        const out = fs.createWriteStream(__dirname + '/test.jpeg');
        const stream = canvas.createJPEGStream();
        stream.pipe(res);
        out.on('finish', () => console.log('The JPEG file was created.'));
    });
});
If you don't want to host the .ttf files on your own server, you could try to use the Google Fonts GitHub repo.
// missing imports
const https = require('https'); // the font URL is https, so use the https module
const fs = require('fs');

const fontFamily = req.params.fontfamily; // example: ArchivoNarrow

// download font from github
// (note: GitHub may answer ?raw=true with a redirect, in which case a
// redirect-following client would be needed)
const file = fs.createWriteStream(fontFamily + '.ttf');
https.get('https://github.com/google/fonts/blob/master/ofl/' + fontFamily.toLowerCase() + '/' + fontFamily + '-Regular.ttf?raw=true', function (response) {
    response.pipe(file);
    file.on('finish', function () {
        // register only once the download has finished
        registerFont(fontFamily + '.ttf', { family: fontFamily });

        // ... create the canvas, draw and send the image here ...

        // delete font after the image is created
        try {
            fs.unlinkSync(fontFamily + '.ttf');
        } catch (err) {
            console.error(err);
        }
    });
});
Font I used for this example: ArchivoNarrow
From the docs for registerFont
To use a font file that is not installed as a system font, use registerFont() to register the font with Canvas. This must be done before the Canvas is created.
emphasis not mine
You are calling it after you created your canvas.
// first register the font
registerFont('AvenirNext.ttf', { family: 'Avenir Next' });
// then create the canvas
const canvas = createCanvas(2000, 1000);
const ctx = canvas.getContext('2d');
var str = "hi " + name;
But note that this will try to load a font that would be available on your server. If you want to make it target a font on the https://fonts.googleapis.com server, you'd need to pass the full URI to the font file (not to the .css).
Also, you should wrap your family name inside quotes (") since it contains a space:
ctx.font = fontsize + 'px "Avenir Next"';
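Putting both points together, a minimal sketch of the route under the same assumptions as the question (AvenirNext.ttf is on disk next to the script, and router, registerFont, createCanvas and loadImage are in scope), with registerFont called before createCanvas and the family name quoted:

    router.get('/thumbnail/:name/:fontsize/:color/:posx/:posy', function (req, res, next) {
        // register first, while no canvas exists yet
        registerFont('AvenirNext.ttf', { family: 'Avenir Next' });

        const canvas = createCanvas(2000, 1000);
        const ctx = canvas.getContext('2d');

        loadImage('static/image1.jpg').then((image) => {
            ctx.drawImage(image, 0, 0, 2000, 1000);
            ctx.font = req.params.fontsize + 'px "Avenir Next"'; // quoted because the family name contains a space
            ctx.fillStyle = req.params.color;
            ctx.fillText("hi " + req.params.name, req.params.posx, req.params.posy);
            canvas.createJPEGStream().pipe(res); // stream the result straight to the client
        });
    });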
I'm trying to write an application that allows the user to select a region of the screen (like selecting an area to take a screenshot).
Is that even possible?
To specifically take a full screenshot, use the following code (example pulled from the Electron Demo App). You can build off of this example and use the screen, desktopCapturer and Rectangle modules in the Electron API to customize the code to get a specific screen/display, or select a specific bounding box (x/y coordinates and pixel area).
const electron = require('electron')
const desktopCapturer = electron.desktopCapturer
const electronScreen = electron.screen
const shell = electron.shell

const fs = require('fs')
const os = require('os')
const path = require('path')

const screenshot = document.getElementById('screen-shot')
const screenshotMsg = document.getElementById('screenshot-path')

screenshot.addEventListener('click', function (event) {
  screenshotMsg.textContent = 'Gathering screens...'
  const thumbSize = determineScreenShotSize()
  let options = { types: ['screen'], thumbnailSize: thumbSize }

  desktopCapturer.getSources(options, function (error, sources) {
    if (error) return console.log(error)

    sources.forEach(function (source) {
      if (source.name === 'Entire screen' || source.name === 'Screen 1') {
        const screenshotPath = path.join(os.tmpdir(), 'screenshot.png')

        fs.writeFile(screenshotPath, source.thumbnail.toPng(), function (error) {
          if (error) return console.log(error)
          shell.openExternal('file://' + screenshotPath)
          const message = `Saved screenshot to: ${screenshotPath}`
          screenshotMsg.textContent = message
        })
      }
    })
  })
})

function determineScreenShotSize () {
  const screenSize = electronScreen.getPrimaryDisplay().workAreaSize
  const maxDimension = Math.max(screenSize.width, screenSize.height)

  return {
    width: maxDimension * window.devicePixelRatio,
    height: maxDimension * window.devicePixelRatio
  }
}
Other ways you could go about this are:
Use object.getClientRects() in the DOM to specify specific elements you want to capture, although this would require foreknowledge of what they are.
Add event listeners in your view to 'draw' the shape of what you want with mouseClick, mouseMove, etc. This stack overflow question has answers which could be adapted to fit what you want to do.
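Building on that second option, a minimal sketch (assumption: a full-window overlay element with id 'overlay' exists in the renderer) of tracking a click-and-drag rectangle that could then be cropped out of the captured screenshot:

    // Sketch: record a drag rectangle over an assumed full-screen overlay element.
    const overlay = document.getElementById('overlay')
    let startX = 0
    let startY = 0
    let selection = null

    overlay.addEventListener('mousedown', function (e) {
      startX = e.clientX
      startY = e.clientY
      selection = null
    })

    overlay.addEventListener('mouseup', function (e) {
      // normalize so the rectangle is valid regardless of drag direction
      selection = {
        x: Math.min(startX, e.clientX),
        y: Math.min(startY, e.clientY),
        width: Math.abs(e.clientX - startX),
        height: Math.abs(e.clientY - startY)
      }
      // selection could now be passed to crop() on the captured thumbnail image
      console.log('Selected region:', selection)
    })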
I doubt you are still looking for a solution to this, but after digging I have found a way to do it using a combination of shelljs and clipboard.
// assumed imports: app and clipboard from Electron, plus path, shelljs, and a unique-filename helper
const { app, clipboard } = require('electron')
const path = require('path')
const shelljs = require('shelljs')
const uniqueFilename = require('unique-filename')

const userDataPath = app.getPath('userData')
const useP = path.join(userDataPath, 'uploads')
let randomTmpfile = uniqueFilename(useP, 'prefix')

// -i lets the user drag-select a region, -c copies the capture to the clipboard (macOS only)
shelljs.exec(`screencapture -ic ${randomTmpfile}.png`, function (res) {
  const image = clipboard.readImage('png').toDataURL()
})
How to read an image from the application folder in WinJS
var item = groupedProducts.getAt(indx);
item.img = Windows.Storage.ApplicationData.current.localFolder.path + "\\" + "3766111.jpg";
groupedProducts.setAt(indx, item);
WinJS.UI.processAll();
You need to use the async APIs to access files in ApplicationData in WinJS, such as the getFileAsync function used below (this is a helper function I use in databinding for one of my apps):
function getLocalLargeMapTile(item) {
    return new WinJS.Promise(
        function (completed, error, progress) {
            var filename;
            var sourceFolder;

            if (item.latlong) {
                var latandlong = item.latlong.split(", ");
                var lat = latandlong[0];
                var lon = latandlong[1];
                filename = lat + lon + ".png";

                var appData = Windows.Storage.ApplicationData.current;
                sourceFolder = appData.localFolder;

                sourceFolder.getFileAsync(filename).then(function (file) {
                    var mapUrl = window.URL.createObjectURL(file, { oneTimeOnly: true });
                    completed(mapUrl);
                },
                function (error) {
                    handleError(error);
                });
            }
            else {
                filename = "ms-appx:///images/megaphone_256x256.png";
                completed(filename);
            }
        }
    );
}
What I'm doing in the helper function is checking whether my data includes a latitude and longitude, and if so, checking for a file with a matching filename, and since those files are in the Application Data folder, wrapping the file with an objectURL and returning a promise with the objectURL. Otherwise, I simply return an ms-appx url pointing to a static file in the app's images folder. Here's how I call this helper function, from a programmatic template (I don't think you can do this with a declarative template):
var image = document.createElement("img");
image.className = "item-image";
image.src = "ms-appx:///images/megaphone_256x256.png";
result.appendChild(image);
// additional code omitted
var promise = mapTileUtil.getLocalMapTile(currentItem);
promise.done(function (mapTileUrl) {
image.src = mapTileUrl;
});
For more info on templating functions, which provide greater control over the rendered markup than declarative templates, check out:
http://msdn.microsoft.com/en-us/library/windows/apps/jj585523.aspx
and
http://go.microsoft.com/fwlink/p/?linkid=231499
For more information on Windows Store app development in general, register for App Builder.