tf.image.cropAndResize throwing "method must be bilinear or nearest, but was undefined" error - node.js

I am trying to run an image categorization model in firebase cloud functions using tensorflow.js (specifically tfjs-node) but am running into the following error:
Error: method must be bilinear or nearest, but was undefined
at assert (/workspace/node_modules/#tensorflow/tfjs-core/dist/tf-core.node.js:698:15)
at cropAndResize_ (/workspace/node_modules/#tensorflow/tfjs-core/dist/tf-core.node.js:21340:5)
at Object.cropAndResize__op [as cropAndResize] (/workspace/node_modules/#tensorflow/tfjs-core/dist/tf-core.node.js:4287:29)
at prepImage (/workspace/handlers/models.js:58:35)
at /workspace/handlers/models.js:68:44
at processTicksAndRejections (internal/process/task_queues.js:97:5)
at async exports.isFurnished (/workspace/handlers/models.js:10:17)
at async exports.getanalysis (/workspace/handlers/apis.js:103:16)
The error is being thrown by the tf.image.cropAndResize() function. What is strange about this error is that cropAndResize() should be automatically using its default value of "bilinear" as specified in the docs.
Stranger yet, when I run it locally I don't get any errors. My local machine is running node v12.16.0.
Below is my code. Please note that I am only loading signature.json from the Firebase storage and fetching/standardizing an image (I am not loading and running the actual tf model).
const { admin, db } = require("../util/admin");
const firebase = require("firebase");
const tf = require("#tensorflow/tfjs-node");
const fetch = require("node-fetch");
exports.isFurnished = async (imgUrl) => {
const sigPath = "models/signature.json";
const signature = await loadSignature(sigPath);
const image = await loadImage(imgUrl, signature);
return "It worked!";
};
//signature---------------------------
/**
 * Downloads and parses the model signature JSON from Firebase Storage.
 * @param {string} filePath - Path of the signature file within the bucket.
 * @returns {Promise<Object>} The parsed signature object.
 * @throws {Error} If the download or JSON parse fails. The original
 *   `.catch((err) => err.message)` resolved with the error-message STRING,
 *   which callers then treated as a valid signature and failed later in a
 *   confusing place (e.g. reading `signature.inputs` of a string).
 */
const loadSignature = async (filePath) => {
  const file = admin.storage().bucket().file(filePath);
  try {
    const [contents] = await file.download();
    return JSON.parse(contents.toString("utf8"));
  } catch (err) {
    throw new Error(`Failed to load signature "${filePath}": ${err.message}`);
  }
};
//Image-------------------------------
// Fetches the raw image bytes for the URL and converts them into a
// model-ready tensor using the signature's declared input shape.
const loadImage = async (imgUrl, signature) => {
  const rawImage = await fetchImage(imgUrl);
  return prepImage(rawImage, signature);
};
/**
 * Downloads an image over HTTP and returns its bytes as a Buffer.
 * @param {string} url - URL of the image to fetch.
 * @returns {Promise<Buffer>} Raw image bytes.
 * @throws {Error} If the response has a non-2xx status — otherwise an
 *   error page's body would flow into tf.node.decodeImage as garbage.
 */
const fetchImage = async (url) => {
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Failed to fetch image (${response.status}): ${url}`);
  }
  return response.buffer();
};
/**
 * Normalizes, center-crops (to square), and resizes a raw image buffer so it
 * matches the input shape declared in the model's signature.json.
 * @param {Buffer} rawImage - Encoded image bytes (png/jpeg/...).
 * @param {Object} signature - Parsed signature.json; reads
 *   `signature.inputs.Image.shape` for the target [height, width].
 * @returns {tf.Tensor4D} A [1, height, width, 3] tensor in [0, 1].
 *
 * Fix: the original leaked every intermediate tensor (decodeImage, div,
 * scalar, reshape) on each call — tfjs-node does not garbage-collect
 * tensors. tf.tidy disposes all intermediates but keeps the returned one.
 */
const prepImage = (rawImage, signature) =>
  tf.tidy(() => {
    const image = tf.node.decodeImage(rawImage, 3);
    const [height, width] = signature.inputs.Image.shape.slice(1, 3);
    const [imgHeight, imgWidth] = image.shape.slice(0, 2);
    // Scale pixel values from [0, 255] down to [0, 1].
    const normalizedImage = tf.div(image, tf.scalar(255));
    // Add a batch dimension: [h, w, 3] -> [1, h, w, 3].
    const reshapedImage = normalizedImage.reshape([1, ...normalizedImage.shape]);
    // Crop box in normalized coordinates; default covers the whole image.
    let top = 0;
    let left = 0;
    let bottom = 1;
    let right = 1;
    if (imgHeight !== imgWidth) {
      // Center-crop the longer dimension so the crop region is square.
      const size = Math.min(imgHeight, imgWidth);
      left = (imgWidth - size) / 2 / imgWidth;
      top = (imgHeight - size) / 2 / imgHeight;
      right = (imgWidth + size) / 2 / imgWidth;
      bottom = (imgHeight + size) / 2 / imgHeight;
    }
    return tf.image.cropAndResize(
      reshapedImage,
      [[top, left, bottom, right]],
      [0],
      [height, width]
    );
  });
Have I made an error that I'm just not seeing or is this a node and/or tfjs issue?
Also, adding in the "bilinear" parameter yields this error:
Error: Invalid napi_status: A number was expected

As commented above, TensorFlow.js version 2.8.0 seems to have introduced some breaking changes. Workaround (at the time of writing) is to keep using version 2.7.0.

I am working with bodypix. It was working fine until this morning. Although I haven't changed anything, since this afternoon this exact error came up. It could be Tensorflow's issue. Or,
I checked on Windows 8.1. There, it works totally fine. The problem happens on Windows 10.
EDIT: I am quite sure it's from TensorFlow. Not the windows. I was using CDN to get the bodypix and after updating the cdn address the error disappeared.
previous: https://cdn.jsdelivr.net/npm/#tensorflow-models/body-pix/dist/body-pix.min.js
https://cdn.jsdelivr.net/npm/#tensorflow/tfjs/dist/tf.min.js
Now: https://cdn.jsdelivr.net/npm/#tensorflow-models/body-pix#2.0.5/dist/body-pix.min.js
https://cdn.jsdelivr.net/npm/#tensorflow/tfjs#2.7.0/dist/tf.min.js

Related

'document is not defined' canvas/discord

I'm trying make a animated banner to discord server, but I have some problems with that. I don't know why document is not defined. I tried a bunch of ways, but none of them worked.
// Builds an animated banner GIF overlaying the current voice-member count on
// each frame of a source GIF, then posts it to a channel.
// NOTE(review): this runs under Node (discord.js), where `document` does not
// exist; gif-frames with outputType 'canvas' delegates to save-pixels, which
// reads `document` — that is what raises the ReferenceError shown below.
try {
let guild = await client.guilds.cache.get(guildId.id);
// Count members currently connected across all of the guild's voice channels.
const voiceChannels = guild.channels.cache.filter((c) => c.type === "GUILD_VOICE");
let count = 0;
for (const [, voiceChannel] of voiceChannels) count += voiceChannel.members.size;
// Off-screen canvas used to compose each output frame.
var canvas = createCanvas(420, 236);
var ctx = canvas.getContext('2d');
ctx.textAlign = "center"
ctx.font = "20px Kanit Black"
ctx.fillStyle = '000001'
const encoder = new GIFEncoder(420, 236, {emptyColor: true, dynamicColors: true})
encoder.setRepeat(0); // 0 = loop forever
encoder.setQuality(10)
// Decode every frame of the source GIF. The 'canvas' outputType is the code
// path that touches `document` and fails in a Node context.
await gifFrames({ url: `${__dirname}/banner-beta.gif`, frames: 'all', outputType: 'canvas', cumulative: false}).then((frameData) => {
for(let i = 0; i < frameData.length; i++) {
// Scale the source frame's delay for the output GIF.
encoder.setDelay(frameData[i].frameInfo.delay * 15);
console.log(frameData[0])
console.log(frameData[i])
// Draw the decoded frame, then overlay the member count on top of it.
ctx.drawImage((frameData[i]).getImage(), 0, 0, 420, 236)
ctx.fillText(`${count}`, 50, 90)
console.log(ctx)
encoder.addFrame(ctx);
}
encoder.finish();
console.log(encoder.out.getData())
var attachment = new MessageAttachment(encoder.out.getData(), 'banner.gif');
guild.channels.cache.get("909860310171648050").send({ files: [attachment] }).catch(console.log);
})
//guild.setBanner(canvas.toBuffer()).catch(console.error);
} catch (e) {
console.log(e)
}
error:
ReferenceError: document is not defined
at savePixels (C:\Users\hotab\Desktop\Root\node_modules\save-pixels-jpeg-js-upgrade\save-pixels.js:127:20)
at Object.getImage (C:\Users\hotab\Desktop\Root\node_modules\gif-frames\gif-frames.js:105:20)
at C:\Users\hotab\Desktop\Root\utils\banner.js:31:34
TL;DR:
Your code runs in NodeJS, in which there is no document object because it does not make sense on a backend server. So using document results in an exception.
More details
You are using the node module gif-frames that depends on another node module called save-pixels-jpeg-js-upgrade which tries to access the document object that only exists in a browser context (see code here).
Either the documentation of gif-frames stating that it can run in both Node and browsers contexts is incorrect or there are some restrictions to some APIs, which you are facing here. Based on the node module code, it seems to be limited to the CANVAS case, but I can't help you any further...

can not get cascade DDS value for SharedObjectSequence

I have a test like this, but I cannot get the 'sharedMap' in 'sharedSeq1' value, and I don't know how to get the 'remoteFluidObjectHandle' value.
import {MockContainerRuntimeFactory, MockFluidDataStoreRuntime, MockStorage} from "#fluidframework/test-runtime-utils";
import {SharedObjectSequence, SharedObjectSequenceFactory} from "#fluidframework/sequence";
import * as mocks from "#fluidframework/test-runtime-utils";
import {SharedMap} from "#fluidframework/map";
import {IFluidHandle} from "#fluidframework/core-interfaces";
// Shared mock runtime passed to BOTH sequences' constructors.
// NOTE(review): the per-sequence dataStoreRuntime1/2 created below are only
// wired to the delta connections, not to the sequences themselves — this
// mismatch may contribute to the handle-resolution failure being asked about.
const mockRuntime: mocks.MockFluidDataStoreRuntime = new mocks.MockFluidDataStoreRuntime();
describe('ShredObjectSequence', function () {
it('should get synchronization data from another shared object', async function () {
const dataStoreRuntime1 = new MockFluidDataStoreRuntime();
const sharedSeq1: SharedObjectSequence<IFluidHandle<SharedMap>> = new SharedObjectSequence(mockRuntime, 'shareObjectSeq1', SharedObjectSequenceFactory.Attributes,)
const containerRuntimeFactory = new MockContainerRuntimeFactory();
// Mark the runtime as attached so ops are produced rather than queued.
dataStoreRuntime1.local = false;
const containerRuntime1 = containerRuntimeFactory.createContainerRuntime(
dataStoreRuntime1,
);
// Delta connection + storage pair backing the first sequence.
const services1 = {
deltaConnection: containerRuntime1.createDeltaConnection(),
objectStorage: new MockStorage(),
};
sharedSeq1.initializeLocal();
sharedSeq1.connect(services1);
// Second, independent runtime/connection for the writing side.
const dataStoreRuntime2 = new MockFluidDataStoreRuntime();
const containerRuntime2 = containerRuntimeFactory.createContainerRuntime(
dataStoreRuntime2,
);
const services2 = {
deltaConnection: containerRuntime2.createDeltaConnection(),
objectStorage: new MockStorage(),
};
const sharedSeq2: SharedObjectSequence<IFluidHandle<SharedMap>> = new SharedObjectSequence(mockRuntime, 'shareObjectSeq2', SharedObjectSequenceFactory.Attributes,)
sharedSeq2.initializeLocal();
sharedSeq2.connect(services2);
// insert a node into sharedSeq2, it will sync to sharedSeq1
sharedSeq2.insert(0, [<IFluidHandle<SharedMap>>new SharedMap('sharedMapId', mockRuntime, SharedMap.getFactory().attributes).handle])
// Flush all pending ops so sharedSeq1 observes the insert.
containerRuntimeFactory.processAllMessages();
// next case is passed, it show we got the sharedSeq2 changed
expect(sharedSeq1.getLength()).toBe(1)
const remoteFluidObjectHandle = await sharedSeq1.getRange(0, 1)[0];
// at here, i get error: Cannot read property 'mimeType' of null, it cause by remoteFluidObjectHandle.ts:51:30
const sharedMap = await remoteFluidObjectHandle.get()
expect(sharedMap).not.toBeUndefined()
});
});
Running this test yields the error 'Cannot read property 'mimeType' of null', caused by 'remoteFluidObjectHandle.ts:51:30'.
The fluid mocks have very limited and specific behaviors, it looks like you are hitting the limits of them. You'll have better luck with an end-to-end test, see packages\test\end-to-end-tests. These use the same in-memory server as our as the playground on fluidframework dot com. The in-memory server uses the same code as tinylicious, our single process server and routerlicious, our docker based reference implementation.

NodeJS: How to use image-size with base64 image?

I found the node module image-size and want to use it to get the dimensions of a base64 encoded image. The tutorial gives the following example for getting the dimensions:
// Load image-size and print the dimensions of an image file on disk.
const sizeOf = require('image-size');
const dimensions = sizeOf('images/funny-cats.png');
console.log(dimensions.width, dimensions.height);
And here, in the comment of the second answer, someone wrote that it works for base64 images as well. So I tried the following:
// Decode the base64 string into a Buffer and measure the resulting image.
const img = Buffer.from(base64, 'base64');
const dimensions = sizeOf(img);
console.log(dimensions.width, dimensions.height);
But I get a TypeError: unsupported file type: undefined (file: undefined)
How can I use sizeOf-Method of the image-size package with a base64 string I have in a variable?
Try this
// Strip the data-URL prefix before decoding. NOTE(review): substr(23) assumes
// the exact prefix 'data:image/jpeg;base64,' (23 chars); other MIME types have
// different prefix lengths (e.g. 'data:image/png;base64,'), so slicing from
// base64.indexOf(',') + 1 would be more robust.
var img = Buffer.from(base64.substr(23), 'base64');
var dimensions = sizeOf(img);
console.log(dimensions.width, dimensions.height);
substr(23) cuts off data:image/jpeg;base64,, which is necessary to properly create a Buffer from your base64 data.
Here's another solution using puppeteer worth considering
const puppeteer = require('puppeteer')
// image data
const data = 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBwgHBg...'
const browser = await puppeteer.launch()
try {
  const page = await browser.newPage();
  // Measure the image in a real browser context by loading it into an <img>.
  const dimensions = await page.evaluate(data => new Promise(resolve => {
    // client-like
    const img = new Image()
    img.onload = () => resolve({ width: img.width, height: img.height })
    img.src = data
  }), data)
  console.log(dimensions.width, dimensions.height)
} finally {
  // Fix: the original called browser.close() without awaiting it and only on
  // the success path — any error above leaked a background Chromium process.
  await browser.close()
}
Use this if the code is run in a wsl context :
// WSL environments commonly lack the kernel sandbox support Chromium expects,
// so sandboxing is disabled here. NOTE(review): only do this for trusted
// content — these flags remove Chromium's process isolation.
const browser = await puppeteer.launch({
args: ['--no-sandbox', '--disable-setuid-sandbox']
})
NOTE : Of course this method is really slow (because it opens a Chromium instance in the background, loads a webpage and its scripts, waits for rendering, etc...). I am providing this solution just for reference as in some cases being able to execute a script just like in a normal browser can be really useful.

Can't change leaflet setView params when using leaflet-headless

I need to generate a map on the server side using Nodejs and then create an image of that map. I'm using leaflet-headless to create the map and generate the image.
This is the code:
const L = require('leaflet-headless');
const document = global.document;
/**
 * Renders an OpenStreetMap map server-side and saves it as a PNG.
 * @param {[number, number]} [lanLat] - Initial [lat, lng] center. Falls back
 *   to [0, 0] (the original hard-coded center) when omitted, so existing
 *   callers that relied on the default keep the same output.
 */
let createMap = (lanLat) => {
  // Leaflet still needs a DOM element to mount into, even headless.
  const element = document.createElement('div');
  element.id = 'map-leaflet-image';
  document.body.appendChild(element);
  const filename = path.join(__dirname, '/leaflet-image.png');
  // Fix: the original ignored its lanLat argument and always centered at [0, 0].
  const map = L.map(element.id).setView(lanLat || [0, 0], 3);
  L.tileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
    attribution: '© OpenStreetMap contributors'
  }).addTo(map);
  // saveImage delegates to leaflet-image to rasterize the map to disk.
  map.saveImage(filename, () => {
    console.log('done');
  })
};
This works and the image is saved, but when I change the setView() parameters to setView([0,0], 1) (zoom out) I receive an error message:
return prev.apply(ctx, arguments);
Error: Image given has not completed loading
at Error (native)
at CanvasRenderingContext2D.ctx.(anonymous function) [as drawImage]
Any thoughts?
If this might interest someone, the problem was in the map.save() function which uses the leaflet-image lib.
This happened due to a weirdly specific scenario where a marker with certain coordinates, when added to the map with any other marker(?!), caused the error. I removed that marker and it worked.

select a region of desktop screen with Electron

I'm trying to write an application that allows the user to select a region of the screen (like selecting to take a screen shot).
Is that even possible?
To specifically take a full screen shot, use the following code (example pulled from Electron Demo App). You can build off of this example, and use the screen, desktopCapturer and rectangle modules in the electron api to customize the code to get a specific screen/display, or select a specific bounding box (x/y coordinates and pixel area).
// Electron APIs used for capturing and opening screenshots.
const electron = require('electron')
const { desktopCapturer, screen: electronScreen, shell } = electron
// Node helpers for writing the capture to a temporary file.
const fs = require('fs')
const os = require('os')
const path = require('path')
// DOM hooks: the trigger button and the status-message element.
const screenshot = document.getElementById('screen-shot')
const screenshotMsg = document.getElementById('screenshot-path')
// Click handler: capture the primary screen to a temp PNG, open it with the
// OS default viewer, and show the saved path in the status element.
screenshot.addEventListener('click', (event) => {
  screenshotMsg.textContent = 'Gathering screens...'
  const captureOptions = {
    types: ['screen'],
    thumbnailSize: determineScreenShotSize(),
  }
  desktopCapturer.getSources(captureOptions, (err, availableSources) => {
    if (err) return console.log(err)
    for (const src of availableSources) {
      // Only the primary display is saved; other sources are skipped.
      if (src.name !== 'Entire screen' && src.name !== 'Screen 1') continue
      const screenshotPath = path.join(os.tmpdir(), 'screenshot.png')
      fs.writeFile(screenshotPath, src.thumbnail.toPng(), (writeErr) => {
        if (writeErr) return console.log(writeErr)
        shell.openExternal('file://' + screenshotPath)
        const message = `Saved screenshot to: ${screenshotPath}`
        screenshotMsg.textContent = message
      })
    }
  })
})
// Returns a square thumbnail size whose edge covers the primary display's
// longest dimension at the current device pixel ratio.
function determineScreenShotSize () {
  const workArea = electronScreen.getPrimaryDisplay().workAreaSize
  const edge = Math.max(workArea.width, workArea.height) * window.devicePixelRatio
  return { width: edge, height: edge }
}
Other ways you could go about this are:
Use object.getClientRects() in the DOM to specify specific elements you want to capture, although this would require foreknowledge of what they are.
Add event listeners in your view to 'draw' the shape of what you want with mouseClick, mouseMove, etc. This stack overflow question has answers which could be adapted to fit what you want to do.
I doubt you are still looking for a solution to this, but after digging I have found a way to do it using a combination of shelljs and clipboard.
// Build a unique temp path inside the app's userData uploads folder.
const userDataPath = (app).getPath(
'userData'
)
const useP = path.join(userDataPath, 'uploads')
let randomTmpfile = uniqueFilename(useP, 'prefix')
// Presumably macOS's `screencapture` with interactive region selection (-i)
// and copy-to-clipboard (-c); the capture is then read back from the
// clipboard as a PNG data URL. NOTE(review): platform-specific, and the
// `image` variable is never used beyond this callback — confirm intent.
shelljs.exec(`screencapture -ic ${randomTmpfile}.png`, function (res) {
const image = clipboard.readImage('png').toDataURL()
})

Resources