I am trying to do OCR on a PDF and am having difficulty getting it to work. I am getting this error:
GoogleError: Error opening file: gs://mybucket/a.subpath/output.json
at Operation._unpackResponse (/Users/my/project/node_modules/google-gax/build/src/longRunningCalls/longrunning.js:148:31)
at /Users/my/project/node_modules/google-gax/build/src/longRunningCalls/longrunning.js:134:18 {
code: 7
}
When running this Node.js script:
const fs = require('fs')
const vision = require('@google-cloud/vision').v1
const client = new vision.ImageAnnotatorClient()

parse('a.something')

function parse(name) {
  var bucketName = `mybucket`
  const features = [{type: 'DOCUMENT_TEXT_DETECTION'}]
  let requests = []
  let i = 0
  while (i < 10) {
    requests.push({
      inputConfig: {
        mimeType: 'application/pdf',
        gcsSource: {
          uri: `gs://${bucketName}/${name}.${i + 1}.pdf`
        },
      },
      features: features,
      outputConfig: {
        gcsDestination: {
          uri: `gs://${bucketName}/${name}.${i + 1}/`,
        },
      },
    })
    i++
  }
  const request = {
    requests
  }
  client.asyncBatchAnnotateFiles(request).then(x => {
    let [operation] = x
    operation.promise().then(y => {
      let [filesResponse] = y
      const destinationUri =
        filesResponse.responses[0].outputConfig.gcsDestination.uri
      console.log('Json saved to: ' + destinationUri)
      process.exit()
    }).catch(e => {
      console.log(e)
      process.exit()
    })
  })
}
This is straight from the docs pretty much.
At first I just went into the console and uploaded the PDFs into Cloud Storage manually. I was logged in as foo@gmail.com. A few days before, I had created a JSON API key to export in the shell for project bar (which I created while logged in as foo@gmail.com). I then got the error above. So what I tried was adding a new member to the project with my email foo@gmail.com, and giving them the roles I think are equivalent to roles/storage.legacyObjectOwner, which were these:
Then, when I run the Node.js script, I get this error still. I don't know what's going on or how to fix it, any ideas?
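For completeness, the credential setup I described boils down to something like this (the key file path here is just a placeholder, not my real path):
// Sketch only: either export the key for the whole process...
//   GOOGLE_APPLICATION_CREDENTIALS=/path/to/bar-key.json node script.js
// ...or hand it to the client explicitly (placeholder path):
const vision = require('@google-cloud/vision').v1
const client = new vision.ImageAnnotatorClient({
  keyFilename: '/path/to/bar-key.json'
})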
Related
I'm trying to make a request to the KuCoin API to query the balance. I'm using the Node.js API found here, but I keep getting the error below whenever I execute the code.
And here's the code snippet
data().then(api => {
  const apiKey = api.api_key;
  const apiSecretKey = api.api_secret;
  const contactId = api.contact_id;
  const exchange = api.exchange;
  const passphrase = 'Passphrase';

  /** Init Configure */
  const config = {
    key: apiKey, // KC-API-KEY
    secret: apiSecretKey, // API-Secret
    passphrase: passphrase, // KC-API-PASSPHRASE
    environment: "live"
  }

  API.init(require(config));

  if (apiKey && exchange === "KuCoin-Futures") {
    console.log("KuCoin Balance")
    async function getBalance() {
      try {
        let r = await API.getAccountOverview()
        console.log(r.data)
      } catch (err) {
        console.log(err)
      }
    }
    return getBalance()
  }
});
In the console log I get the following error:
TypeError: request.charAt is not a function
at Function.Module._resolveLookupPaths (internal/modules/cjs/loader.js:617:15)
Does anyone know how I can fix this??
There are a couple of things that look off in the code snippet you provided. The most notable is API.init(require(config)): require() expects a module path string, so passing the config object to it is what produces the "request.charAt is not a function" error. The sample code from the kucoin-node-api library you linked should work perfectly fine, though. In case you are using that one, try this snippet, which should show your account info:
const api = require('kucoin-node-api');

const config = {
  apiKey: 'YOUR_KUCOIN_API_KEY',
  secretKey: 'YOUR_KUCOIN_API_SECRET',
  passphrase: 'YOUR_KUCOIN_API_PASSPHRASE',
  environment: 'live'
};

api.init(config);

api.getAccounts().then((r) => {
  console.log(r.data);
}).catch((e) => {
  console.log(e);
});
In case you're using a different library, kucoin-node-sdk maybe (judging by your code snippet), then try to configure it correctly:
config.js file:
module.exports = {
  baseUrl: 'https://api.kucoin.com',
  apiAuth: {
    key: 'YOUR_KUCOIN_API_KEY',
    secret: 'YOUR_KUCOIN_API_SECRET',
    passphrase: 'YOUR_KUCOIN_API_PASSPHRASE'
  },
  authVersion: 2
}
and your main.js (or whatever the name is):
const API = require('kucoin-node-sdk');
API.init(require('./config'));

const main = async () => {
  const getTimestampRl = await API.rest.Others.getTimestamp();
  console.log(getTimestampRl.data);
};

main();
The code above will show you KuCoin server timestamp only, but should be enough to keep going.
Good luck with trading!
The error occurs on the batchProcessDocuments call, and looks like this:
{
  code: 3,
  details: 'Request contains an invalid argument.',
  metadata: Metadata {
    internalRepr: Map { 'grpc-server-stats-bin' => [Array] },
    options: {}
  },
  note: 'Exception occurred in retry method that was not classified as transient'
}
I've tried to copy the example as much as possible but without success. Is there a way of finding out more information regarding the input parameters that are required? There are very few examples of using Document AI on the web with this being a new product.
Here is my code sample:
const projectId = "95715XXXXX";
const location = "eu"; // Format is 'us' or 'eu'
const processorId = "a1e1f6a3XXXXXXXX";
const gcsInputUri = "gs://nmm-storage/test.pdf";
const gcsOutputUri = "gs://nmm-storage";
const gcsOutputUriPrefix = "out_";

// Imports the Google Cloud client library
const {
  DocumentProcessorServiceClient,
} = require("@google-cloud/documentai").v1beta3;
const { Storage } = require("@google-cloud/storage");

// Instantiates Document AI, Storage clients
const client = new DocumentProcessorServiceClient();
const storage = new Storage();

const { default: PQueue } = require("p-queue");

async function batchProcessDocument() {
  const name = `projects/${projectId}/locations/${location}/processors/${processorId}`;

  // Configure the batch process request.
  const request = {
    name,
    inputConfigs: [
      {
        gcsSource: gcsInputUri,
        mimeType: "application/pdf",
      },
    ],
    outputConfig: {
      gcsDestination: `${gcsOutputUri}/${gcsOutputUriPrefix}/`,
    },
  };

  // Batch process document using a long-running operation.
  // You can wait for now, or get results later.
  // Note: first request to the service takes longer than subsequent
  // requests.
  const [operation] = await client.batchProcessDocuments(request); //.catch(err => console.log('err', err));

  // Wait for operation to complete.
  await operation.promise();
  console.log("Document processing complete.");
}

batchProcessDocument();
I think this is the solution: https://stackoverflow.com/a/66765483/15461811
(you have to set the apiEndpoint parameter)
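Since your processor is in the eu location, a sketch of what setting that parameter could look like here (the endpoint name is my assumption of the EU regional endpoint, based on the linked answer; adjust if your region differs):
const {
  DocumentProcessorServiceClient,
} = require("@google-cloud/documentai").v1beta3;

// The processor lives in 'eu', so the client should talk to the EU regional
// endpoint rather than the default one.
const client = new DocumentProcessorServiceClient({
  apiEndpoint: "eu-documentai.googleapis.com",
});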
I have gone through the Google Cloud Platform API documentation and followed the GCP doc steps, but I'm still unable to fix the encoding error, which you can see below. I'm trying to translate an audio clip from en-US (English) to hi-IN (Hindi); it would also be helpful if you could suggest some alternative approaches. Here's my code:
function main(filename, encoding, sourceLanguage, targetLanguage) {
  const fs = require('fs');
  const {
    SpeechTranslationServiceClient,
  } = require('@google-cloud/media-translation');

  const client = new SpeechTranslationServiceClient();

  async function quickstart() {
    const filename = './16kmonoceo.wav';
    const encoding = 'LINEAR16';
    const sourceLanguage = 'en-US';
    const targetLanguage = 'hi-IN';

    const config = {
      audioConfig: {
        audioEncoding: encoding,
        sourceLanguageCode: sourceLanguage,
        targetLanguageCode: targetLanguage,
      },
    };

    const initialRequest = {
      streamingConfig: config,
      audioContent: null,
    };

    const readStream = fs.createReadStream(filename, {
      highWaterMark: 4096,
      encoding: 'base64',
    });

    const chunks = [];
    readStream
      .on('data', chunk => {
        const request = {
          streamingConfig: config,
          audioContent: chunk.toString(),
        };
        chunks.push(request);
      })
      .on('close', () => {
        // Config-only request should be first in stream of requests
        stream.write(initialRequest);
        for (let i = 0; i < chunks.length; i++) {
          stream.write(chunks[i]);
        }
        stream.end();
      });

    const stream = client.streamingTranslateSpeech().on('data', response => {
      const {result} = response;
      if (result.textTranslationResult.isFinal) {
        console.log(
          `\nFinal translation: ${result.textTranslationResult.translation}`
        );
        console.log(`Final recognition result: ${result.recognitionResult}`);
      } else {
        console.log(
          `\nPartial translation: ${result.textTranslationResult.translation}`
        );
        console.log(`Partial recognition result: ${result.recognitionResult}`);
      }
    });
  }

  quickstart();
}

main(...process.argv.slice(2));
Here is my error from the command line:
CHECK ERROR MESSAGE
I'm using Windows 10 and the VS Code IDE.
This is a case where careful reading of the error message helps.
Some module gacked on "LINEAR16" as the audioEncoding value, saying there's no encoding with that name.
A quick look at the documentation shows "linear16" (lower case) as the value to use.
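So the fix should be nothing more than changing that one value in your quickstart config, something like this (the other fields kept as in your question):
const config = {
  audioConfig: {
    audioEncoding: 'linear16', // lower case, as the documentation spells it
    sourceLanguageCode: 'en-US',
    targetLanguageCode: 'hi-IN',
  },
};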
I am trying to do an asyncBatchAnnotateImages() request to annotate a bunch of images using the Google Cloud Vision API.
Here is a snippet of my code:
My function to create a request for batching:
module.exports = createRequests

const LABEL_DETECTION = 'LABEL_DETECTION'
const WEB_DETECTION = 'WEB_DETECTION'

function createRequests(imageUris) {
  let resources = {
    requests: [],
    outputConfig
  }
  for (let i = 0; i < imageUris.length; i++) {
    let request = {
      image: {source: {imageUri: imageUris[i]}},
      features: [{type: LABEL_DETECTION}, {type: WEB_DETECTION}]
    }
    resources.requests.push(request)
  }
  console.log(resources)
  return resources
}
My function for making the request itself:
// Imports the Google Cloud Client Library
const vision = require('@google-cloud/vision')

// Creates a client
const client = new vision.ImageAnnotatorClient()

const getImageUrls = require('./get-image-urls.js')
const createRequests = require('./create-requests.js')

const BUCKET_NAME = 'creative-engine'

function detectLabelsFromImage() {
  return new Promise(async (resolve, reject) => {
    try {
      let imageUris = await getImageUrls(BUCKET_NAME)
      let resources = createRequests(imageUris)
      try {
        let responses = await client.asyncBatchAnnotateImages(resources)
        const imageResponses = responses[0].responses
        imageResponses.forEach(imageResponse => {
          console.log('LABELS: ')
          const labels = imageResponse.labelAnnotations
          labels.forEach(label => {
            console.log(`label: ${label.description} | score: ${label.score}`)
          });
          console.log('WEB ENTITIES: ')
          const webEntities = imageResponse.webDetection.webEntities
          webEntities.forEach(webEntity => {
            console.log(`label: ${webEntity.description} | score: ${webEntity.score}`)
          });
        })
      } catch (err) {
        console.error('ERROR: ', err)
      }
    } catch (e) {
      reject(e)
    }
  })
}
Here is the error I get:
ERROR: Error: 3 INVALID_ARGUMENT: OutputConfig is required.
When I look at the Google Documentation here it states I need to use Google Cloud Storage for the JSON output.
I don't want to create a billing account with my information for Google Cloud. Is there a way to do this where I write to a local JSON file?
Thank you for your help!
As the link you shared says, it is not possible to write a local JSON file with the Cloud Vision API's async batch call. You must use GCS to store the output file.
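For reference, if you do end up using GCS, the outputConfig that is missing from your createRequests snippet would look roughly like this (the bucket name and prefix are placeholders, and batchSize is optional):
const outputConfig = {
  gcsDestination: {
    uri: `gs://${BUCKET_NAME}/vision-output/` // placeholder prefix; result JSON files land here
  },
  batchSize: 2 // how many responses go into each output JSON file
}

let resources = {
  requests: [],
  outputConfig
}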
I am using the npm package react-native-fetch-blob.
I have followed all the steps from the git repository to use the package.
I then imported the package using the following line:
var RNFetchBlob = require('react-native-fetch-blob');
I am trying to request a BLOB containing an image from a server.
This is my main method.
fetchAttachment: function(attachment_uri) {
  var authToken = 'youWillNeverGetThis!'
  var deviceId = '123';
  var xAuthToken = deviceId + '#' + authToken

  //Authorization : 'Bearer access-token...',
  // send http request in a new thread (using native code)
  RNFetchBlob.fetch('GET', config.apiRoot + '/app/' + attachment_uri, {
    'Origin': 'http://10.0.1.23:8081',
    'X-AuthToken': xAuthToken
  })
  // when response status code is 200
  .then((res) => {
    // the conversion is done in native code
    let base64Str = res.base64()
    // the following conversions are done in js, it's SYNC
    let text = res.text()
    let json = res.json()
  })
  // Status code is not 200
  .catch((errorMessage, statusCode) => {
    // error handling
  });
}
I keep receiving the following error:
"Possible Unhandled Promise Rejection (id: 0): TypeError: RNFetchBlob.fetch is not a function".
Any ideas?
The issue is you are using ES5 style require statements with a library written against ES6/ES2015. You have two options:
ES5:
var RNFetchBlob = require('react-native-fetch-blob').default
ES6:
import RNFetchBlob from 'react-native-fetch-blob'
My import looks like this: import RNFetchBlob from 'rn-fetch-blob';
but I've got an error: TypeError: RNFetchBlob.scanFile is not a function
My code:
const downloadAudio = async () => {
  const { config, fs } = RNFetchBlob;
  const meditationFilesPath =
    Platform.OS == 'android'
      ? `${fs.dirs.DownloadDir}/meditations/${id}`
      : `${fs.dirs.DocumentDir}/meditations/${id}`;
  let audio_URL = track;
  let options = {
    fileCache: true,
    path: meditationFilesPath + `/${id}.mp3`,
    addAndroidDownloads: {
      // Related to the Android only
      useDownloadManager: true,
      notification: true,
      path: meditationFilesPath + `/${id}.mp3`,
      description: 'Audio',
    },
  };
  try {
    const resAudio = await config(options).fetch('GET', audio_URL.uri);
    if (resAudio) {
      const audio = await RNFetchBlob.fs.scanFile([
        { path: resAudio.path(), mime: 'audio/mpeg' },
      ]);
      console.log('res -> ', audio);
      Alert.alert('Audio Downloaded Successfully.');
    }
  } catch (error) {
    console.error('error from downloadAudio', error);
  }
};