I'm trying to connect to & use Skype for Business API (UCWA) following this procedure, using a Node.js test script.
I've registered a test app in Azure AD and checked all permissions concerning Skype for Business Online.
I'm doing this (simplified):
var adal = require('adal-node');
var https = require('https');

var clientId = 'a5cbbd......cc4a1'; // = app ID
var clientSecret = 'IOSDk1......LJ6vE='; // test key from Azure AD
var context = new adal.AuthenticationContext('https://login.windows.net');

// 'Autodiscover' step
// (callRestApi() makes an HTTPS request using https.request() and returns results as JSON)
callRestApi('webdir.online.lync.com', 443, '/autodiscover/autodiscoverservice.svc/root', 'GET', null /* no specific headers */, function(err, res) {
  if (err) { console.log(err); return err; }

  // extract discovered domain (I get something like https://webdir1e.online.lync.com)
  let regex = new RegExp('^(https?://[^/]*)', 'g');
  let sfbDiscoveredDomain = regex.exec(res._links.user.href);
  sfbDiscoveredDomain = sfbDiscoveredDomain[1];

  // 'Acquire token' step
  context.acquireTokenWithClientCredentials(sfbDiscoveredDomain, clientId, clientSecret, function(err, res) {
    if (err) { console.log(err); return err; }

    regex = new RegExp('^https?://([^/]*)', 'g');
    let sfbHost = regex.exec(res.resource);
    sfbHost = sfbHost[1]; // here I get something like 'webdir1e.online.lync.com'

    // 'Resending an autodiscovery request with the bearer token' step
    callRestApi(sfbHost, 443, '/autodiscover/autodiscoverservice.svc/root/oauth/user', 'GET', { 'Authorization': 'Bearer ' + res.accessToken }, function(err, res) {
      if (err) { console.log(err); return err; }
      console.log(res);
    });
  });
});
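For reference, here is a simplified sketch of the callRestApi() helper mentioned in the comment above (the real one may differ; as noted, it just wraps https.request() and parses the JSON body):

// Simplified sketch of the callRestApi() helper (error handling trimmed).
function callRestApi(host, port, path, method, headers, callback) {
  var req = https.request({ host: host, port: port, path: path, method: method, headers: headers || {} }, function (res) {
    var body = '';
    res.on('data', function (chunk) { body += chunk; });
    res.on('end', function () {
      try {
        callback(null, JSON.parse(body));
      } catch (e) {
        callback(e);
      }
    });
  });
  req.on('error', callback);
  req.end();
}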
The last step (resending an autodiscovery request) always fails with error HTTP 403/Forbidden.
There is an additional interesting response header:
'x-ms-diagnostics': '28070;source="AM41E00EDG01.infra.lync.com";reason="Service does not allow a cross domain request from this origin."'
...but I still don't understand why this error occurs.
I've played with additional headers seen here and there in various code samples (X-Ms-Origin and Host), with no luck.
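One of the header combinations I tried looked roughly like this (the X-Ms-Origin value below is just one of the guesses):

// Example of an attempt with extra headers (the X-Ms-Origin value is a guess
// at my app's origin; none of the variations changed the 403).
callRestApi(sfbHost, 443, '/autodiscover/autodiscoverservice.svc/root/oauth/user', 'GET', {
  'Authorization': 'Bearer ' + res.accessToken,
  'X-Ms-Origin': 'https://myapp.example.com',
  'Host': sfbHost
}, function (err, res) {
  if (err) { console.log(err); return err; }
  console.log(res);
});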
This issue ("Service does not allow a cross domain request from this origin.") is mostly caused by Cross-Origin Resource Sharing (CORS): the origin the request comes from isn't whitelisted.
A Skype for Business administrator can configure that with the cmdlets below (more info here) when the server is on premises (see the StackOverflow question here):
$x = New-CsWebOrigin -Url "https://apps.contoso.com"
Set-CsWebServiceConfiguration -Identity "{YOUR_IDENTITY}" -CrossDomainAuthorizationList @{Add=$x}
However, as your Skype for Business isn't on premises (it's online), I assume there is nothing you can do there, as this part is controlled by Microsoft's cloud admins.
That said, since UCWA is supported with Skype for Business Online, I assume something is wrong on your side. Did you check whether the application is correctly registered, as explained here? If yes, a Fiddler trace might be useful to see what causes the issue.
My request for an Azure AD OAuth2 token works fine when I run my Node.js app on a VM in Azure. It uses another team's client id/secret because, further down in the code, the program makes REST API calls to their services using that token. However, the token request fails when the program is launched from a GitHub workflow. (The workflow succeeded three times before this error appeared; now it fails every time.) Thank you for your help! It's much appreciated, and I don't have much experience in this area.
async function getToken(config) {
  const params = new URLSearchParams()
  params.append('grant_type', 'client_credentials')
  params.append('client_id', config.clientId)
  params.append('client_secret', config.clientSecret)
  params.append('scope', config.scopeUrl)

  // url = https://login.microsoftonline.com/{{AD.tenantId}}/oauth2/v2.0/token
  const response = await fetch(config.tokenRequestUrl, { method: 'POST', body: params })
  const jsonObj = await response.json()
  console.log('!!!!JSON=' + JSON.stringify(jsonObj))

  if ((typeof jsonObj.access_token !== 'undefined') && (jsonObj.access_token != null)) {
    return jsonObj.access_token
  } else {
    return null
  }
}
Output:
!!!!JSON=***"error":"unauthorized_client","error_description":"AADSTS700016: Application with identifier '***' was not found in the directory 'xxxxx Azure Account AD'. This can happen if the application has not been installed by the administrator of the tenant or consented to by any user in the tenant. You may have sent your authentication request to the wrong tenant.***
I figured it out! The GitHub workflow was using the wrong client id/secret.
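For anyone else hitting this, a minimal sketch (variable names are assumptions, not the actual config) of pulling the client id/secret from environment variables, so the VM and the workflow cannot drift apart:

// Sketch: read the credentials from the environment so the VM and the
// GitHub workflow use the same values (names below are assumptions).
const config = {
  clientId: process.env.AZURE_CLIENT_ID,
  clientSecret: process.env.AZURE_CLIENT_SECRET,
  scopeUrl: process.env.AZURE_SCOPE, // e.g. 'api://<their-app-id>/.default'
  tokenRequestUrl: `https://login.microsoftonline.com/${process.env.AZURE_TENANT_ID}/oauth2/v2.0/token`
}

async function main() {
  const token = await getToken(config)
  if (!token) {
    throw new Error('Token request failed - double-check which client id/secret the workflow injects')
  }
  // ...call the other team's REST APIs with the token here
}

main()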
How do I use googleapis' google.auth.GoogleAuth() with a Google API service account in a Twilio serverless function, given there is no filesystem path to provide as the keyFile value?
Based on the examples here ( https://www.section.io/engineering-education/google-sheets-api-in-nodejs/ ), in the Google API Node.js client documentation, and in Twilio's "Receive an inbound SMS" sample, my code looks like...
const { google } = require('googleapis')
const fs = require('fs')

exports.handler = async function (context, event, callback) {
  const twiml = new Twilio.twiml.MessagingResponse()

  // console.log(Runtime.getAssets()["/gservicecreds.private.json"].path)
  console.log('Opening google API creds for examination...')
  const creds = JSON.parse(
    fs.readFileSync(Runtime.getAssets()["/gservicecreds.private.json"].path, "utf8")
  )
  console.log(creds)

  // connect to google sheet
  console.log("Getting googleapis connection...")
  const auth = new google.auth.GoogleAuth({
    keyFile: Runtime.getAssets()["/gservicecreds.private.json"].path,
    scopes: "https://www.googleapis.com/auth/spreadsheets",
  })
  const authClientObj = await auth.getClient()
  const sheets = google.sheets({ version: 'v4', auth: authClientObj })
  const spreadsheetId = "myspreadsheetID"

  console.log("Processing message...")
  if (String(event.Body).trim().toLowerCase() == 'keyword') {
    console.log('DO SOMETHING...')
    try {
      // see https://developers.google.com/sheets/api/guides/values#reading_a_single_range
      let response = await sheets.spreadsheets.values.get({
        spreadsheetId: spreadsheetId,
        range: "'My Sheet'!B2:B1000"
      })
      console.log("Got data...")
      console.log(response)
      console.log(response.data)
      console.log(response.data.values)
    } catch (error) {
      console.log('An error occurred...')
      console.log(error)
      console.log(error.response)
      console.log(error.errors)
    }
  }

  // Return the TwiML as the second argument to `callback`
  // This will render the response as XML in reply to the webhook request
  return callback(null, twiml)
}
...where the Asset referenced in the code is the JSON key generated when creating a key pair for a Google APIs service account, manually copy/pasted as an Asset in the serverless function editor web UI.
I see error messages like...
An error occurred...
{ response: '[Object]', config: '[Object]', code: 403, errors: '[Object]' }
{ config: '[Object]', data: '[Object]', headers: '[Object]', status: 403, statusText: 'Forbidden', request: '[Object]' }
[ { message: 'The caller does not have permission', domain: 'global', reason: 'forbidden' } ]
I am assuming this is because the keyFile isn't being read in correctly at the auth declaration. I don't know how to do it differently, since all the examples I see assume a local file path as the value, and I don't know how a serverless function is supposed to access that file (my attempt in the code block is really just a shot in the dark).
FYI, I can see that the service account has an Editor role in the Google APIs console, though I notice the "Resources this service account can access" panel shows the error
"Could not find an ancestor of the selected project where you have access to view a policy report on at least one ancestor"
(I really have no idea what that means or implies; I'm very new to this).
Can anyone help with what could be going wrong here?
(BTW, if there is something really dumb/obvious that I am missing (e.g. a typo), just let me know in a comment so I can delete this post, as it would then not serve any future value to others.)
{ message: 'The caller does not have permission', domain: 'global', reason: 'forbidden' }
This actually means that the currently authenticated user (the service account) does not have access to do what you are asking it to do.
You are trying to access a spreadsheet.
Is this sheet on the service account's Google Drive account? If not, did you share the sheet with the service account?
The service account is just like any other user: if it doesn't have access to something, it can't access it. Go to the Google Drive web application and share the sheet with the service account like you would share it with any other user, using the service account's email address (in the key file it's the client_email field, the one with an @ in it).
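Since the question's code already parses the key JSON into creds, you can print the address to share the sheet with directly (client_email is the standard field name in a service-account key file):

// The service account's email lives in the key JSON as client_email;
// this is the address to share the spreadsheet with in Google Drive.
console.log('Share the sheet with:', creds.client_email)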
Delegate to a user on your domain
If you set up delegation properly, then you can have the service account act as a user on your domain who does have access to the file.
delegated_credentials = credentials.with_subject('userWithAccess@YourDomain.org')
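That snippet is Python; in Node, a rough equivalent (a sketch that assumes the key JSON from the question and a hypothetical user to impersonate) is a JWT client with a subject:

const { google } = require('googleapis')

// Domain-wide delegation sketch: the service account impersonates a real
// user who has access to the sheet. The subject address is hypothetical.
const jwtClient = new google.auth.JWT({
  email: creds.client_email,
  key: creds.private_key,
  scopes: ['https://www.googleapis.com/auth/spreadsheets'],
  subject: 'userWithAccess@YourDomain.org',
})
const sheets = google.sheets({ version: 'v4', auth: jwtClient })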
Goal: Create a successful (test) Checkout Session using Stripe's API for checkout.
[the link for their tutorial on Checkout here: https://github.com/stripe-samples/checkout-one-time-payments]
I'm creating a checkout session using my UI & building the checkout session with the data supplied to the backend web service using the following code:
var options = new Stripe.Checkout.SessionCreateOptions
{
    PaymentMethodTypes = new List<string>
    {
        "card",
    },
    LineItems = stripeCartLineItems,
    Mode = "payment",
    SuccessUrl = "https://" + HostName + "/Stripe/OrderPlaced",
    CancelUrl = "https://example.com/cancel",
};

var requestOptions = new RequestOptions
{
    StripeAccount = stripeConnectedAccountId,
    ApiKey = StripeConfiguration.ApiKey
};

var service = new Stripe.Checkout.SessionService();
Stripe.Checkout.Session session = service.Create(options, requestOptions);

return Json(new { sessionId = session.Id });
As you can see, I receive acknowledgment back from Stripe's API with a valid checkout session id:
Logs on Stripe's Dashboard confirm a successful checkout session:
However, I keep getting this error message:
The API keys have already been refreshed and placed appropriately. That's not the issue... Loading up the test Checkout page is failing. My logs in Stripe's dashboard say this:
The JavaScript call which initiates the redirect to Stripe's checkout experience is copied straight from their tutorial (linked above). That code looks like this:
checkoutButton.addEventListener('click', function () {
  $.ajax({
    url: "/Stripe/CreateCheckoutSession",
    method: "POST",
    data: { stripeConnectedAccountId: stripeConnectedAccountId, cartLineItems: scope.cartLineItems },
  }).done(function (resp) {
    stripe.redirectToCheckout({
      sessionId: resp.sessionId
    }).then(function (result) {
      // If `redirectToCheckout` fails due to a browser or network
      // error, display the localized error message to your customer
      // using `result.error.message`.
      alert(result.error.message);
    });
  });
});
After going to https://stripe.com/docs/error-codes/resource-missing, the docs say this for that specific error code: "The ID provided is not valid. Either the resource does not exist, or an ID for a different resource has been provided."
Ok Stripe. Sure sure. You made this API, I'll listen. However, according to your docs, IntelliSense, and your sample code, my code is correct, and I used the session.Id provided by the response object YOU sent me after initiating a Checkout Session:
I have no clue how to proceed.
Any ideas are appreciated.
If you have already verified the session and keys on the server and in the Stripe dashboard, check the Stripe key used on your client side. The publishable key used to initialize Stripe on the client must come from the same account (and the same test/live mode) as the secret key used on the server.
Check the logs on the client side to make sure that the key is the same.
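Concretely, the thing to compare is the client-side Stripe.js initialization; a sketch is below (the key is a placeholder, and the stripeAccount option is an assumption based on the connected-account call in the question):

// Client-side initialization to double-check (placeholder key below).
// The publishable key must belong to the same Stripe account and the same
// mode (test vs live) as the secret key used on the server. Since the
// session is created on a connected account, it is also worth confirming
// that Stripe.js is initialized with that same connected account id.
var stripe = Stripe('pk_test_xxxxxxxxxxxxxxxx', {
  stripeAccount: stripeConnectedAccountId // same id posted to /Stripe/CreateCheckoutSession
});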
I'm trying to use a Service Principal to access a Batch pool from an Azure Function and running into authentication issues that I don't understand. The initial login with the Service Principal works fine, but then using the credentials to access the batch pool returns a 401.
Below is a condensed version of my code, with comments at the key points.
var MsRest = require('ms-rest-azure');
var batch = require('azure-batch');

module.exports.dispatch = function (context) {
  MsRest.loginWithServicePrincipalSecret('AppId', 'Secret', 'TenantId', function (err, credentials) {
    if (err) throw err;

    // This works as it prints the credentials
    context.log(credentials);

    var batch_client = new batch.ServiceClient(credentials, accountUrl);
    batch_client.pool.get('mycluster', function (error, result) {
      if (error === null) {
        context.log('Accessed pool');
        context.log(result);
      } else {
        // Request to batch service returns a 401
        if (error.statusCode === 404) {
          context.log('Pool not found yet returned 404...');
        } else {
          context.log('Error occurred while retrieving pool data');
          context.log(error);
        }
        // 'Server failed to authenticate the request. Make sure the value of Authorization header is formed correctly.'
        context.res = { body: error.body.message.value };
        context.done();
      }
    });
  });
};
How can the initial login with a service principal work with no problem, but the credentials it returns not be able to access the batch pool?
The actual error says to check the Authorization header on the request; when I inspect the request, the Authorization header isn't even present.
I've triple-checked the Active Directory access control for the batch account; the App ID and secret are the ones belonging to the owner of the batch account. Any ideas what to try next?
The credentials expected by the Azure Batch npm client aren't the Azure Active Directory credentials/token, but the keys for the batch account. You can list your keys using the Azure CLI with a command like the following:
az batch account keys list -g "<resource-group-name>" -n "<batch-account-name>"
sample here
Then you can create the credentials parameter with those keys:
var credentials = new batch.SharedKeyCredentials('your-account-name', 'your-account-key');
You could still involve a Service Principal here if you wanted to store your batch keys in something like Key Vault, but then your code would be:
1. Use the Service Principal to authenticate against Key Vault and fetch the account name and key.
2. Use the name and key to create the credentials.
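A rough sketch of that flow, using the newer @azure/identity and @azure/keyvault-secrets packages (the vault URL, secret name, and account name are assumptions):

const { ClientSecretCredential } = require('@azure/identity');
const { SecretClient } = require('@azure/keyvault-secrets');
const batch = require('azure-batch');

async function getBatchClient(accountUrl) {
  // 1. Service principal auth against Key Vault to fetch the batch account key
  const credential = new ClientSecretCredential('TenantId', 'AppId', 'Secret');
  const vault = new SecretClient('https://my-vault.vault.azure.net', credential);
  const secret = await vault.getSecret('batch-account-key'); // hypothetical secret name

  // 2. Use the account name and key to create the shared-key credentials
  const credentials = new batch.SharedKeyCredentials('your-account-name', secret.value);
  return new batch.ServiceClient(credentials, accountUrl);
}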
You cannot use the same OAuth token returned from the Azure Resource Management endpoint with Batch. Assuming your service principal has the correct RBAC permissions, auth with the Azure Batch endpoint: https://batch.core.windows.net/ instead (assuming you are using Public Azure).
You do not need to get the shared key credentials for the Batch account, credentials via AAD should be used instead if you are using an AAD service principal.
I happened to run across this same issue and I didn't have the option of using SharedKeyCredentials so I wanted to share my solution in case anyone else finds it helpful.
As fpark mentions, we need to get an OAuth token to use with Batch instead of the default Azure Resource Management. Below is the original code posted by Mark with the minor modification needed to make it work with Batch:
var MsRest = require('ms-rest-azure');
var batch = require('azure-batch');

module.exports.dispatch = function (context) {
  let authOptions = { tokenAudience: 'batch' };
  MsRest.loginWithServicePrincipalSecret('AppId', 'Secret', 'TenantId', authOptions, function (err, credentials) {
    if (err) throw err;

    // This works as it prints the credentials
    context.log(credentials);

    var batch_client = new batch.ServiceClient(credentials, accountUrl);
    batch_client.pool.get('mycluster', function (error, result) {
      if (error === null) {
        context.log('Accessed pool');
        context.log(result);
      } else {
        // Request to batch service returns a 401
        if (error.statusCode === 404) {
          context.log('Pool not found yet returned 404...');
        } else {
          context.log('Error occurred while retrieving pool data');
          context.log(error);
        }
        // 'Server failed to authenticate the request. Make sure the value of Authorization header is formed correctly.'
        context.res = { body: error.body.message.value };
        context.done();
      }
    });
  });
};
Currently I'm working on a Node.js integration for DocuSign (https://www.npmjs.com/package/docusign-esign). I made all my tests with the sandbox account and everything worked perfectly. Now I'm trying to use a production account: the login process is fine, but when I go to create the envelope I get a USER_AUTHENTICATION_FAILED error (even though the initial login went through without errors). I would like to know if someone has experienced the same thing or has an idea of how I can fix this.
This is the code that I took from docusign-esign to create the envelope:
var loginAccount = new docusign.LoginAccount();
loginAccount = loginAccounts[0];
var accountId = loginAccount.accountId;

var envelopesApi = new docusign.EnvelopesApi();
envelopesApi.createEnvelope(accountId, envDef, null, function (error, envelopeSummary, response) {
  // ...
});

The account Id is the same one retrieved after the login process.
One possible cause could be that your DocuSign account is hosted on na2.docusign.net, na3.docusign.net, or eu.docusign.net, while your code uses the default www.docusign.com as a base URL.
The login call will pass even if you use www; however, all subsequent API calls will fail if you are not hitting the exact base URL that corresponds to your production account. You should have received this information as part of the DocuSign go-live process (formerly known as API certification). You can always get the base URL from the login call response.
For Node, here is how to get the correct base URL from the login call and set it on the API client (the base-URL lines are likely what is missing in your code):
authApi.login(loginOps, function (err, loginInfo, response) {
  if (err) {
    return next(err);
  }
  if (loginInfo) {
    // list of user account(s)
    // note that a given user may be a member of multiple accounts
    var loginAccounts = loginInfo.getLoginAccounts();
    console.log('LoginInformation: ' + JSON.stringify(loginAccounts));

    var loginAccount = loginAccounts[0];
    var accountId = loginAccount.accountId;

    var baseUrl = loginAccount.baseUrl;
    var accountDomain = baseUrl.split("/v2");
    apiClient.setBasePath(accountDomain[0]);
    docusign.Configuration.default.setDefaultApiClient(apiClient);

    next(null, loginAccount);
  }
});