"Unable to combine data" Power BI error when refreshing on the web

I have a Power BI report with a chart, several databases, and queries. Here is an example of the code:
= (table as table) => let
    apikey = "apikey",
    endpoint = "endpoint",
    inputTable = Table.TransformColumnTypes(table, {{"Timestamp", type text}, {"Value", type number}}),
    jsontext = Text.FromBinary(Json.FromValue(inputTable)),
    jsonbody = "{ ""Granularity"": ""microsecond"", ""Sensitivity"": 95, ""Series"": " & jsontext & " }",
    bytesbody = Text.ToBinary(jsonbody),
    headers = [#"Content-Type" = "application/json", #"Ocp-Apim-Subscription-Key" = apikey],
    bytesresp = Web.Contents(endpoint, [Headers = headers, Content = bytesbody, ManualStatusHandling = {400}]),
    jsonresp = Json.Document(bytesresp),
    respTable = Table.FromColumns({
        Table.Column(inputTable, "Timestamp"),
        Table.Column(inputTable, "Value"),
        Record.Field(jsonresp, "IsAnomaly") as list,
        Record.Field(jsonresp, "ExpectedValues") as list,
        Record.Field(jsonresp, "UpperMargins") as list,
        Record.Field(jsonresp, "LowerMargins") as list,
        Record.Field(jsonresp, "IsPositiveAnomaly") as list,
        Record.Field(jsonresp, "IsNegativeAnomaly") as list
    }, {"Timestamp", "Value", "IsAnomaly", "ExpectedValues", "UpperMargin", "LowerMargin", "IsPositiveAnomaly", "IsNegativeAnomaly"}),
    respTable1 = Table.AddColumn(respTable, "UpperMargins", (row) => row[ExpectedValues] + row[UpperMargin]),
    respTable2 = Table.AddColumn(respTable1, "LowerMargins", (row) => row[ExpectedValues] - row[LowerMargin]),
    respTable3 = Table.RemoveColumns(respTable2, "UpperMargin"),
    respTable4 = Table.RemoveColumns(respTable3, "LowerMargin"),
    results = Table.TransformColumnTypes(
        respTable4,
        {{"Timestamp", type datetime}, {"Value", type number}, {"IsAnomaly", type logical},
         {"IsPositiveAnomaly", type logical}, {"IsNegativeAnomaly", type logical},
         {"ExpectedValues", type number}, {"UpperMargins", type number}, {"LowerMargins", type number}}
    )
in results
When I select a database and invoke the function, everything works. But when I publish the report from Desktop to the web and try to refresh the dataset, I get this error:
Processing error: [Unable to combine data] Section1/Invoked Function/Source references other
queries or steps, so it may not directly access a data source. Please rebuild this data combination.
Cluster URI: WABI-WEST-EUROPE-E-PRIMARY-redirect.analysis.windows.net
Activity ID: d304e6c7-e1d6-4ba3-bbb2-bf19bf2095be
Request ID: 96e790c5-c772-1c66-cceb-792e601fc416
Time: 2022-04-20 05:54:53Z
I am using Azure Anomaly Detector.
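For context, this error typically comes from the Formula Firewall: a query that references other queries or steps (here, the invoked function's input table) is not allowed to also access a data source like Web.Contents directly. One common restructuring, sketched below under the assumption that the input can be built or loaded inside the same query, is to make the Web.Contents call self-contained instead of parameterizing it with another query:

let
    // Hypothetical: build (or load) the input inside this one query instead
    // of passing a table from another query into an invoked function.
    inputTable = #table(
        type table [Timestamp = text, Value = number],
        {{"2022-04-20T00:00:00Z", 1.0}, {"2022-04-20T00:00:01Z", 2.0}}
    ),
    jsontext = Text.FromBinary(Json.FromValue(inputTable)),
    jsonbody = "{ ""Granularity"": ""microsecond"", ""Sensitivity"": 95, ""Series"": " & jsontext & " }",
    // The Web.Contents call now lives in the same query as its input data,
    // so the service no longer sees a cross-query data-source access.
    bytesresp = Web.Contents("endpoint", [
        Headers = [#"Content-Type" = "application/json", #"Ocp-Apim-Subscription-Key" = "apikey"],
        Content = Text.ToBinary(jsonbody)
    ]),
    jsonresp = Json.Document(bytesresp)
in
    jsonresp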

Related

Access columns in nested tables by position instead of by name using Power Query

I have been working on a Power Query for Excel that checks a folder, gets the Excel workbooks, and consolidates the sheets within. I'm a novice in Power Query, so I need help and examples to accomplish this.
I've been stuck iterating on some changes, trying to work around the fact that some Excel sheets do not have the same column names ('Column1', 'Column2' or 'Column3'), so accessing columns by name gives me an error.
The comparison should be done on columns 1 to 3 of each nested table in the 'First15Rows' column, and if 3 or more desired titles are found in the same row, the sheet is considered valid.
So I'm asking for help. The current query looks like this:
let
    Source = Folder.Files(Excel.CurrentWorkbook(){[Name="FldrLocation"]}[Content][FldrLocation]{0}),
    FilterFileNames = Table.SelectRows(Source, each not Text.StartsWith([Name], "~$") and Text.Contains([Extension], ".xls")),
    RemoveOtherCols1 = Table.SelectColumns(FilterFileNames, {"Content", "Name", "Date modified"}),
    OnlyRecent = Table.SelectRows(RemoveOtherCols1, each [Date modified] >= Date.AddWeeks(DateTime.LocalNow(), -WeeksAgo)),
    AddSheetsColumn = Table.AddColumn(OnlyRecent, "Custom", each Excel.Workbook([Content])),
    ExpandSheetsFromTable = Table.ExpandTableColumn(AddSheetsColumn, "Custom", {"Name", "Data"}, {"Sheets", "Data"}),
    FilterSheetNames = Table.SelectRows(ExpandSheetsFromTable, each not Text.Contains([Sheets], "Print") and not Text.StartsWith([Sheets], "_xlnm")),
    RemoveEmptySheets = Table.SelectRows(FilterSheetNames, each
        if Table.IsEmpty(Table.SelectRows([Data], each _[Column1] <> null)) then null else true),
    AddFirst15Rows = Table.AddColumn(RemoveEmptySheets, "First15Rows", each Table.FirstN([Data], 15)),
    CheckMatch = Table.SelectRows(AddFirst15Rows, each
        if Table.IsEmpty(Table.SelectRows([First15Rows], each _[Column1] = "Date" or _[Column2] = "Time"))
        then null
        else true)
in
    CheckMatch
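One way to make the last step independent of column names (a sketch, not from the original question) is to look each name up by position with Table.ColumnNames, so the same test works whether a sheet calls its first column 'Column1' or anything else. Continuing from the AddFirst15Rows step above:

let
    // Hypothetical helper: return column i (0-based) of a table as a list,
    // regardless of what the column is actually named.
    ColumnByPos = (t as table, i as number) as list =>
        Table.Column(t, Table.ColumnNames(t){i}),
    // Example rewrite of CheckMatch: keep a sheet when any of its first
    // 15 rows carries a desired title in the first or second column.
    CheckMatch = Table.SelectRows(AddFirst15Rows, each
        List.Contains(ColumnByPos([First15Rows], 0), "Date")
            or List.Contains(ColumnByPos([First15Rows], 1), "Time"))
in
    CheckMatch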

Netsuite get "Currency Revaluation (Unrealized Gain/Loss)" Table

I am new to NetSuite. Is it possible to get the table "Currency Revaluation (Unrealized Gain/Loss)" using SuiteScript? I would like to use it in SuiteQL.
Here is a sample:
SELECT
    NT.*
FROM
    NextTransactionLineLink AS NTLL
    INNER JOIN Transaction AS NT ON (NT.ID = NTLL.NextDoc)
    INNER JOIN Transaction ON (Transaction.ID = NTLL.PreviousDoc)
    INNER JOIN transactionline tl ON (tl.TRANSACTION = Transaction.ID)
    INNER JOIN subsidiary sb ON (sb.ID = tl.subsidiary)
    INNER JOIN accountingperiod ap ON (
        ap.ID = Transaction.postingperiod
        AND ap.isposting = 'T'
    )
    INNER JOIN accountingperiod pap ON (
        pap.ID = NT.postingperiod
        AND pap.isposting = 'T'
    )
    INNER JOIN currencyrate cr1 ON (
        cr1.basecurrency = sb.currency
        AND cr1.transactioncurrency = Transaction.currency
        AND cr1.effectivedate = To_date(ap.startdate, 'MM/DD/YYYY')
    )
    INNER JOIN consolidatedexchangerate cexr ON (
        cexr.postingperiod = Transaction.postingperiod
        AND cexr.fromsubsidiary = tl.subsidiary
        AND cexr.tosubsidiary = 1
    )
WHERE
    (NTLL.NextDoc = 212328)
Thanks in Advance
By "getting", I am assuming you want to load this record and the fetch/update the values within.
Simply use &xml=t in the url, after the URL of the record
Something like this -
https://342xxxx-sb1.app.netsuite.com/app/accounting/transactions/vendbill.nl?id=00000&whence=&xml=t
You will get the record type.
<record recordType="vendorbill"
Use this record type in your Script.
var objRecord = record.load({
    type: record.Type.VENDOR_BILL,
    id: 157,
    isDynamic: true,
});
If the record is a custom record, use this:
var newFeatureRecord = record.load({
    type: 'customrecord_feature',
    id: 1,
    isDynamic: true
});
Let me know in case of issues in the comments below.
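Since the question asks about SuiteQL specifically: in SuiteScript 2.1 the N/query module can run a SuiteQL string directly. A minimal sketch (the query text below is abbreviated from the question, not a verified revaluation query):

// SuiteScript 2.1 — run SuiteQL from a script via N/query.
require(['N/query'], function (query) {
    var rows = query.runSuiteQL({
        query: 'SELECT NT.* FROM NextTransactionLineLink AS NTLL ' +
               'INNER JOIN Transaction AS NT ON NT.ID = NTLL.NextDoc ' +
               'WHERE NTLL.NextDoc = ?',
        params: [212328]
    }).asMappedResults();   // each row becomes a plain { column: value } object
    log.debug({ title: 'row count', details: rows.length });
});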

Filter one list by another using Power Query

I have a list of elemental impurities in Power Query which I wish to filter according to whether or not they exist on another list, known as the Prop65 list.
Below is a simplified example of what I am trying to achieve. I appreciate that this could be done using worksheet formulas; however, I don't know how to achieve it using a Power Query solution. If anyone knows how, it would be appreciated.
Data shown:
Impurity     Amount (ppm)
Aluminium    33.885
Antimony     0.6777
Arsenic      3.5064
Barium       2.259
Boron        1.3554
Bromoform    0.555
Cadmium      3.18895
Chromium     0.33885
Cobalt       1.1295
Copper       0.4518
Indium       0.4518
Simplified Prop65 List
Arsenic
Bromoform
Cadmium
Furan
Lead
Nafenopin
Here is one way to do that:
Read in the two tables
Do an Inner Join
let
    //get original data
    Source = Excel.CurrentWorkbook(){[Name="Data"]}[Content],
    data = Table.TransformColumnTypes(Source, {{"Impurity", type text}, {"Amount (ppm)", type number}}),
    //get filter list
    Source2 = Excel.CurrentWorkbook(){[Name="Prop65"]}[Content],
    filter = Table.TransformColumnTypes(Source2, {"Simplified Prop65 List", Text.Type}),
    //join them
    filteredData = Table.Join(data, "Impurity", filter, "Simplified Prop65 List", JoinKind.Inner),
    //remove unneeded column
    #"Removed Columns" = Table.RemoveColumns(filteredData, {"Simplified Prop65 List"})
in
    #"Removed Columns"
Another method would be a filter (Table.SelectRows), but it may be slower with a large dataset. At least in one instance where I had an opportunity to compare, the Table.Join method was faster on a 100,000-row data set.
let
    //get original data
    Source = Excel.CurrentWorkbook(){[Name="Data"]}[Content],
    data = Table.TransformColumnTypes(Source, {{"Impurity", type text}, {"Amount (ppm)", type number}}),
    //get filter list as a list of names
    Source2 = Excel.CurrentWorkbook(){[Name="Prop65"]}[Content],
    filter = Table.TransformColumnTypes(Source2, {"Simplified Prop65 List", Text.Type})[#"Simplified Prop65 List"],
    //filter the rows
    filteredData = Table.SelectRows(data, each List.Contains(filter, [Impurity]))
in
    filteredData
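If the Table.SelectRows variant is used on a larger dataset, one common optimization (my suggestion, not benchmarked in this thread) is to buffer the lookup list so List.Contains does not re-evaluate its source for every row. The last two steps of the query above would become:

    //hold the filter list in memory once, then reuse it per row
    filterBuffered = List.Buffer(filter),
    filteredData = Table.SelectRows(data, each List.Contains(filterBuffered, [Impurity]))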

Power Query: Detect null created by Table.NestedJoin JoinKind.FullOuter

With Table.NestedJoin and JoinKind.FullOuter, a null may be written into columns when there is a value in the right table's "key" that does not exist in the left table's "key".
However, unlike a null that is in the left table because the cell is empty, this created null does not test as equal to null with the formula [column] = null.
For example:
Table1 (note the null in row 3)
Table2
Joined Table (the null in row 5 was created as a result of the join)
Custom Column added with the formula =[A]=null (note the different results for the two nulls)
M code to reproduce the above:
let
    Source1 = Table.FromRecords({
        [A = "a"],
        [A = "b"],
        [A = null],
        [A = "c"]
    }),
    type1 = Table.TransformColumnTypes(Source1, {"A", type text}),
    Source2 = Table.FromRecords({
        [A = "c"],
        [A = "d"]
    }),
    type2 = Table.TransformColumnTypes(Source2, {"A", type text}),
    combo = Table.NestedJoin(type1, "A", type2, "A", "joined", JoinKind.FullOuter),
    #"Added Custom" = Table.AddColumn(combo, "Custom", each [A] = null)
in
    #"Added Custom"
Explanations and suggestions as to how to deal with this would be appreciated.
Edit: In addition to the above, doing a Replace will also only replace the null in row 3, and not the null in row 5. It seems there is something different about these two nulls.
Note: If I Expand the table, the null in Column A will now test correctly.
Asking the same question on the Microsoft Q&A forum pointed me to the possibility of an issue with the Power Query Evaluation model and also this article on Lazy Evaluation and Query Folding in Power BI/Power Query.
By forcing evaluation of the table with Table.Buffer, both nulls now behave the same.
So:
let
    Source1 = Table.FromRecords({
        [A = "a"],
        [A = "b"],
        [A = null],
        [A = "c"]
    }),
    type1 = Table.TransformColumnTypes(Source1, {"A", type text}),
    Source2 = Table.FromRecords({
        [A = "c"],
        [A = "d"]
    }),
    type2 = Table.TransformColumnTypes(Source2, {"A", type text}),
    //Table.Buffer forces evaluation
    combo = Table.Buffer(Table.NestedJoin(type1, "A", type2, "A", "joined", JoinKind.FullOuter)),
    //IsItNull now works
    IsItNull = Table.AddColumn(combo, "[A] = null", each [A] = null)
in
    IsItNull
It also seems that try ... otherwise forces an evaluation. So instead of Table.Buffer, the following also works:
...
combo = Table.NestedJoin(type1, "A", type2, "A", "joined", JoinKind.FullOuter),
//try ... otherwise seems to force evaluation
IsItNull = Table.AddColumn(combo, "[A] = null", each try [A] = null otherwise null)
Very interesting case. Indeed, the behaviour of the last null is counterintuitive in most possible implementations. If you wish to get the same behaviour for both kinds of nulls, try this approach:
= Table.AddColumn(combo, "test", each [A] ?? 10)
Interestingly, similar code doesn't work:
= Table.AddColumn(combo, "test", each if [A] = null then 10 else [A])
Moreover, if we try to improve the previous code by using the first syntax, we still get an unexpected result (10 instead of 20 for the last null):
= Table.AddColumn(combo, "test", each if [A] = null then 10 else [A] ?? 20)
Curiously, applying the ?? operator also fixes the problem with the initial column. Now there are regular nulls in column A:
= Table.AddColumn(add, "test2", each [A] = null)
So, if we don't need any calculations and just want to fix the invalid nulls, we can use code like this:
= Table.TransformColumns(combo, {"A", each _ ?? _})
The column doesn't matter; transforming the joined column gives the very same result:
transform = Table.TransformColumns(combo, {"joined", each _ ?? _}),
add = Table.AddColumn(transform, "test", each [A] = null)
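Putting the snippets above together, here is a minimal end-to-end sketch (reusing the same combo step from the question) that normalizes the join-produced nulls with the coalesce operator and then re-tests them:

let
    type1 = Table.TransformColumnTypes(Table.FromRecords({[A = "a"], [A = "b"], [A = null], [A = "c"]}), {"A", type text}),
    type2 = Table.TransformColumnTypes(Table.FromRecords({[A = "c"], [A = "d"]}), {"A", type text}),
    combo = Table.NestedJoin(type1, "A", type2, "A", "joined", JoinKind.FullOuter),
    //the coalesce operator turns the lazily produced nulls into regular ones
    fixed = Table.TransformColumns(combo, {"A", each _ ?? _}),
    //both nulls now test the same way
    test = Table.AddColumn(fixed, "IsNull", each [A] = null)
in
    test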

Get BigQuery table schema using google.cloud

I can, for example, get BigQuery data into local Python with:
import os
from google.cloud import bigquery
project_id = "example-project"
dataset_id = "exapmle_dataset"
table_id = "table_id"
os.environ["GOOGLE_CLOUD_PROJECT"] = project_id
bq = bigquery.Client()
query = "SELECT * FROM {}.{} LIMIT 5".format(dataset_id, table_id)
resp = bq.run_sync_query(query)
resp.run()
data_list = resp.rows
The result:
print(data_list)
>>> [('BEDD', '1',), ('A75', '1',), ('CE3F', '1',), ('0D8C', '1',), ('3E9C', '1',)]
How do I then go and get the schema for this table? Such that, for example
headings = ('heading1', 'heading2')
# or
schema_dict = {'fields': [{'name': 'heading1', 'type': 'STRING'}, {'name': 'heading2', 'type': 'STRING'}]}
You can use the schema property of your resp variable.
After running the query, you can retrieve it:
schema = resp.schema
schema will be a list containing the definition for each column in your query.
As an example, let's say this is your query:
query = "select '1' as fv, STRUCT<i INT64, j INT64> (1, 2) t from `dataset.table` limit 1"
The schema will be a list containing 2 entries:
[<google.cloud.bigquery.schema.SchemaField at 0x7ffa64fe6e50>,
<google.cloud.bigquery.schema.SchemaField at 0x7ffa64fe6b10>]
For each object in schema, you have the properties field_type, fields, mode and name, so if you run:
schema[0].field_type, schema[0].mode, schema[0].name
The result is "STRING", "NULLABLE", "fv".
As the second column is a record, if you run:
schema[1].field_type, schema[1].mode, schema[1].name, schema[1].fields
The result is:
"RECORD", "NULLABLE", "t", [google schema 1, google schema 2]
Where google schema 1 contains the definition for the inner fields within the record.
As far as I know, there's no way of getting a dictionary like the one you showed in your question, which means you'll have to loop over the entries in schema and build it yourself. It should be simple, though. I haven't fully tested this, but it might give you an idea of how to do it:
def extract_schema(schema_resp):
    l = []
    for schema_obj in schema_resp:
        r = {}
        r['name'] = schema_obj.name
        r['type'] = schema_obj.field_type
        r['mode'] = schema_obj.mode
        # recurse into RECORD columns, whose sub-fields live in .fields
        if schema_obj.fields:
            r['fields'] = extract_schema(schema_obj.fields)
        l.append(r)
    return l
So you'd just have to run schema = extract_schema(resp.schema) and (hopefully) you'll be good to go.
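As a side note (not from the original answer), newer versions of the google-cloud-bigquery client also expose the schema directly on a table reference, without running a query at all. A sketch, assuming a current client library:

from google.cloud import bigquery

client = bigquery.Client(project="example-project")
# Fetch table metadata directly; no query job is needed.
table = client.get_table("example-project.example_dataset.table_id")
for field in table.schema:
    print(field.name, field.field_type, field.mode)
# Each SchemaField can also be converted to a dict, giving a structure
# close to the one asked for in the question:
schema_dict = {"fields": [field.to_api_repr() for field in table.schema]}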
