vim-startify configuration in lua with custom functions - vim

I'm trying to migrate my vimrc configuration to Lua, and I'm stuck on migrating my vim-startify configuration. In particular, how can I write the gitModified and gitUntracked lists?
vimscript:
" Build Startify entries from locally modified (unstaged) git files.
" Each entry is a dict with 'line' (text shown on the start screen) and
" 'path' (the file opened on selection); stderr is silenced so the list
" is simply empty outside a git repository.
function! s:gitModified()
let files = systemlist('git ls-files -m 2>/dev/null')
return map(files, "{'line': v:val, 'path': v:val}")
endfunction
" Build Startify entries from untracked git files (-o), honouring the
" standard ignore rules (--exclude-standard). Same {'line', 'path'} shape
" as s:gitModified(); stderr is silenced outside a git repository.
function! s:gitUntracked()
let files = systemlist('git ls-files -o --exclude-standard 2>/dev/null')
return map(files, "{'line': v:val, 'path': v:val}")
endfunction
" Sections shown on the Startify start screen, in display order.
" 'type' is either a built-in section name or a Funcref returning a list
" of {'line', 'path'} dicts — that is how the two git sections plug in.
let g:startify_lists = [
\ { 'type': 'dir', 'header': [' MRU '. getcwd()] },
\ { 'type': 'sessions', 'header': [' Sessions'] },
\ { 'type': 'bookmarks', 'header': [' Bookmarks'] },
\ { 'type': function('s:gitModified'), 'header': [' git modified']},
\ { 'type': function('s:gitUntracked'), 'header': [' git untracked']},
\ { 'type': 'commands', 'header': [' Commands'] },
\ ]
My current lua:
-- Partial Lua port of g:startify_lists; the two git-driven sections from
-- the vimscript version are still missing here.
vim.g.startify_lists = {
{ type = "commands", header = { " Commands" } }, -- Commands section
{ type = "dir", header = { " MRU " .. vim.fn.getcwd() } }, -- MRU files from the current working directory
{ type = "sessions", header = {" Sessions"} }, -- saved sessions
{ type = "bookmarks", header = {" Bookmarks"} }, -- bookmarks
}
Here I'm missing the two git related items.
Any idea?
Thanks in advance.

Here's what I did:
-- Wrap a shell command into a Startify list provider.
-- Returns a closure that runs `command` (stderr silenced), and turns each
-- output line into the { line = ..., path = ... } entry shape that
-- vim-startify expects from a custom list function.
function CommandToStartifyTable(command)
  local full_cmd = command .. " 2>/dev/null"
  return function()
    local entries = {}
    for _, output_line in ipairs(vim.fn.systemlist(full_cmd)) do
      entries[#entries + 1] = { line = output_line, path = output_line }
    end
    return entries
  end
end
-- Final Startify configuration: MRU section plus the two git-driven
-- sections produced by CommandToStartifyTable().
vim.g.startify_lists = {
{type = "dir", header = {" MRU " .. vim.fn.fnamemodify(vim.fn.getcwd(), ":t")}}, -- ":t" keeps only the tail (last path component) of the CWD in the header
{type = CommandToStartifyTable("git ls-files -m"), header = {" Git modified"}},
{type = CommandToStartifyTable("git ls-files -o --exclude-standard"), header = {" Git untracked"}}
}

Related

writeJSON groovy writes only first value to file

Below is my jenkins groovy script to read data and write as json file.
// Jenkins pipeline: list AWS Device Farm devices, keep those whose major OS
// version is >= currentVersion's major, and write the selection as JSON.
import groovy.json.JsonOutput
def arn = ""
def name = ""
def os = ""
pipeline {
agent any
stages {
stage('Hello') {
steps {
script{
// CLI output parsed into a map via the pipeline readJSON step.
def ret = sh(script: 'aws devicefarm list-devices', returnStdout: true)
def jsonObj = readJSON text: ret
def currentVersion = "13.0"
def values = currentVersion.split('\\.')
def json_str = ""
for(String item: jsonObj.devices) {
os = item.os
// NOTE(review): String.contains() takes a literal CharSequence, so
// "\\." looks for a backslash followed by a dot and never matches —
// "." was almost certainly intended here.
if(!os.contains("\\.")) {
osV = os + ".0"
} else{
osV = os
}
def osValues = osV.split('\\.')
if(values[0].toInteger() <= osValues[0].toInteger()) {
name = item.name
def data = [
name: "$name",
os: "$os",
]
// BUG (the reported symptom): appending one serialized object per
// device yields "{..}{..}...", which is not valid JSON, so the
// prettyPrint/writeJSON step below only keeps the first object.
// Collect the maps in a list and serialize once instead.
json_str += JsonOutput.toJson(data)
}
}
def json_beauty = JsonOutput.prettyPrint(json_str)
writeJSON(file: 'message124.json', json: json_beauty)
}
}
}
}
}
But here, it only saves the first value, not all the values. Could you tell me where I am wrong here?
It's not 100% clear what you actually want to end up with, but I think you want a JSON file containing the items where OS is greater than a magic number.
It's helpful to provide enough data to duplicate the problem, and eliminate everything that isn't directly related. I think what you want is something like this:
// Self-contained repro: keep the devices whose major OS version is at least
// the threshold in `values`, then emit them as a JSON array.
jsonObj = [
devices: [
[ os: '3', name: 'Name 1'],
[ os: '10.2', name: 'Name 10.2'],
[ os: '7', name: 'Name 7'],
[ os: '3', name: 'Name 3'],
],
]
values = ['5']
// Ensure the version string has a minor part ("3" -> "3.0") so that
// splitting on '.' always yields a major component.
def normalizeOs(os) {
os.contains(".") ? os : "$os.0"
}
// True when the device's major version is below the threshold in `values`.
def shouldSkip(normalizedOs) {
// FIX: split the parameter — the original split the `os` binding variable,
// which only worked because the caller happened to assign `os` just before
// each call.
osValues = normalizedOs.split('\\.')
values[0].toInteger() > osValues[0].toInteger()
}
selected = []
for (item: jsonObj.devices) {
os = normalizeOs(item.os)
if (shouldSkip(os)) continue
selected.push([name: item.name, os: os])
}
json = new groovy.json.JsonBuilder(selected)
println(json)
Outputs:
[{"name":"Name 7","os":"7.0"},{"name":"Name 10.2","os":"10.2"}]

first_error_message: 'syntax error - at ''

I'm trying to update an array inside a specific json document on a Couchbase Bucket.
Basically what I want to do is to insert a new object to the array.
When I try below query on the data Base itself, the query is working perfectly,
UPDATE BUCKETNAME SET appl[3]= {
'appl_ads_id': 'testtttt',
'appl_bnd_lvl_cd': 30,
'appl_desire_skill_tx': [
"couchbase",
"nodejs"
],
'appl_email_tx': "TESTING123#gmail.com",
'appl_full_nm': "TEST TESTING",
'appl_id': "4444",
'appl_ldr_apprvd': true,
'appl_locat_tx': "ALABAMA",
'appl_role_nm': "Engineer III",
'appl_status': "PENDING"}
where opp_id= 1 and opp_nm= "Couchbase Management Project" RETURNING *;
but when I try to write that query in my JavaScript code, I'm getting the error below:
QueryErrorContext {
first_error_code: 3000,
first_error_message: 'syntax error - at '',
statement:
'UPDATE `BUCKETNAME` SET appl[3]= {'appl_ads_id': 'albm111', 'appl_bnd_lvl_cd': 30, 'appl_desire_skill_tx': ['couchbase', 'nodejs'],'appl_email_tx': 'zzzzzzz','appl_full_nm': 'z Z','appl_id': '222222','appl_ldr_apprvd': false, 'appl_locat_tx': 'ALABAMA','appl_role_nm': 'Engineer III','appl_status': 'Test'} where opp_id= $ID and opp_nm= $NAME RETURNING *;',
client_context_id: '54f4f683f306b4a5',
parameters: '',
http_response_code: 400,
http_response_body: '' } }
Please find below how I wrote the N1ql query on my nodejs code:
// Runs the N1QL UPDATE with named parameters ($ID / $NAME) and logs each
// returned row.
// NOTE(review): the object constructor inside the statement uses
// single-quoted field names and values; that is what triggers the server's
// "syntax error - at ''" (error 3000) — the answer is to use standard
// double-quoted JSON syntax inside the N1QL statement.
async function queryResults() {
const query = "UPDATE BUCKETNAME SET appl[3]= {'appl_ads_id': 'mfern685', 'appl_bnd_lvl_cd': 30, 'appl_desire_skill_tx': ['couchbase', 'nodejs'],'appl_email_tx': 'zzzzzzz','appl_full_nm': 'z Z','appl_id': '222222','appl_ldr_apprvd': false, 'appl_locat_tx': 'Sunrise','appl_role_nm': 'Engineer III','appl_status': 'Test'} where opp_id= $ID and opp_nm= $NAME RETURNING *;"
const options = { parameters: { ID: 1, NAME: 'Couchbase Management Project' } }
try {
let results = await cluster.query(query,options);
results.rows.forEach((row) => {
console.log('Query row: ', row)
})
return results
} catch (error) {
console.error('Query failed: ', error)
}
}
​
The second part of your query should use standard SQL syntax:
update my_bucket set myArray[3] = {teste: 'something'}, appl_email_tx = 'TESTING123#gmail.com';

Trying to run python script using terminal and pass inputs in terminal

I'm trying to pass my input from terminal and run python script, My input is url http://localhost:8080/api/auth and payload is { "request":"success","input":[ { "type":" ", "content":[ { "type":" ", "meta":{ "sample_type":" " , deatail":" "} ] } ], "output":[ { "type":" ","content":[ { "type":"", "meta":{ "sample_type":"", }, "deatils":" " } ] } ] }
My Code is here:
def get_response():
    """POST the command-line payload to --url with a bearer token, and print
    the decoded JSON response."""
    auth_access_Token = get_token()
    parser = argparse.ArgumentParser(description='A tutorial of argparse!')
    # FIX: action="store" (the default) keeps the option's value. The original
    # used action="store_true", which turns the option into a boolean flag and
    # rejects the value, producing "error: unrecognized arguments".
    parser.add_argument("--url", action="store", required=True)
    parser.add_argument("--payload", action="store", required=True)
    a = parser.parse_args()
    url = a.url
    Header = {'Auth': 'Bearer ' + str(auth_access_Token)}
    payload = a.payload
    resp = requests.post(url, headers=Header, json=payload)
    print(json.loads(resp.content))
get_response()
When I pass my inputs using
python test.py --url http://localhost:8080/api/auth --payload `{ "request":"success","input":[ { "type":" ", "content":[ { "type":" ", "meta":{ "sample_type":" " , deatail":" "} ] } ], "output":[ { "type":" ","content":[ { "type":"", "meta":{ "sample_type":"", }, "deatils":" " } ] } ] }`
It's giving me error: unrecognized arguments
where am I wrong?
Thanks in advance
So, there are a couple of problems with your script.
action value should be store and not store_true.
store_true is for TRUE and FALSE values.
Secondly, in order to handle the spaces in the input arguments, they must be enclosed in double quotes. As the payload value contains double quotes, those must be replaced with single quotes.
Here is the modified code
# Use action="store" so each option keeps its value (store_true would make a
# boolean flag). nargs=1 wraps the value in a one-element list, which is why
# the payload is later accessed as a.payload[0].
parser = argparse.ArgumentParser(description='A tutorial of argparse!')
parser.add_argument('--url', action="store", nargs=1, type=str, required=True )
parser.add_argument("--payload", nargs=1, type=str, required=True )
a = parser.parse_args()
print(a)
So when you invoke the script like this:
> python test1.py --url http://localhost:8080/api/auth --payload "{ 'request':'success','input':[ { 'type':' ', 'content':[ { 'type':' ', 'meta':{ 'sample_type':' ' , deatail':' '} ] } ], 'output':[ { 'type':' ','content':[ { 'type':'', 'meta':{ 'sample_type':'', }, 'deatils':' ' } ] } ] }"
EDIT
You have to process the payload data.
Also, I found that the data you are passing is ill-formed.
"meta":{ "sample_type":" " , deatail":" "}, see quotes are missing before deatail and also some matching braces.
# nargs=1 stored the payload as a one-element list; unwrap it.
payload = a.payload[0]
# The payload was passed with single quotes to survive shell quoting; convert
# it back to valid JSON and parse it, so requests sends the object itself.
# FIX: the original passed the raw string to json=, which requests would
# serialize again — the server would receive a JSON-encoded string, not the
# intended JSON object.
payload = json.loads(re.sub(r"'", "\"", payload))
resp = requests.post(url, headers=Header, json=payload)
print(json.loads(resp.content))

How to prevent comma in VimL string concatenation?

I have an object defined like so:
" Minimal repro: the custom fugitive component whose rendered output picks up
" an unwanted leading comma from fugitive#statusline().
let g:lightline = {
\ 'component': {
\ 'fugitive': '%{exists("*fugitive#statusline") ? "⎇ " . fugitive#statusline() : ""}'
\ },
\ }
The output of fugitive#statusline() is GIT(master), so the final string eventually appears in my statusline as ⎇ ,GIT(master) with a comma.
Why is there a comma? How can we avoid the comma?
I'm using lightline.vim to customize my status line, and the whole configuration looks like this:
" Full lightline setup: left-hand layouts for active/inactive windows plus
" custom components. The fugitive component concatenates "⎇ " with
" fugitive#statusline(), whose output itself carries a comma or bracket
" prefix — that is the source of the stray comma in the statusline.
let g:lightline = {
\ 'active': {
\ 'left': [
\ [ 'mode', 'paste' ],
\ [ 'filename', 'readonly', 'modified' ],
\ [ 'fugitive', ],
\ ]
\ },
\ 'inactive': {
\ 'left': [
\ [ 'filename', 'readonly', 'modified' ],
\ [ 'fugitive', ],
\ ]
\ },
\ 'component': {
\ 'readonly': '%{&readonly?"x":""}',
\ 'fugitive': '%{exists("*fugitive#statusline") ? "⎇ " . fugitive#statusline() . "" : ""}'
\ },
\ 'component_visible_condition': {
\ 'fugitive': '(exists("*fugitive#head") && ""!=fugitive#head())'
\ },
\ 'separator': { 'left': '', 'right': '' },
\ 'subseparator': { 'left': '|', 'right': '|' }
\ }
This code in the fugitive plugin either prepends a comma, or encloses the element in square braces. These two styles are also offered by the built-in statusline elements.
You can remove the undesired comma by taking only a substring ([1:]) off the result of the fugitive invocation:
'fugitive': '%{exists("*fugitive#statusline") ? "⎇ " . fugitive#statusline()[1:] : ""}'

Groovy code to convert json to CSV file

Does anyone have any sample Groovy code to convert a JSON document to CSV file? I have tried to search on Google but to no avail.
Example input (from comment):
[ company_id: '1',
web_address: 'vodafone.com/',
phone: '+44 11111',
fax: '',
email: '',
addresses: [
[ type: "office",
street_address: "Vodafone House, The Connection",
zip_code: "RG14 2FN",
geo: [ lat: 51.4145, lng: 1.318385 ] ]
],
number_of_employees: 91272,
naics: [
primary: [
"517210": "Wireless Telecommunications Carriers (except Satellite)" ],
secondary: [
"517110": "Wired Telecommunications Carriers",
"517919": "Internet Service Providers",
"518210": "Web Hosting"
]
]
More info from an edit:
// Writes a hard-coded list of maps to test.csv, one comma-joined row each.
// NOTE(review): the values are joined with ',' without any quoting or
// escaping, yet the description fields themselves contain commas and double
// quotes — those rows will produce malformed CSV. File.append also adds to
// whatever is already in the file, so repeated calls accumulate rows.
def export(){
def exportCsv = [ [ id:'1', color:'red', planet:'mars', description:'Mars, the "red" planet'],
[ id:'2', color:'green', planet:'neptune', description:'Neptune, the "green" planet'],
[ id:'3', color:'blue', planet:'earth', description:'Earth, the "blue" planet'],
]
def out = new File('/home/mandeep/groovy/workspace/FirstGroovyProject/src/test.csv')
exportCsv.each {
def row = [it.id, it.color, it.planet,it.description]
out.append row.join(',')
out.append '\n'
}
return out
}
Ok, how's this:
import groovy.json.*
// Added extra fields and types for testing
def js = '''{"infile": [{"field1": 11,"field2": 12, "field3": 13},
{"field1": 21, "field4": "dave","field3": 23},
{"field1": 31,"field2": 32, "field3": 33}]}'''
def data = new JsonSlurper().parseText( js )
// Union of the keys seen across all rows — rows missing a key get an empty cell.
def columns = data.infile*.keySet().flatten().unique()
// Render a cell: nulls become empty, strings are double-quoted with embedded
// quotes doubled (RFC 4180), everything else is stringified as-is.
// FIX: the original /"$e"/ did not escape embedded double quotes, producing
// malformed CSV for any string value containing '"'.
def encode = { e -> e == null ? '' : e instanceof String ? '"' + e.replace('"', '""') + '"' : "$e" }
// Print all the column names
println columns.collect { c -> encode( c ) }.join( ',' )
// Then create all the rows
println data.infile.collect { row ->
// A row at a time
columns.collect { colName -> encode( row[ colName ] ) }.join( ',' )
}.join( '\n' )
That prints:
"field3","field2","field1","field4"
13,12,11,
23,,21,"dave"
33,32,31,
Which looks correct to me

Resources