I'm trying to remove the first line from the output file (arista_dataset2). I searched and found a lot of suggestions, but most of the solutions didn't work for me, and I came up with the solution res_data = res_Out.split("\n",1)[1], which works. Still, I'm looking for a more elegant solution if there is one.
from __future__ import absolute_import, division, print_function
import netmiko
import json
def arista_Connect():
    filename = "arista_dataset2"
    connection = netmiko.ConnectHandler(ip='192.1.1.1', device_type='arista_eos', username='netuser', password='secrete')
    #print(connection.send_command('show ip arp', max_loops=1000, delay_factor=20))
    res_Out = connection.send_command('show ip arp', delay_factor=20)
    #res_data = res_Out[1:]
    res_data = res_Out.split("\n", 1)[1]
    #print(res_Out)
    #return res_Out
    filedata = open(filename, mode='w+', encoding='utf-8')
    # while writing the entire dataset to the output file, just eliminate the first line from it
    filedata.write(res_data)
    filedata.close()
    connection.disconnect()

#arista_Connect()

if __name__ == '__main__':
    arista_Connect()
The data in the arista_dataset2 file is as follows; I'm just removing the line starting with ^Address:
Address Age (min) Hardware Addr Interface
127.1.0.2 0 0010.gh587.b001 Internal1/1
192.102.1.20 0 2c21.099a.9c01 Management1/1
192.102.1.23 0 c4f5.6061.b8dd Management1/1
192.102.1.25 0 c4f5.1b49.66a9 Management1/1
192.102.1.46 0 001c.7c03.1470 Management1/1
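For what it's worth, here are a couple of slightly tidier variants of the same drop-the-first-line idea (just sketches, not a different approach):
# partition() never raises, even when the output happens to be a single line
res_data = res_Out.partition("\n")[2]
# or drop the header explicitly rather than by position
res_data = "\n".join(line for line in res_Out.splitlines() if not line.startswith("Address"))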
Related
I have many text files. I tried to convert the txt files into a single CSV file, but it is taking a huge amount of time. I left the code running overnight, and by morning it had processed only 4500 files and was still running.
Is there a faster way to convert the text files into a CSV?
Here is my code:
import pandas as pd
import os
import glob
from tqdm import tqdm
# create empty dataframe
csvout = pd.DataFrame(columns =["ID","Delivery_person_ID" ,"Delivery_person_Age" ,"Delivery_person_Ratings","Restaurant_latitude","Restaurant_longitude","Delivery_location_latitude","Delivery_location_longitude","Order_Date","Time_Orderd","Time_Order_picked","Weather conditions","Road_traffic_density","Vehicle_condition","Type_of_order","Type_of_vehicle", "multiple_deliveries","Festival","City","Time_taken (min)"])
# get list of files
file_list = glob.glob(os.path.join(os.getcwd(), "train/", "*.txt"))
for filename in tqdm(file_list):
    # next file/record
    mydict = {}
    with open(filename) as datafile:
        # read each line and split on " " space
        for line in tqdm(datafile):
            # Note: partition result in 3 string parts, "key", " ", "value"
            # array slice third parameter [::2] means steps=+2
            # so only take 1st and 3rd item
            name, var = line.partition(" ")[::2]
            mydict[name.strip()] = var.strip()
    # put dictionary in dataframe
    csvout = csvout.append(mydict, ignore_index=True)
# write to csv
csvout.to_csv("train.csv", sep=";", index=False)
Here is my example text file.
ID 0xb379
Delivery_person_ID BANGRES18DEL02
Delivery_person_Age 34.000000
Delivery_person_Ratings 4.500000
Restaurant_latitude 12.913041
Restaurant_longitude 77.683237
Delivery_location_latitude 13.043041
Delivery_location_longitude 77.813237
Order_Date 25-03-2022
Time_Orderd 19:45
Time_Order_picked 19:50
Weather conditions Stormy
Road_traffic_density Jam
Vehicle_condition 2
Type_of_order Snack
Type_of_vehicle scooter
multiple_deliveries 1.000000
Festival No
City Metropolitian
Time_taken (min) 33.000000
CSV is a very simple data format that you don't need any sophisticated tools to handle: just text and separators.
In your hopefully simple case there is no need to use pandas and dictionaries.
The exception is if your data files are corrupt, missing some columns or containing additional columns to skip. But even in that case you can handle such issues better within your own code, so you have more control over it and can still get results within seconds.
Assuming your data files are not corrupt and have all columns in the right order, with none missing and none extra (so you can rely on their proper formatting), just try this code:
from time import perf_counter as T
sT = T()
filesProcessed = 0
columns =["ID","Delivery_person_ID" ,"Delivery_person_Age" ,"Delivery_person_Ratings","Restaurant_latitude","Restaurant_longitude","Delivery_location_latitude","Delivery_location_longitude","Order_Date","Time_Orderd","Time_Order_picked","Weather conditions","Road_traffic_density","Vehicle_condition","Type_of_order","Type_of_vehicle", "multiple_deliveries","Festival","City","Time_taken (min)"]
import glob, os
file_list = glob.glob(os.path.join(os.getcwd(), "train/", "*.txt"))
csv_lines = []
csv_line_counter = 0
for filename in file_list:
    filesProcessed += 1
    with open(filename) as datafile:
        csv_line = ""
        for line in datafile.read().splitlines():
            # print(line)
            var = line.partition(" ")[-1]
            csv_line += var.strip() + ';'
        csv_lines.append(str(csv_line_counter) + ';' + csv_line[:-1])
        csv_line_counter += 1

with open("train.csv", "w") as csvfile:
    csvfile.write(';' + ';'.join(columns) + '\n')
    csvfile.write('\n'.join(csv_lines))
eT = T()
print(f'> {filesProcessed=}, {(eT-sT)=:8.6f}')
I guess you will get the result at a speed beyond your expectations (in seconds, not minutes or hours).
On my computer, extrapolating from the processing time for 100 files, the time required for 50,000 files should be about 3 seconds.
I could not replicate this. I took the example data file and created 5000 copies of it. Then I ran your code with tqdm and without. The code below shows the version without:
import time
import csv
import os
import glob
import pandas as pd
from tqdm import tqdm
csvout = pd.DataFrame(columns =["ID","Delivery_person_ID" ,"Delivery_person_Age" ,"Delivery_person_Ratings","Restaurant_latitude","Restaurant_longitude","Delivery_location_latitude","Delivery_location_longitude","Order_Date","Time_Orderd","Time_Order_picked","Weather conditions","Road_traffic_density","Vehicle_condition","Type_of_order","Type_of_vehicle", "multiple_deliveries","Festival","City","Time_taken (min)"])
file_list = glob.glob(os.path.join(os.getcwd(), "sample_files/", "*.txt"))
t1 = time.time()
for filename in file_list:
    # next file/record
    mydict = {}
    with open(filename) as datafile:
        # read each line and split on " " space
        for line in datafile:
            # Note: partition result in 3 string parts, "key", " ", "value"
            # array slice third parameter [::2] means steps=+2
            # so only take 1st and 3rd item
            name, var = line.partition(" ")[::2]
            mydict[name.strip()] = var.strip()
    # put dictionary in dataframe
    csvout = csvout.append(mydict, ignore_index=True)

# write to csv
csvout.to_csv("train.csv", sep=";", index=False)
t2 = time.time()
print(t2-t1)
The times I got were:
tqdm: 33 seconds
no tqdm: 34 seconds
Then I ran using the csv module:
t1 = time.time()
with open('output.csv', 'a', newline='') as csv_file:
    columns =["ID","Delivery_person_ID" ,"Delivery_person_Age" ,"Delivery_person_Ratings","Restaurant_latitude","Restaurant_longitude","Delivery_location_latitude","Delivery_location_longitude","Order_Date","Time_Orderd","Time_Order_picked","Weather conditions","Road_traffic_density","Vehicle_condition","Type_of_order","Type_of_vehicle", "multiple_deliveries","Festival","City","Time_taken (min)"]
    d_Writer = csv.DictWriter(csv_file, fieldnames=columns, delimiter=',')
    d_Writer.writeheader()
    for filename in file_list:
        # reset the dict per file so values cannot carry over between files
        mydict = {}
        with open(filename) as datafile:
            for line in datafile:
                name, var = line.partition(" ")[::2]
                mydict[name.strip()] = var.strip()
        d_Writer.writerow(mydict)
t2 = time.time()
print(t2-t1)
The time for this was:
csv 0.32231569290161133 seconds.
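As an aside, most of those 33 seconds in the pandas version are likely spent in DataFrame.append, which copies the whole frame on every call (and is deprecated in recent pandas). A sketch that keeps pandas but builds the frame once, reusing file_list and the columns list from above:
rows = []
for filename in file_list:
    mydict = {}
    with open(filename) as datafile:
        for line in datafile:
            name, var = line.partition(" ")[::2]
            mydict[name.strip()] = var.strip()
    rows.append(mydict)

# build the DataFrame in one go instead of appending row by row
csvout = pd.DataFrame(rows, columns=columns)
csvout.to_csv("train.csv", sep=";", index=False)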
Try it like this.
import glob
with open('my_file.csv', 'a') as csv_file:
    for path in glob.glob('./*.txt'):
        with open(path) as txt_file:
            txt = txt_file.read() + '\n'
            csv_file.write(txt)
I have converted a .txt file to TFRecords with some changes to it. But now I want to convert it back, i.e. read the same file, so I can understand my data, which has now changed. I am doing this for my knowledge graph project.
import numpy as np
import os
import tensorflow as tf
import tqdm
import pdb
import glob
import time
import sys
import re
import argparse
import fastBPE
import platform
use_py3 = platform.python_version()[0] == '3'
parser = argparse.ArgumentParser(description='TensorFlow code for creating TFRecords data')
parser.add_argument('--text_file', type=str, required=True,
                    help='location of text file to convert to TFRecords')
parser.add_argument('--control_code', type=str, required=True,
                    help='control code to use for this file. must be in the vocabulary, else it will error out.')
parser.add_argument('--sequence_len', type=int, required=True,
                    help='sequence length of model being fine-tuned (256 or 512)')
args = parser.parse_args()
path_to_train_file = fname = args.text_file
domain = [args.control_code]
train_text = open(path_to_train_file, 'rb').read().decode(encoding='utf-8')
bpe = fastBPE.fastBPE('../codes', '../vocab')
tokenized_train_text = bpe.apply([train_text.encode('ascii', errors='ignore') if not use_py3 else train_text])[0] # will NOT work for non-English texts
# if you want to run non-english text, please tokenize separately using ./fast applybpe and then run this script on the .bpe file with utf8 encoding
tokenized_train_text = re.findall(r'\S+|\n', tokenized_train_text)
tokenized_train_text = list(filter(lambda x: x != u'##', tokenized_train_text))
# load the vocabulary from file
vocab = open('../vocab').read().decode(encoding='utf-8').split('\n') if not use_py3 else open('../vocab', encoding='utf-8').read().split('\n')
vocab = list(map(lambda x: x.split(' ')[0], vocab)) + ['<unk>'] + ['\n']
print ('{} unique words'.format(len(vocab)))
if args.control_code not in vocab:
    print('Provided control code is not in the vocabulary')
    print('Please provide a different one; refer to the vocab file for allowable tokens')
    sys.exit(1)
# Creating a mapping from unique characters to indices
word2idx = {u:i for i, u in enumerate(vocab)}
idx2word = np.array(vocab)
seq_length = args.sequence_len-1
def numericalize(x):
    count = 0
    for i in x:
        if i not in word2idx:
            print(i)
            count += 1
    return count>1, [word2idx.get(i, word2idx['<unk>']) for i in x]
tfrecords_fname = fname.lower()+'.tfrecords'
total = 0
skipped = 0
with tf.io.TFRecordWriter(tfrecords_fname) as writer:
    for i in tqdm.tqdm(range(0, len(tokenized_train_text), seq_length)):
        flag_input, inputs = numericalize(domain+tokenized_train_text[i:i+seq_length])
        flag_output, outputs = numericalize(tokenized_train_text[i:i+seq_length+1])
        total += 1
        if flag_input or flag_output:
            skipped += 1
            continue
        if len(inputs)!=seq_length+1 or len(outputs)!=seq_length+1:
            break
        example_proto = tf.train.Example(features=tf.train.Features(feature={'input': tf.train.Feature(int64_list=tf.train.Int64List(value=inputs)),
                                                                             'output': tf.train.Feature(int64_list=tf.train.Int64List(value=outputs))}))
        writer.write(example_proto.SerializeToString())
print('Done')
print('Skipped', skipped, 'of', total)
This is my code. I want to keep every change it makes to my data, except that now I need to read the TFRecords back rather than just writing them.
Read the TFRecord file with a tf.data.TFRecordDataset.
Then iterate through the dataset and, for each element, write it to a new text file or print out the results.
https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset
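A minimal sketch of that, assuming the records were written by the script above (so each example carries int64 'input' and 'output' features) and that tfrecords_fname and idx2word are the same variables built there:
import tensorflow as tf

feature_description = {
    'input': tf.io.VarLenFeature(tf.int64),
    'output': tf.io.VarLenFeature(tf.int64),
}

def parse_example(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_description)
    return tf.sparse.to_dense(parsed['input']), tf.sparse.to_dense(parsed['output'])

dataset = tf.data.TFRecordDataset(tfrecords_fname).map(parse_example)

with open(tfrecords_fname + '.decoded.txt', 'w', encoding='utf-8') as out:
    for inputs, outputs in dataset:
        # map the token ids back to words with the same vocabulary used for writing
        words = [idx2word[i] for i in inputs.numpy()]
        out.write(' '.join(words) + '\n')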
I am using the OCI Python SDK, and when I try to download an object (from an OCI bucket) that is in GZ format, it gets downloaded but the file size is zero bytes. Attaching the code below.
Any help is much appreciated.
import os
import oci
import io
import sys
reporting_namespace = 'xygabcdef'
prefix_file = "abc/xyz"
# Update these values
destination_path = 'downloaded_reports'
# Make a directory to receive reports
if not os.path.exists(destination_path):
    os.mkdir(destination_path)
# Get the list of reports
config = oci.config.from_file(oci.config.DEFAULT_LOCATION, oci.config.DEFAULT_PROFILE)
reporting_bucket = sys.argv[1]
object_storage = oci.object_storage.ObjectStorageClient(config)
report_bucket_objects = object_storage.list_objects(reporting_namespace, reporting_bucket, prefix=prefix_file)
#def download_audit():
for o in report_bucket_objects.data.objects:
    print('Found file ' + o.name)
    object_details = object_storage.get_object(reporting_namespace, reporting_bucket, o.name)
    print(object_details)
    filename = o.name.rsplit('/', 1)[-1]
    with open(destination_path + '/' + filename, 'wb') as f:
        for chunk in object_details.data.raw.stream(1024 * 1024, decode_content=False):
            f.write(chunk)
Please see the example here. Does this work for you? Namely:
get_obj = object_storage.get_object(namespace, bucket_name, example_file_object_name)
with open('example_file_retrieved', 'wb') as f:
    for chunk in get_obj.data.raw.stream(1024 * 1024, decode_content=False):
        f.write(chunk)
In your example destintation_path seems to be undefined, and seems to have a typo (destintation -> destination). Could this be the problem?
Lastly, what does object_details report the file size / content-length as? It could be that the file size of the object in Object Storage is itself 0 bytes.
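A quick way to check that from your loop (a sketch; I'm assuming the SDK response exposes the HTTP headers via its .headers attribute):
# if this already prints 0, the object itself is empty in Object Storage
print(o.name, object_details.headers.get('Content-Length'))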
The .content attribute of the .data from get_object should give you the file data (binary or text/json/...), so here is a modified version of your code:
import os
import sys
import oci
reporting_namespace = 'xygabcdef'
prefix_file = "abc/xyz"
# Update these values
destination_path = 'downloaded_reports'
# Get the list of reports
config = oci.config.from_file(oci.config.DEFAULT_LOCATION, oci.config.DEFAULT_PROFILE)
reporting_bucket = sys.argv[1]
object_storage = oci.object_storage.ObjectStorageClient(config)
objects = object_storage.list_objects(reporting_namespace, reporting_bucket, prefix=prefix_file).data
# def download_audit():
for obj in objects.objects:
    print('Found file ' + obj.name)
    object_response = object_storage.get_object(reporting_namespace, reporting_bucket, obj.name).data
    print(object_response)
    file_path = os.path.join(destination_path, obj.name)
    # Make sure parent dirs up to the file level are created
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    with open(file_path, 'wb') as file:
        file.write(object_response.content)
I was asked to read from two files (left and right reads), Aip02.R1.fastq and Aip02.R2.fastq, and get an interleaved fasta file using the zip function. The left and right files are fastq files, but when I zip them together to make a new fastq file, the writer function doesn't work anymore. It gives me the error "SeqRecord (id=) has an invalid sequence."
#!/usr/bin/env python3
# Import Seq, SeqRecord, and SeqIO
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
leftReads = SeqIO.parse("/scratch/AiptasiaMiSeq/fastq/Aip02.R1.fastq", "fastq")
rightReads = SeqIO.parse("/scratch/AiptasiaMiSeq/fastq/Aip02.R2.fastq","fastq")
A= zip(leftReads,rightReads)
SeqIO.write(SeqRecord(list(A)), "interleave.fastq", "fastq")
Your forward and reverse sequences probably have the same ID. So use the following code to add a suffix to the IDs. I used /1 and /2 here, but things like .f and .r are also used.
from Bio import SeqIO
def interleave(iter1, iter2):
    # zip in Python 3 (this was itertools.izip in Python 2)
    for (forward, reverse) in zip(iter1, iter2):
        assert forward.id == reverse.id
        forward.id += "/1"
        reverse.id += "/2"
        yield forward
        yield reverse

leftReads = SeqIO.parse("/scratch/AiptasiaMiSeq/fastq/Aip02.R1.fastq", "fastq")
rightReads = SeqIO.parse("/scratch/AiptasiaMiSeq/fastq/Aip02.R2.fastq", "fastq")

handle = open("interleave.fastq", "w")
count = SeqIO.write(interleave(leftReads, rightReads), handle, "fastq")
handle.close()
print("{} records written to interleave.fastq".format(count))
This code can become slow for large fastq files. For example see here where they report that it takes 14 minutes to create a 2GB fastq file. So they give this improved way:
from Bio.SeqIO.QualityIO import FastqGeneralIterator
file_f = "/scratch/AiptasiaMiSeq/fastq/Aip02.R1.fastq"
file_r = "/scratch/AiptasiaMiSeq/fastq/Aip02.R2.fastq"

handle = open("interleave.fastq", "w")
count = 0
f_iter = FastqGeneralIterator(open(file_f))
r_iter = FastqGeneralIterator(open(file_r))
for (f_id, f_seq, f_q), (r_id, r_seq, r_q) in zip(f_iter, r_iter):  # itertools.izip in Python 2
    assert f_id == r_id
    count += 2
    # Write out both reads with "/1" and "/2" suffix on ID
    handle.write("@%s/1\n%s\n+\n%s\n@%s/2\n%s\n+\n%s\n"
                 % (f_id, f_seq, f_q, r_id, r_seq, r_q))
handle.close()
print("{} records written to interleave.fastq".format(count))
At the moment I am able to create one CSV file with all the content I get at once.
Now I would like to create a list where I have different names in it.
How can I produce a different CSV file name for every function call? I thought about looping over a list, but I just want a +1 increment at each call. I also thought about saving my state somehow and using it in the next function call, but every time the variable gets initialized with 0, so I never get to 1. I think I could do it with Python function parameters, but I have no idea how to use them. Can someone give me a little tip or example? If there are better ideas (maybe my idea is completely wrong), any help on how to solve this is appreciated.
The comments in the code represent what I have in mind.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tenable.sc import SecurityCenter as SC
import os.path
import sys
import getpass
import csv
SC_HOST = '...'
def parse_entry(entry):
    split_after_path = ''
    ip = entry.get('ip', None)
    pluginText = entry.get('pluginText', None)
    if 'Path : ' in pluginText:
        for line in pluginText.splitlines(0):
            if 'Path : ' in line:
                split_after_path_in_plugintext = line.split("Path : ",1)[1]
    # place = ['place1', 'place2', 'place3', 'place4', 'place5']
    # i = 0
    # i = i+1
    file_exists = os.path.isfile('testfile_path.csv')
    # file_exists = os.path.isfile('testfile_path_'+place[i]+'.csv')
    data = open('testfile_path.csv', 'a')
    # data = open('testfile_path_'+place[i]+'.csv', 'a')
    with data as csvfile:
        header = ['IP Address', 'Path']
        writer = csv.DictWriter(csvfile, lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC, fieldnames=header)
        if not file_exists:
            writer.writeheader()
        writer.writerow({'IP Address': ip, 'Path': split_after_path})
    data.close()
def main():
    sc_user = input('[<<] username: ')
    sc_pass = getpass.getpass('[<<] password: ')
    sc = SC(SC_HOST)
    sc.login(sc_user, sc_pass)
    # Query API for data
    # asset = [12,13,14,25,29]
    # i = 0
    # assetid = asset[i]
    # vuln = sc.analysis.vulns(('pluginID', '=', '25072')('asset','=','assetid'))
    # i = i+1
    vuln = sc.analysis.vulns(('pluginID', '=', '25072'),('asset','=','11'))
    for entry in vuln:
        parse_entry(entry)
    sc.logout()
    return 0

if __name__ == '__main__':
    sys.exit(main())
The simplest and most obvious solution is to pass the full file path to your parse_entry function, i.e.:
def parse_entry(entry, filepath):
    # ...
    if 'Path : ' in pluginText:
        for line in pluginText.splitlines(0):
            if 'Path : ' in line:
                # ...
    file_exists = os.path.isfile(filepath)
    with open(filepath, 'a') as csvfile:
        # ...
Then in main() use enumerate() to build sequential filenames:
def main():
    # ...
    for i, entry in enumerate(vuln):
        path = "testfile_path{}.csv".format(i)
        parse_entry(entry, path)
You can use a function attribute to keep track of the number of times the function has been called.
def parse_entry(entry):
    parse_entry.i += 1

# outside the function you have to initialize the attribute
parse_entry.i = 0
Or you can look at other ways to initialize the function attribute in this post.
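For completeness, a sketch of how that attribute could drive the filename (the testfile_path_ pattern is only an example taken from the commented-out code in the question):
def parse_entry(entry):
    parse_entry.i += 1
    filepath = 'testfile_path_{}.csv'.format(parse_entry.i)
    # ... open filepath in append mode and write the row as before ...

parse_entry.i = 0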
Alternatively, you can use glob to get the current number of files.
from glob import glob
i = len(glob('testfile_path_*.csv'))
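Putting that together, a small sketch (again, the filename pattern is only an example):
from glob import glob

def next_csv_path(pattern='testfile_path_*.csv'):
    # count the existing files and use that count as the next index
    i = len(glob(pattern))
    return 'testfile_path_{}.csv'.format(i)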