Create a sub stream in Python

How can I create a "sub stream" in Python? Let's say I have a file opened for reading. I want to return a file-like object that can be used to read only part of that file.
with open(filename, 'rb') as f:
    start = 0x34
    size = 0x20
    return Substream(f, start, size)  # <-- How do I do this?
Seeking to 0 on this object should go to "start" on the f object. Furthermore, reading past size should trigger EOF behavior. I hope this makes sense. How do I accomplish this?

A quick subclass of io.RawIOBase seems to do the trick, at least for my use case. I understand this is not a full implementation of the io.RawIOBase interface, but it gets the job done.
class Substream(io.RawIOBase):
    """Represents a view of a subset of a file-like object"""

    def __init__(self, file: io.RawIOBase, start, size):
        self.file = file
        self.start = start
        self.size = size
        self.p = 0

    def seek(self, offset, origin=0):
        if origin == 0:    # from the start of the substream
            self.p = offset
        elif origin == 1:  # relative to the current position
            self.p += offset
        elif origin == 2:  # relative to the end, which is known from size
            self.p = self.size + offset
        else:
            raise ValueError("Unexpected origin: {}".format(origin))
        return self.p

    def read(self, n):
        prev = self.file.tell()
        self.file.seek(self.start + self.p)
        # Never read past the end of the substream; a zero-length read
        # at the boundary gives the usual EOF behaviour.
        n = min(n, max(self.size - self.p, 0))
        data = self.file.read(n)
        self.p += len(data)
        self.file.seek(prev)
        return data
Use it like so:

with open(filename, 'rb') as f:
    print(Substream(f, 10, 100).read(10))
I wonder if this can be done at the file descriptor level instead somehow...?
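For reference, a minimal sketch of the file-descriptor-level idea on POSIX systems, assuming the underlying file is a real OS-level file: os.pread() reads at an absolute offset without disturbing the descriptor's position, although it hands back bytes rather than a file-like object.

import os

def read_slice(fd, start, size):
    """Read the whole [start, start + size) byte range in one call (POSIX only)."""
    return os.pread(fd, size, start)

with open("example.bin", "rb") as f:  # hypothetical file name
    data = read_slice(f.fileno(), 0x34, 0x20)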

Related

Multiprocessing in python for a function

I'm new to Python and I'm trying to use multiprocessing.
I'm writing a program that converts the TIFF files present in a directory to DAX files.
This is my original code:
import os
import datawriter
import datareader
from time import time
from tqdm import tqdm

dataroot = input("Enter the folder location:")
count_for_number_of_files = 0
count_for_frames_in_filelist = 0

for subdir, dir, files in os.walk(dataroot):
    for file in files:
        if file.endswith(".tif"):
            print(f"file {count_for_number_of_files + 1} = {file}")
            count_for_number_of_files += 1

print("Total number of files:", count_for_number_of_files)

frame_list = [None] * count_for_number_of_files
for i in range(0, len(frame_list)):
    frame_list[i] = input(f"Enter number of frames for file {i + 1}: ")
print("Frames in each file:", frame_list)

start_time = time()
for subdir, dir, files in os.walk(dataroot):
    for file in sorted(files):
        if file.endswith(".tif"):
            dax_file = datawriter.DaxWriter("{}.dax".format(file[0:-4]))
            print(f"Processing {frame_list[count_for_frames_in_filelist]} frames for {file}")
            for i in tqdm(range(int(frame_list[count_for_frames_in_filelist]))):
                data = datareader.TifReader("{}".format(file)).loadAFrame(i)
                dax_file.addFrame(data)
            count_for_frames_in_filelist += 1
            dax_file.close()

print(f"Conversion completed for {count_for_number_of_files} files", '\n',
      "Total time taken:", time() - start_time, "seconds")
The new code using multiprocessing is:
import multiprocessing as mp
from multiprocessing import Process, Lock
import numpy as np
import pandas as pd
import os, logging
import originpro as op
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import datawriter
import datareader
from time import time
from tqdm import tqdm
import tifffile
import hashlib
import re
import threading

dataroot = input("Enter the folder location:")

class Multi:
    def f(x):
        count_for_number_of_files = 0
        count_for_frames_in_filelist = 0
        for subdir, dir, files in os.walk(x):
            for file in files:
                if file.endswith(".tif"):
                    print(f"file {count_for_number_of_files + 1} = {file}")
                    count_for_number_of_files += 1
        print("Total number of files:", count_for_number_of_files)

        frame_list = [None] * count_for_number_of_files
        for i in range(0, len(frame_list)):
            frame_list[i] = input(f"Enter number of frames for file {i + 1}: ")
        print("Frames in each file:", frame_list)

        start_time = time()
        for subdir, dir, files in os.walk(dataroot):
            for file in sorted(files):
                if file.endswith(".tif"):
                    dax_file = datawriter.DaxWriter("{}.dax".format(file[0:-4]))
                    print(f"Processing {frame_list[count_for_frames_in_filelist]} frames for {file}")
                    for i in tqdm(range(int(frame_list[count_for_frames_in_filelist]))):
                        data = datareader.TifReader("{}".format(file)).loadAFrame(i)
                        dax_file.addFrame(data)
                    count_for_frames_in_filelist += 1
                    dax_file.close()

        print(f"Conversion completed for {count_for_number_of_files} files", '\n',
              "Total time taken:", time() - start_time, "seconds")

my_object = Multi

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(Multi.f(dataroot))
    p.start()
    print(q.get())
    p.join()
The problem is that the runtime remains the same; I had hoped it would get faster.
datareader and datawriter are other Python files that I'm importing:
datareader.py
#!/usr/bin/env python
"""
Classes that handles reading STORM movie files. Currently this
is limited to the dax, fits, spe and tif formats.

Hazen 06/13
"""

import hashlib
import numpy
import os
import re
import tifffile

# Avoid making astropy mandatory for everybody.
try:
    from astropy.io import fits
except ImportError:
    pass


def inferReader(filename, verbose=False):
    """
    Given a file name this will try to return the appropriate
    reader based on the file extension.
    """
    ext = os.path.splitext(filename)[1]
    if (ext == ".dax"):
        return DaxReader(filename, verbose=verbose)
    elif (ext == ".fits"):
        return FITSReader(filename, verbose=verbose)
    elif (ext == ".spe"):
        return SpeReader(filename, verbose=verbose)
    elif (ext == ".tif") or (ext == ".tiff"):
        return TifReader(filename, verbose=verbose)
    else:
        print(ext, "is not a recognized file type")
        raise IOError("only .dax, .spe and .tif are supported (case sensitive..)")


class Reader(object):
    """
    The superclass containing those functions that
    are common to reading a STORM movie file.

    Subclasses should implement:
    1. __init__(self, filename, verbose = False)
       This function should open the file and extract the
       various key bits of meta-data such as the size in XY
       and the length of the movie.

    2. loadAFrame(self, frame_number)
       Load the requested frame and return it as numpy array.
    """

    def __init__(self, filename, verbose=False):
        super(Reader, self).__init__()
        self.filename = filename
        self.fileptr = None
        self.verbose = verbose

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, etype, value, traceback):
        self.close()

    def averageFrames(self, start=None, end=None):
        """
        Average multiple frames in a movie.
        """
        length = 0
        average = numpy.zeros((self.image_height, self.image_width), numpy.float)
        for [i, frame] in self.frameIterator(start, end):
            if self.verbose and ((i % 10) == 0):
                print(" processing frame:", i, " of", self.number_frames)
            length += 1
            average += frame
        if (length > 0):
            average = average / float(length)
        return average

    def close(self):
        if self.fileptr is not None:
            self.fileptr.close()
            self.fileptr = None

    def filmFilename(self):
        """
        Returns the film name.
        """
        return self.filename

    def filmSize(self):
        """
        Returns the film size.
        """
        return [self.image_width, self.image_height, self.number_frames]

    def filmLocation(self):
        """
        Returns the picture x,y location, if available.
        """
        if hasattr(self, "stage_x"):
            return [self.stage_x, self.stage_y]
        else:
            return [0.0, 0.0]

    def filmScale(self):
        """
        Returns the scale used to display the film when
        the picture was taken.
        """
        if hasattr(self, "scalemin") and hasattr(self, "scalemax"):
            return [self.scalemin, self.scalemax]
        else:
            return [100, 2000]

    def frameIterator(self, start=None, end=None):
        """
        Iterator for going through the frames of a movie.
        """
        if start is None:
            start = 0
        if end is None:
            end = self.number_frames
        for i in range(start, end):
            yield [i, self.loadAFrame(i)]

    def hashID(self):
        """
        A (hopefully) unique string that identifies this movie.
        """
        return hashlib.md5(self.loadAFrame(0).tostring()).hexdigest()

    def loadAFrame(self, frame_number):
        assert frame_number >= 0, "Frame_number must be greater than or equal to 0, it is " + str(frame_number)
        assert frame_number < self.number_frames, "Frame number must be less than " + str(self.number_frames)

    def lockTarget(self):
        """
        Returns the film focus lock target.
        """
        if hasattr(self, "lock_target"):
            return self.lock_target
        else:
            return 0.0


class DaxReader(Reader):
    """
    Dax reader class. This is a Zhuang lab custom format.
    """

    def __init__(self, filename, verbose=False):
        super(DaxReader, self).__init__(filename, verbose=verbose)

        # save the filenames
        dirname = os.path.dirname(filename)
        if (len(dirname) > 0):
            dirname = dirname + "/"
        self.inf_filename = dirname + os.path.splitext(os.path.basename(filename))[0] + ".inf"

        # defaults
        self.image_height = None
        self.image_width = None

        # extract the movie information from the associated inf file
        size_re = re.compile(r'frame dimensions = ([\d]+) x ([\d]+)')
        length_re = re.compile(r'number of frames = ([\d]+)')
        endian_re = re.compile(r' (big|little) endian')
        stagex_re = re.compile(r'Stage X = ([\d\.\-]+)')
        stagey_re = re.compile(r'Stage Y = ([\d\.\-]+)')
        lock_target_re = re.compile(r'Lock Target = ([\d\.\-]+)')
        scalemax_re = re.compile(r'scalemax = ([\d\.\-]+)')
        scalemin_re = re.compile(r'scalemin = ([\d\.\-]+)')

        inf_file = open(self.inf_filename, "r")
        while 1:
            line = inf_file.readline()
            if not line: break
            m = size_re.match(line)
            if m:
                self.image_height = int(m.group(2))
                self.image_width = int(m.group(1))
            m = length_re.match(line)
            if m:
                self.number_frames = int(m.group(1))
            m = endian_re.search(line)
            if m:
                if m.group(1) == "big":
                    self.bigendian = 1
                else:
                    self.bigendian = 0
            m = stagex_re.match(line)
            if m:
                self.stage_x = float(m.group(1))
            m = stagey_re.match(line)
            if m:
                self.stage_y = float(m.group(1))
            m = lock_target_re.match(line)
            if m:
                self.lock_target = float(m.group(1))
            m = scalemax_re.match(line)
            if m:
                self.scalemax = int(m.group(1))
            m = scalemin_re.match(line)
            if m:
                self.scalemin = int(m.group(1))
        inf_file.close()

        # set defaults, probably correct, but warn the user
        # that they couldn't be determined from the inf file.
        if not self.image_height:
            print("Could not determine image size, assuming 256x256.")
            self.image_height = 256
            self.image_width = 256

        # open the dax file
        if os.path.exists(filename):
            self.fileptr = open(filename, "rb")
        else:
            if self.verbose:
                print("dax data not found", filename)

    def loadAFrame(self, frame_number):
        """
        Load a frame & return it as a numpy array.
        """
        super(DaxReader, self).loadAFrame(frame_number)
        self.fileptr.seek(frame_number * self.image_height * self.image_width * 2)
        image_data = numpy.fromfile(self.fileptr, dtype='uint16', count=self.image_height * self.image_width)
        image_data = numpy.reshape(image_data, [self.image_height, self.image_width])
        if self.bigendian:
            image_data.byteswap(True)
        return image_data


class FITSReader(Reader):
    """
    FITS file reader class.

    FIXME: This depends on internals of astropy.io.fits that I'm sure
           we are not supposed to be messing with. The problem is that
           astropy.io.fits does not support memmap'd images when the
           image is scaled (which is pretty much always the case?). To
           get around this we set _ImageBaseHDU._do_not_scale_image_data
           to True, then do the image scaling ourselves.

           We want memmap = True as generally it won't make sense to
           load the entire movie into memory.

           Another consequence of this is that we only support
           'pseudo unsigned' 16 bit FITS format files.
    """

    def __init__(self, filename, verbose=False):
        super(FITSReader, self).__init__(filename, verbose=verbose)

        self.hdul = fits.open(filename, memmap=True)

        hdr = self.hdul[0].header

        # We only handle 16 bit FITS files.
        assert ((hdr['BITPIX'] == 16) and (hdr['bscale'] == 1) and (hdr['bzero'] == 32768)), \
            "Only 16 bit pseudo-unsigned FITS format is currently supported!"

        # Get image size. We're assuming that the film is a data cube in
        # the first / primary HDU.
        #
        self.image_height = hdr['naxis2']
        self.image_width = hdr['naxis1']
        if (hdr['naxis'] == 3):
            self.number_frames = hdr['naxis3']
        else:
            self.number_frames = 1

        self.hdu0 = self.hdul[0]

        # Hack, override astropy.io.fits internal so that we can load
        # data with memmap = True.
        #
        self.hdu0._do_not_scale_image_data = True

    def close(self):
        pass

    def loadAFrame(self, frame_number):
        super(FITSReader, self).loadAFrame(frame_number)

        frame = self.hdu0.data[frame_number, :, :].astype(numpy.uint16)
        frame -= 32768

        return frame


class SpeReader(Reader):
    """
    SPE (Roper Scientific) reader class.
    """

    def __init__(self, filename, verbose=False):
        super(SpeReader, self).__init__(filename, verbose=verbose)

        # open the file & read the header
        self.header_size = 4100
        self.fileptr = open(filename, "rb")

        self.fileptr.seek(42)
        self.image_width = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(656)
        self.image_height = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(1446)
        self.number_frames = int(numpy.fromfile(self.fileptr, numpy.uint32, 1)[0])

        self.fileptr.seek(108)
        image_mode = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        if (image_mode == 0):
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.float32
        elif (image_mode == 1):
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.uint32
        elif (image_mode == 2):
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.int16
        elif (image_mode == 3):
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.uint16
        else:
            print("unrecognized spe image format: ", image_mode)

    def loadAFrame(self, frame_number, cast_to_int16=True):
        """
        Load a frame & return it as a numpy array.
        """
        super(SpeReader, self).loadAFrame(frame_number)

        self.fileptr.seek(self.header_size + frame_number * self.image_size)
        image_data = numpy.fromfile(self.fileptr, dtype=self.image_mode, count=self.image_height * self.image_width)
        if cast_to_int16:
            image_data = image_data.astype(numpy.uint16)
        image_data = numpy.reshape(image_data, [self.image_height, self.image_width])
        return image_data


class TifReader(Reader):
    """
    TIF reader class.

    This is supposed to handle the following:
    1. A normal Tiff file with one frame/image per page.
    2. Tiff files with multiple frames on a single page.
    3. Tiff files with multiple frames on multiple pages.
    """

    def __init__(self, filename, verbose=False):
        super(TifReader, self).__init__(filename, verbose)

        self.page_data = None
        self.page_number = -1

        # Save the filename
        self.fileptr = tifffile.TiffFile(filename)
        number_pages = len(self.fileptr.pages)

        # Single page Tiff file, which might be a "ImageJ Tiff"
        # with many frames on a page.
        #
        if (number_pages == 1):

            # Determines the size without loading the entire file.
            isize = self.fileptr.series[0].shape

            # Check if this is actually just a single frame tiff, if
            # it is we'll just load it into memory.
            #
            if (len(isize) == 2):
                self.frames_per_page = 1
                self.number_frames = 1
                self.image_height = isize[0]
                self.image_width = isize[1]
                self.page_data = self.fileptr.asarray()

            # Otherwise we'll memmap it in case it is really large.
            #
            else:
                self.frames_per_page = isize[0]
                self.number_frames = isize[0]
                self.image_height = isize[1]
                self.image_width = isize[2]
                self.page_data = self.fileptr.asarray(out='memmap')

        # Multiple page Tiff file.
        #
        else:
            isize = self.fileptr.asarray(key=0).shape

            # Check for one frame per page.
            if (len(isize) == 2):
                self.frames_per_page = 1
                self.number_frames = number_pages
                self.image_height = isize[0]
                self.image_width = isize[1]

            # Multiple frames per page.
            #
            # FIXME: No unit test for this kind of file.
            #
            else:
                self.frames_per_page = isize[0]
                self.number_frames = number_pages * isize[0]
                self.image_height = isize[1]
                self.image_width = isize[2]

        if self.verbose:
            print("{0:0d} frames per page, {1:0d} pages".format(self.frames_per_page, number_pages))

    def loadAFrame(self, frame_number, cast_to_int16=True):
        super(TifReader, self).loadAFrame(frame_number)

        # All the data is on a single page.
        if self.number_frames == self.frames_per_page:
            if (self.number_frames == 1):
                image_data = self.page_data
            else:
                image_data = self.page_data[frame_number, :, :]

        # Multiple frames of data on multiple pages.
        elif (self.frames_per_page > 1):
            page = int(frame_number / self.frames_per_page)
            frame = frame_number % self.frames_per_page

            # This is an optimization for files with a large number of frames
            # per page. In this case tifffile will keep loading the entire
            # page over and over again, which really slows everything down.
            # Ideally tifffile would let us specify which frame on the page
            # we wanted.
            #
            # Since it was going to load the whole thing anyway we'll have
            # memory overflow either way, so not much we can do about that
            # except hope for small file sizes.
            #
            if (page != self.page_number):
                self.page_data = self.fileptr.asarray(key=page)
                self.page_number = page
            image_data = self.page_data[frame, :, :]

        # One frame on each page.
        else:
            image_data = self.fileptr.asarray(key=frame_number)

        assert (len(image_data.shape) == 2), "Not a monochrome tif image! " + str(image_data.shape)

        if cast_to_int16:
            image_data = image_data.astype(numpy.uint16)

        return image_data


if (__name__ == "__main__"):

    import sys

    if (len(sys.argv) != 2):
        print("usage: <movie>")
        exit()

    movie = inferReader(sys.argv[1], verbose=True)
    print("Movie size is", movie.filmSize())

    frame = movie.loadAFrame(0)
    print(frame.shape, type(frame), frame.dtype)

#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
datawriter.py
#!/usr/bin/env python
"""
Writes dax files or tiff files. This is mostly used
by the simulator.

We try and follow a convention were the first dimension (slow
axis) is the image height and the second dimension (fast axis)
is the image width, so image.shape = [height, width]

Hazen 1/18
"""

import numpy
import os
import tifffile

# Import here to avoid making astropy mandatory for everybody.
try:
    from astropy.io import fits
except ImportError:
    pass


def inferWriter(filename, width = None, height = None):
    """
    Given a file name this will try to return the appropriate
    writer based on the file extension.
    """
    ext = os.path.splitext(filename)[1]
    if (ext == ".dax"):
        return DaxWriter(filename, width = width, height = height)
    elif (ext == ".fits"):
        return FITSWriter(filename, width = width, height = height)
    elif (ext == ".tif") or (ext == ".tiff"):
        return TiffWriter(filename, width = width, height = height)
    else:
        print(ext, "is not a recognized file type")
        raise IOError("only .dax and .tif are supported (case sensitive..)")

def dummyDaxFile(name, x_size, y_size):
    ddax = DaxWriter(name, width = x_size, height = y_size)
    frame = numpy.ones((x_size, y_size))
    ddax.addFrame(frame)
    ddax.close()

def singleFrameDax(name, frame):
    [fx, fy] = frame.shape
    dax_file = DaxWriter(name, width = fy, height = fx)
    dax_file.addFrame(frame)
    dax_file.close()


class Writer(object):

    def __init__(self, width = None, height = None, **kwds):
        super(Writer, self).__init__(**kwds)
        self.w = width
        self.h = height

    def frameToU16(self, frame):
        frame = frame.copy()
        frame[(frame < 0)] = 0
        frame[(frame > 65535)] = 65535
        return numpy.round(frame).astype(numpy.uint16)


class DaxWriter(Writer):

    def __init__(self, name, **kwds):
        super(DaxWriter, self).__init__(**kwds)
        self.name = name
        if len(os.path.dirname(name)) > 0:
            self.root_name = os.path.dirname(name) + "/" + os.path.splitext(os.path.basename(name))[0]
        else:
            self.root_name = os.path.splitext(os.path.basename(name))[0]
        self.fp = open(self.name, "wb")
        self.l = 0

    def addFrame(self, frame):
        frame = self.frameToU16(frame)
        if (self.w is None) or (self.h is None):
            [self.h, self.w] = frame.shape
        else:
            assert(self.h == frame.shape[0])
            assert(self.w == frame.shape[1])
        frame.tofile(self.fp)
        self.l += 1

    def close(self):
        self.fp.close()
        self.w = int(self.w)
        self.h = int(self.h)
        inf_fp = open(self.root_name + ".inf", "w")
        inf_fp.write("binning = 1 x 1\n")
        inf_fp.write("data type = 16 bit integers (binary, little endian)\n")
        inf_fp.write("frame dimensions = " + str(self.w) + " x " + str(self.h) + "\n")
        inf_fp.write("number of frames = " + str(self.l) + "\n")
        inf_fp.write("Lock Target = 0.0\n")
        if True:
            inf_fp.write("x_start = 1\n")
            inf_fp.write("x_end = " + str(self.w) + "\n")
            inf_fp.write("y_start = 1\n")
            inf_fp.write("y_end = " + str(self.h) + "\n")
        inf_fp.close()


class FITSWriter(Writer):
    """
    This is mostly for testing. It will store all the movie data in
    memory, then dump it when the file is closed.
    """

    def __init__(self, filename, **kwds):
        super(FITSWriter, self).__init__(**kwds)
        self.filename = filename
        self.frames = []

    def addFrame(self, frame):
        frame = self.frameToU16(frame)
        if (self.w is None) or (self.h is None):
            [self.h, self.w] = frame.shape
        else:
            assert(self.h == frame.shape[0])
            assert(self.w == frame.shape[1])
        self.frames.append(frame)

    def close(self):
        # Remove old file, if any.
        if os.path.exists(self.filename):
            os.remove(self.filename)

        data = numpy.zeros((len(self.frames), self.h, self.w), dtype = numpy.uint16)
        for i in range(len(self.frames)):
            data[i,:,:] = self.frames[i]

        hdu = fits.PrimaryHDU(data)
        hdu.writeto(self.filename)


class TiffWriter(Writer):

    def __init__(self, filename, **kwds):
        super(TiffWriter, self).__init__(**kwds)
        self.tif_fp = tifffile.TiffWriter(filename)

    def addFrame(self, frame):
        frame = self.frameToU16(frame)
        # Enforce that all the frames are the same size.
        if (self.h is None) or (self.w is None):
            [self.h, self.w] = frame.shape
        else:
            assert(self.h == frame.shape[0])
            assert(self.w == frame.shape[1])
        self.tif_fp.save(frame)

    def close(self):
        self.tif_fp.close()
Any suggestions for making my code faster would be welcome.
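One likely reason the multiprocessing version is no faster, as a hedged observation: ctx.Process(Multi.f(dataroot)) calls f(dataroot) in the parent process and passes its return value to Process, so all of the conversion work still runs serially before the child process even starts; the target must be passed uncalled, e.g. ctx.Process(target=Multi.f, args=(dataroot,)). Even then, a single worker does the same serial work; the usual pattern is to spread the files over a pool of workers. A minimal sketch, assuming a convert_one() helper that wraps the per-file TifReader/DaxWriter loop from the original code and a jobs list pairing each path with its frame count (both hypothetical):

from multiprocessing import Pool

import datareader
import datawriter

def convert_one(job):
    # Convert a single .tif file to .dax; runs in a worker process.
    path, frames = job
    dax_file = datawriter.DaxWriter("{}.dax".format(path[:-4]))
    reader = datareader.TifReader(path)
    for i in range(frames):
        dax_file.addFrame(reader.loadAFrame(i))
    dax_file.close()
    return path

if __name__ == '__main__':
    jobs = [("a.tif", 100), ("b.tif", 200)]  # hypothetical (path, frame count) pairs
    with Pool() as pool:
        for done in pool.imap_unordered(convert_one, jobs):
            print("finished", done)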

How can I use BytesIO as a pandas.read_csv data source

I am trying to perform CSV data parsing using pandas.read_csv(bytes, chunksize=n), where bytes is an ongoing stream of data which I want to receive from a database CLOB field, reading it in chunks.
reader = pandas.read_csv(io.BytesIO(b'1;qwer\n2;asdf\n3;zxcv'), sep=';', chunksize=2)
for row_chunk in reader:
    print(row_chunk)
The code above works fine, but I want to use an updatable stream instead of the fixed io.BytesIO(b'...').
I tried to redefine the read method like this:
class BlobIO(io.BytesIO):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._chunk_size = 4
        self._file_data_table = 'my_table'
        self._job_id = 'job_id'
        self._get_raw_sql = """
            select dbms_lob.substr(body, {0}, {1})
            from {2}
            where job_id = '{3}'
        """
        dsn_tns = cx_Oracle.makedsn('host', 'port', 'service_name')
        self.ora_con = cx_Oracle.connect('ora_user', 'ora_pass', dsn_tns)
        self.res = b''
        self.ora_cur = self.ora_con.cursor()
        self.chunker = self.get_chunk()
        next(self.chunker)

    def get_chunk(self):
        returned = 0
        sended = (yield)
        self._chunk_size = sended or self._chunk_size
        while True:
            to_exec = self._get_raw_sql.format(
                self._chunk_size,
                returned + 1,
                self._file_data_table,
                self._job_id)
            self.ora_cur.execute(to_exec)
            self.res = self.ora_cur.fetchall()[0][0]
            returned += self._chunk_size
            yield self.res
            sended = (yield self.res)
            self._chunk_size = sended or self._chunk_size
            if not self.res:
                break

    def read(self, nbytes=None):
        if nbytes:
            self.chunker.send(nbytes)
        else:
            self.chunker.send(self._chunk_size)
        try:
            to_return = next(self.chunker)
        except StopIteration:
            self.ora_con.close()
            to_return = b''
        return to_return

buffer = BlobIO()
reader = pandas.read_csv(buffer, encoding='cp1251', sep=';', chunksize=2)
but it looks like I'm doing something completely wrong, because pandas.read_csv never returns on that last line and I don't understand what is happening there.
Maybe creating buffer = BytesIO(b'') and then writing new data to the buffer with buffer.write(new_chunk_from_db) would be a better approach, but I don't understand when exactly I should call such a write action.
I believe I could create a temporary file with the contents of the CLOB and then pass that to read_csv, but I really want to skip this step and read the data directly from the database.
Please give me some directions.
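For what it's worth, one generic way to make the "updatable stream" idea concrete, as a sketch: wrap any iterator of bytes chunks (the Oracle specifics are stubbed out) in a file-like object whose read() buffers chunks until the requested size is available. IteratorIO is a hypothetical helper, not part of any library:

import io
import pandas

class IteratorIO(io.RawIOBase):
    """File-like wrapper over an iterator of bytes chunks (hypothetical helper)."""

    def __init__(self, chunks):
        self._chunks = iter(chunks)
        self._buffer = b''

    def readable(self):
        return True

    def read(self, size=-1):
        # Pull chunks until the request can be satisfied (or the source ends).
        while size < 0 or len(self._buffer) < size:
            try:
                self._buffer += next(self._chunks)
            except StopIteration:
                break
        if size < 0:
            data, self._buffer = self._buffer, b''
        else:
            data, self._buffer = self._buffer[:size], self._buffer[size:]
        return data

chunks = iter([b'1;qwer\n2;as', b'df\n3;zxcv'])  # stand-in for DB chunks
for row_chunk in pandas.read_csv(IteratorIO(chunks), sep=';', chunksize=2):
    print(row_chunk)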
cx_Oracle provides a native way to read LOBs. It seems that overriding BytesIO.read with the cx_Oracle LOB read does the job:
class BlobIO(BytesIO):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.res = b''
        self.ora_con = db.get_conn()
        self.ora_cur = self.ora_con.cursor()
        self.ora_cur.execute("select lob from table")
        self.res = self.ora_cur.fetchall()[0][0]
        self.offset = 1

    def read(self, size=None):
        r = self.res.read(self.offset, size)
        self.offset += size
        # size + 1 should be here to perform non-overlapping reads,
        # but it looks like the pandas C parser uses some kind of overlapping,
        # because while testing size + 1 the parser occasionally missed some bytes
        if not r:
            self.ora_cur.close()
            self.ora_con.close()
        return r

blob_buffer = BlobIO()
reader = pandas.read_csv(
    blob_buffer,
    chunksize=JobContext.rchunk_size)
for row_chunk in reader:
    print(row_chunk)

Wrap an io.BufferedIOBase such that it becomes seek-able

I was trying to craft a response to a question about streaming audio from an HTTP server and then playing it with PyGame. I had the code mostly complete, but hit an error where the PyGame music functions tried to seek() on the urllib.HTTPResponse object.
According to the urllib docs, the urllib.HTTPResponse object (since v3.5) is an io.BufferedIOBase. I expected this would make the stream seek()able; however, it does not.
Is there a way to wrap the io.BufferedIOBase such that it is smart enough to buffer enough data to handle the seek operation?
import pygame
import urllib.request
import io

# Window size
WINDOW_WIDTH  = 400
WINDOW_HEIGHT = 400

# background colour
SKY_BLUE = (161, 255, 254)

### Begin the streaming of a file
### Return the urllib.HTTPResponse, a file-like-object
def openURL( url ):
    result = None
    try:
        http_response = urllib.request.urlopen( url )
        print( "streamHTTP() - Fetching URL [%s]" % ( http_response.geturl() ) )
        print( "streamHTTP() - Response Status [%d] / [%s]" % ( http_response.status, http_response.reason ) )
        result = http_response
    except:
        print( "streamHTTP() - Error Fetching URL [%s]" % ( url ) )
    return result

### MAIN
pygame.init()
window = pygame.display.set_mode( ( WINDOW_WIDTH, WINDOW_HEIGHT ) )
pygame.display.set_caption("Music Streamer")
clock = pygame.time.Clock()
done = False
while not done:

    # Handle user-input
    for event in pygame.event.get():
        if ( event.type == pygame.QUIT ):
            done = True

    # Keys
    keys = pygame.key.get_pressed()
    if ( keys[pygame.K_UP] ):
        if ( pygame.mixer.music.get_busy() ):
            print("busy")
        else:
            print("play")
            remote_music = openURL( 'http://127.0.0.1/example.wav' )
            if ( remote_music != None and remote_music.status == 200 ):
                pygame.mixer.music.load( io.BufferedReader( remote_music ) )
                pygame.mixer.music.play()

    # Re-draw the screen
    window.fill( SKY_BLUE )

    # Update the window, but not more than 60fps
    pygame.display.flip()
    clock.tick_busy_loop( 60 )

pygame.quit()
When this code runs, and Up is pushed, it fails with the error:
streamHTTP() - Fetching URL [http://127.0.0.1/example.wav]
streamHTTP() - Response Status [200] / [OK]
io.UnsupportedOperation: seek
io.UnsupportedOperation: File or stream is not seekable.
io.UnsupportedOperation: seek
io.UnsupportedOperation: File or stream is not seekable.
Traceback (most recent call last):
  File "./sound_stream.py", line 57, in <module>
    pygame.mixer.music.load( io.BufferedReader( remote_music ) )
pygame.error: Unknown WAVE format
I also tried re-opening the io stream, and various other re-implementations of the same sort of thing.
Seeking seeking
According to the urllib docs, the urllib.HTTPResponse object (since v3.5) is an io.BufferedIOBase. I expected this would make the stream seek()able, however it does not.
That's correct. The io.BufferedIOBase interface doesn't guarantee the I/O object is seekable. For HTTPResponse objects, IOBase.seekable() returns False:
>>> import urllib.request
>>> response = urllib.request.urlopen("http://httpbin.org/get")
>>> response
<http.client.HTTPResponse object at 0x110870ca0>
>>> response.seekable()
False
That's because the BufferedIOBase implementation offered by HTTPResponse is wrapping a socket object, and sockets are not seekable either.
You can't wrap a BufferedIOBase object in a BufferedReader object and add seeking support. The Buffered* wrapper objects can only wrap RawIOBase types, and they rely on the wrapped object to provide seeking support. You would have to emulate seeking at the raw I/O level, see below.
You can still provide the same functionality at a higher level, but take into account that seeking on remote data is a lot more involved; this isn't a simple 'change an OS variable that represents a file position on disk' operation. For larger remote file data, seeking without backing the whole file on disk locally could be as sophisticated as using HTTP range requests and local (in-memory or on-disk) buffers to balance sound play-back performance and minimising local data storage. Doing this correctly for a wide range of use-cases can be a lot of effort, so it is certainly not part of the Python standard library.
If your sound files are small
If your HTTP-sourced sound files are small enough (a few MB at most) then just read the whole response into an in-memory io.BytesIO() file object. I really do not think it is worth making this more complicated than that, because the moment you have enough data to make that worth pursuing your files are large enough to take up too much memory!
So this would be more than enough if your sound files are smaller (no more than a few MB):
from io import BytesIO
import urllib.error
import urllib.request

def open_url(url):
    try:
        http_response = urllib.request.urlopen(url)
        print(f"streamHTTP() - Fetching URL [{http_response.geturl()}]")
        print(f"streamHTTP() - Response Status [{http_response.status}] / [{http_response.reason}]")
    except urllib.error.URLError:
        print(f"streamHTTP() - Error Fetching URL [{url}]")
        return
    if http_response.status != 200:
        print(f"streamHTTP() - Error Fetching URL [{url}]")
        return
    return BytesIO(http_response.read())
This doesn't require writing a wrapper object, and because BytesIO is a native implementation, once the data is fully copied over, access to the data is faster than any Python-code wrapper could ever give you.
Note that this returns a BytesIO file object, so you no longer need to test for the response status:
remote_music = open_url('http://127.0.0.1/example.wav')
if remote_music is not None:
    pygame.mixer.music.load(remote_music)
    pygame.mixer.music.play()
If they are more than a few MB
Once you go beyond a few megabytes, you could try pre-loading the data into a local file object. You can make this more sophisticated by using a thread to have shutil.copyfileobj() copy most of the data into that file in the background and give the file to PyGame after loading just an initial amount of data.
By using an actual file object, you can actually help performance here, as PyGame will try to minimize interjecting itself between the SDL mixer and the file data. If there is an actual file on disk with a file number (the OS-level identifier for a stream, something that the SDL mixer library can make use of), then PyGame will operate directly on that and so minimize blocking the GIL (which in turn will help the Python portions of your game perform better!). And if you pass in a filename (just a string), then PyGame gets out of the way entirely and leaves all file operations over to the SDL library.
Here's such an implementation; this should, on normal Python interpreter exit, clean up the downloaded files automatically. It returns a filename for PyGame to work on, and finalizing downloading the data is done in a thread after the initial few KB has been buffered. It will avoid loading the same URL more than once, and I've made it thread-safe:
import shutil
import urllib.error
import urllib.request
from tempfile import NamedTemporaryFile
from threading import Lock, Thread

INITIAL_BUFFER = 1024 * 8  # 8kb initial file read to start URL-backed files

_url_files_lock = Lock()
# stores open NamedTemporaryFile objects, keeping them 'alive'
# removing entries from here causes the file data to be deleted.
_url_files = {}

def open_url(url):
    with _url_files_lock:
        if url in _url_files:
            return _url_files[url].name
    try:
        http_response = urllib.request.urlopen(url)
        print(f"streamHTTP() - Fetching URL [{http_response.geturl()}]")
        print(f"streamHTTP() - Response Status [{http_response.status}] / [{http_response.reason}]")
    except urllib.error.URLError:
        print(f"streamHTTP() - Error Fetching URL [{url}]")
        return
    if http_response.status != 200:
        print(f"streamHTTP() - Error Fetching URL [{url}]")
        return

    fileobj = NamedTemporaryFile()
    content_length = http_response.getheader("Content-Length")
    if content_length is not None:
        try:
            content_length = int(content_length)
        except ValueError:
            content_length = None
    if content_length:
        # create sparse file of full length
        fileobj.seek(content_length - 1)
        fileobj.write(b"\0")
        fileobj.seek(0)

    fileobj.write(http_response.read(INITIAL_BUFFER))
    with _url_files_lock:
        if url in _url_files:
            # another thread raced us to this point, we lost, return their
            # result after cleaning up here
            fileobj.close()
            http_response.close()
            return _url_files[url].name
        # store the file object for this URL; this keeps the file
        # open and so readable if you have the filename.
        _url_files[url] = fileobj

    def copy_response_remainder():
        # copies file data from response to disk, for all data past INITIAL_BUFFER
        with http_response:
            shutil.copyfileobj(http_response, fileobj)

    t = Thread(daemon=True, target=copy_response_remainder)
    t.start()

    return fileobj.name
Like the BytesIO() solution, the above returns either None or a value ready to pass to pygame.mixer.music.load().
The above will probably not work if you try to immediately set an advanced playing position in your sound files, as later data may not yet have been copied into the file. It's a trade-off.
Seeking and finding third party libraries
If you need to have full seeking support on remote URLs and don't want to use on-disk space for them and don't want to have to worry about their size, you don't need to re-invent the HTTP-as-seekable-file wheel here. You could use an existing project that offers the same functionality. I found two that offer io.BufferedIOBase-based implementations:
smart_open
httpio
Both use HTTP Range requests to implement seeking support. Just use httpio.open(URL) or smart_open.open(URL) and pass that directly to pygame.mixer.music.load(); if the URL can't be opened, you can catch that by handling the IOError exception:
from smart_open import open as url_open # or from httpio import open
try:
remote_music = url_open('http://127.0.0.1/example.wav')
except IOError:
pass
else:
pygame.mixer.music.load(remote_music)
pygame.mixer.music.play()
smart_open uses an in-memory buffer to satisfy reads of a fixed size, but creates a new HTTP Range request for every call to seek that changes the current file position, so performance may vary. Since the SDL mixer executes a few seeks on audio files to determine their type, I expect this to be a little slower.
httpio can buffer blocks of data and so might handle seeks better, but from a brief glance at the source code, when actually setting a buffer size the cached blocks are never evicted from memory again so you'd end up with the whole file in memory, eventually.
Implementing seeking ourselves, via io.RawIOBase
And finally, because I was not able to find an efficient HTTP-Range-backed I/O implementation, I wrote my own. The following implements the io.RawIOBase interface, specifically so you can then wrap the object in an io.BufferedReader() and so delegate caching to a caching buffer that will be managed correctly when seeking:
import io
from copy import deepcopy
from functools import wraps
from typing import cast, overload, Callable, Optional, Tuple, TypeVar, Union
from urllib.request import urlopen, Request

T = TypeVar("T")

@overload
def _check_closed(_f: T) -> T: ...
@overload
def _check_closed(*, connect: bool, default: Union[bytes, int]) -> Callable[[T], T]: ...
def _check_closed(
    _f: Optional[T] = None,
    *,
    connect: bool = False,
    default: Optional[Union[bytes, int]] = None,
) -> Union[T, Callable[[T], T]]:
    def decorator(f: T) -> T:
        @wraps(cast(Callable, f))
        def wrapper(self, *args, **kwargs):
            if self.closed:
                raise ValueError("I/O operation on closed file.")
            if connect and (self._fp is None or self._fp.closed):
                self._connect()
            if self._fp is None:
                # outside the seekable range, exit early
                return default
            try:
                return f(self, *args, **kwargs)
            except Exception:
                self.close()
                raise
            finally:
                if self._range_end and self._pos >= self._range_end:
                    self._fp.close()
                    del self._fp
        return cast(T, wrapper)

    if _f is not None:
        return decorator(_f)
    return decorator

def _parse_content_range(
    content_range: str
) -> Tuple[Optional[int], Optional[int], Optional[int]]:
    """Parse a Content-Range header into a (start, end, length) tuple"""
    units, *range_spec = content_range.split(None, 1)
    if units != "bytes" or not range_spec:
        return (None, None, None)
    start_end, _, size = range_spec[0].partition("/")
    try:
        length: Optional[int] = int(size)
    except ValueError:
        length = None
    start_val, has_start_end, end_val = start_end.partition("-")
    start = end = None
    if has_start_end:
        try:
            start, end = int(start_val), int(end_val)
        except ValueError:
            pass
    return (start, end, length)

class HTTPRawIO(io.RawIOBase):
    """Wrap a HTTP socket to handle seeking via HTTP Range"""

    url: str
    closed: bool = False
    _pos: int = 0
    _size: Optional[int] = None
    _range_end: Optional[int] = None
    _fp: Optional[io.RawIOBase] = None

    def __init__(self, url_or_request: Union[Request, str]) -> None:
        if isinstance(url_or_request, str):
            self._request = Request(url_or_request)
        else:
            # copy request objects to avoid sharing state
            self._request = deepcopy(url_or_request)
        self.url = self._request.full_url
        self._connect(initial=True)

    def readable(self) -> bool:
        return True

    def seekable(self) -> bool:
        return True

    def close(self) -> None:
        if self.closed:
            return
        if self._fp:
            self._fp.close()
            del self._fp
        self.closed = True

    @_check_closed
    def tell(self) -> int:
        return self._pos

    def _connect(self, initial: bool = False) -> None:
        if self._fp is not None:
            self._fp.close()
        if self._size is not None and self._pos >= self._size:
            # can't read past the end
            return
        request = self._request
        request.add_unredirected_header("Range", f"bytes={self._pos}-")
        response = urlopen(request)
        self.url = response.geturl()  # could have been redirected
        if response.status not in (200, 206):
            raise OSError(
                f"Failed to open {self.url}: "
                f"{response.status} ({response.reason})"
            )
        if initial:
            # verify that the server supports range requests. Capture the
            # content length if available
            if response.getheader("Accept-Ranges") != "bytes":
                raise OSError(
                    f"Resource doesn't support range requests: {self.url}"
                )
            try:
                length = int(response.getheader("Content-Length", ""))
                if length >= 0:
                    self._size = length
            except ValueError:
                pass

        # validate the range we are being served
        start, end, length = _parse_content_range(
            response.getheader("Content-Range", "")
        )
        if self._size is None:
            self._size = length
        if (start is not None and start != self._pos) or (
            length is not None and length != self._size
        ):
            # non-sensical range response
            raise OSError(
                f"Resource at {self.url} served invalid range: pos is "
                f"{self._pos}, range {start}-{end}/{length}"
            )
        if self._size and end is not None and end + 1 < self._size:
            # incomplete range, not reaching all the way to the end
            self._range_end = end
        else:
            self._range_end = None

        fp = cast(io.BufferedIOBase, response.fp)  # typeshed doesn't name fp
        self._fp = fp.detach()  # assume responsibility for the raw socket IO

    @_check_closed
    def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
        relative_to = {
            io.SEEK_SET: 0,
            io.SEEK_CUR: self._pos,
            io.SEEK_END: self._size,
        }.get(whence)
        if relative_to is None:
            if whence == io.SEEK_END:
                raise IOError(
                    f"Can't seek from end on unsized resource {self.url}"
                )
            raise ValueError(f"whence value {whence} unsupported")
        if -offset > relative_to:  # can't seek to a point before the start
            raise OSError(22, "Invalid argument")
        self._pos = relative_to + offset
        # there is no point in optimising an existing connection
        # by reading from it if seeking forward below some threshold.
        # Use a BufferedReader to avoid seeking by small amounts or by 0
        if self._fp:
            self._fp.close()
            del self._fp
        return self._pos

    # all read* methods delegate to the SocketIO object (itself a RawIO
    # implementation).
    @_check_closed(connect=True, default=b"")
    def read(self, size: int = -1) -> Optional[bytes]:
        assert self._fp is not None  # show type checkers we already checked
        res = self._fp.read(size)
        if res is not None:
            self._pos += len(res)
        return res

    @_check_closed(connect=True, default=b"")
    def readall(self) -> bytes:
        assert self._fp is not None  # show type checkers we already checked
        res = self._fp.readall()
        self._pos += len(res)
        return res

    @_check_closed(connect=True, default=0)
    def readinto(self, buffer: bytearray) -> Optional[int]:
        assert self._fp is not None  # show type checkers we already checked
        n = self._fp.readinto(buffer)
        self._pos += n or 0
        return n
Remember that this is a RawIOBase object, which you really want to wrap in a BufferedReader(). Doing so in open_url() looks like this:
def open_url(url, *args, **kwargs):
    return io.BufferedReader(HTTPRawIO(url), *args, **kwargs)
This gives you fully buffered I/O, with full seeking support, over a remote URL, and the BufferedReader implementation will minimise resetting the HTTP connection when seeking. I've found that when using this with the PyGame mixer, only a single HTTP connection is made, as all the test seeks are within the default 8KB buffer.
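Hooked into the earlier example, usage would look roughly like this (same placeholder URL as before; HTTPRawIO raises OSError if the resource can't be opened or doesn't support ranges):

try:
    remote_music = open_url('http://127.0.0.1/example.wav')  # BufferedReader over HTTPRawIO
except OSError:
    print("could not open stream")
else:
    pygame.mixer.music.load(remote_music)
    pygame.mixer.music.play()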
If you're fine with using the requests module (which supports streaming) instead of urllib, you could use a wrapper like this:
from io import BytesIO, SEEK_SET, SEEK_END

class ResponseStream(object):
    def __init__(self, request_iterator):
        self._bytes = BytesIO()
        self._iterator = request_iterator

    def _load_all(self):
        self._bytes.seek(0, SEEK_END)
        for chunk in self._iterator:
            self._bytes.write(chunk)

    def _load_until(self, goal_position):
        current_position = self._bytes.seek(0, SEEK_END)
        while current_position < goal_position:
            try:
                current_position += self._bytes.write(next(self._iterator))
            except StopIteration:
                break

    def tell(self):
        return self._bytes.tell()

    def read(self, size=None):
        left_off_at = self._bytes.tell()
        if size is None:
            self._load_all()
        else:
            goal_position = left_off_at + size
            self._load_until(goal_position)
        self._bytes.seek(left_off_at)
        return self._bytes.read(size)

    def seek(self, position, whence=SEEK_SET):
        if whence == SEEK_END:
            # buffer everything first so seeking relative to the end works
            self._load_all()
        self._bytes.seek(position, whence)
Then I guess you can do something like this:
import pygame
import requests
from threading import Thread

WINDOW_WIDTH = 400
WINDOW_HEIGHT = 400
SKY_BLUE = (161, 255, 254)
URL = 'http://localhost:8000/example.wav'

pygame.init()
window = pygame.display.set_mode( ( WINDOW_WIDTH, WINDOW_HEIGHT ) )
pygame.display.set_caption("Music Streamer")
clock = pygame.time.Clock()
done = False
font = pygame.font.SysFont(None, 32)
state = 0

def play_music():
    global state
    response = requests.get(URL, stream=True)
    if (response.status_code == 200):
        stream = ResponseStream(response.iter_content(64))
        pygame.mixer.music.load(stream)
        pygame.mixer.music.play()
    else:
        state = 0

while not done:
    for event in pygame.event.get():
        if ( event.type == pygame.QUIT ):
            done = True
        if event.type == pygame.KEYDOWN and state == 0:
            Thread(target=play_music).start()
            state = 1

    window.fill( SKY_BLUE )
    window.blit(font.render(str(pygame.time.get_ticks()), True, (0,0,0)), (32, 32))
    pygame.display.flip()
    clock.tick_busy_loop( 60 )

pygame.quit()
This uses a Thread to start streaming.
I'm not sure it works 100%, but give it a try.

How could I create my own custom .zip format in Python 3.4?

I want to make my own zip file similar to this one, but in Python 3.4.
If you are interested in creating your own ZIP file, you might be interested in checking out the following two files. The first provides a GUI interface for compressing and decompressing directories, and the second has classes that use a custom serialization format. Use this idea to create your own system.
Archive3.py
#! /usr/bin/env python3
"""Provide an efficient alternative to zipping directories into archives.

This simple application is designed to serialize and deserialize directories
and their files into BZ2 compressed files. No active progress is shown, but
the window reappears once the requested operations have been completed."""

# Import the compression library along with tools to create a GUI on screen.
import bz2
import pathlib
import tkinter.filedialog
import tkinter.messagebox
import tkinter.ttk

# Import the custom serialization module to handle directories.
import daf_stream

# Include supplemental information along with a public API definition.
__author__ = 'Stephen "Zero" Chappell <Noctis.Skytower@gmail.com>'
__date__ = '21 February 2017'
__version__ = 3, 0, 0
__all__ = 'Application', 'find_parent_of_type'


class Application(tkinter.ttk.Frame):
    """Application(master=None, **kw) -> Application instance"""

    # Define some of the options used when dealing with directories and files.
    DIRECTORY_OPTIONS = dict(
        initialdir=pathlib.Path.home(),
        mustexist=True
    )
    FILE_OPTIONS = dict(
        defaultextension='.caa',
        filetypes=['Archive .caa'],
        initialdir=pathlib.Path.home()
    )

    @classmethod
    def main(cls):
        """Create a root window and display an Application instance in it."""
        tkinter.NoDefaultRoot()
        root = tkinter.Tk()
        root.title('Archive 3')
        root.resizable(False, False)
        frame = cls(root)
        frame.grid(sticky=tkinter.NSEW)
        root.mainloop()

    def __init__(self, master=None, **kw):
        """Initialize the Application's instance attributes."""
        super().__init__(master, **kw)
        self.label = self.compress_button = self.decompress_button = None
        self.create_widgets()
        self.configure_widgets()

    def create_widgets(self):
        """Build the controls the application will show on screen."""
        self.label = tkinter.ttk.Label(
            self,
            text='''\
WARNING:
This program is not backward-compatible and
cannot be used with the Archive 2.0 program.''',
            justify=tkinter.CENTER
        )
        self.compress_button = tkinter.ttk.Button(
            self,
            text='Compress Directory to File',
            command=lambda: self.wrapper(self.compress_directory)
        )
        self.decompress_button = tkinter.ttk.Button(
            self,
            text='Decompress File to Directory',
            command=lambda: self.wrapper(self.decompress_file)
        )

    def configure_widgets(self):
        """Set up the controls so they show up in their frame."""
        options = dict(padx=5, pady=5, sticky=tkinter.NSEW)
        self.label.grid(row=0, column=0, **options)
        self.compress_button.grid(row=1, column=0, **options)
        self.decompress_button.grid(row=2, column=0, **options)

    def wrapper(self, method):
        """Handle the root window, execute the method, and show any errors."""
        root = find_parent_of_type(self, tkinter.Tk)
        root.withdraw()
        try:
            method()
        except Exception as error:
            tkinter.messagebox.showerror(
                'Exception',
                f'{type(error).__name__}: {error}',
                master=self
            )
        root.deiconify()

    def compress_directory(self):
        """Pick a source and serialize it to the destination."""
        source = tkinter.filedialog.askdirectory(
            parent=self,
            title='Where is the directory you want to archive?',
            **self.DIRECTORY_OPTIONS
        )
        if source:
            destination = tkinter.filedialog.asksaveasfilename(
                confirmoverwrite=True,
                parent=self,
                title='Where should the compressed file be saved?',
                **self.FILE_OPTIONS
            )
            if destination:
                with bz2.open(destination, 'wb') as destination:
                    daf_stream.Serializer(destination).run(source)

    def decompress_file(self):
        """Pick a source and deserialize it to the destination."""
        source = tkinter.filedialog.askopenfilename(
            multiple=False,
            parent=self,
            title='Where is the file you want to decompress?',
            **self.FILE_OPTIONS
        )
        if source:
            destination = tkinter.filedialog.askdirectory(
                parent=self,
                title='Where should the data archive be loaded?',
                **self.DIRECTORY_OPTIONS
            )
            if destination:
                with bz2.open(source, 'rb') as source:
                    daf_stream.Deserializer(source).run(destination)


def find_parent_of_type(widget, desired_type):
    """Retrieve the control's parent that is of the desired type."""
    while True:
        widget = widget.master
        if widget is None:
            raise AttributeError('cannot find parent of desired type')
        if isinstance(widget, desired_type):
            return widget


if __name__ == '__main__':
    Application.main()
daf_stream.py
#! /usr/bin/env python3
"""Provide a simple directory and file serialization protocol.

This module implements two classes that can handle the DFS (Directory &
File Serialization) file format. Both classes can deal with file-like
objects and stream directories and files to and from the file system."""

# Import other modules needed for this module to work properly.
import abc
import collections
import enum
import io
import pathlib

# Include supplemental information along with a public API definition.
__author__ = 'Stephen "Zero" Chappell <Noctis.Skytower@gmail.com>'
__date__ = '9 February 2017'
__version__ = 3, 0, 0
__all__ = 'Serializer', 'Deserializer'

# The organization of the serialized data is fairly simple as shown below.
SERIALIZATION_FORMAT = '''\
Directory
    Header
        0,aaa,b,c,dd (Bit Mapping)
            0 = Directory
            a = Pointer Length
            b = Name Size Length
            c = Content Flag
            d = Type Code
                00 = Separator
                01 = Reserved
                10 = Reserved
                11 = Genuine
    Pointer to Parent
    Name Size
    Name
---------------------------------
File
    Header
        1,aaa,b,ccc (Bit Mapping)
            1 = File
            a = Pointer Length
            b = Name Size Length
            c = Data Size Length
    Pointer to Parent
    Name Size
    Name
    Data Size
    Data
'''


@enum.unique
class _RecordType(enum.IntEnum):
    """Enumeration of the different types a record may represent."""
    DIRECTORY = 0b0
    FILE = 0b1


@enum.unique
class _DirectoryTypeCode(enum.IntEnum):
    """Enumeration of codes directories may specify for their type."""
    SEPARATOR = 0b00
    RESERVED_A = 0b01
    RESERVED_B = 0b10
    GENUINE = 0b11

# Define the necessary components used to describe a bit field.
_BitField = collections.namedtuple('_BitField', 'offset, width')


class _Common(abc.ABC):
    """Abstract class for supporting Serializer and Deserializer classes."""

    # Define a few static attributes for use in derived classes.
    BUFFER_SIZE = 1 << 20
    BYTE_WIDTH = 8
    BYTE_MASK = (1 << BYTE_WIDTH) - 1
    NAME_ENCODING = 'utf_8'  # Set to 'mbcs' for Archive 2.0 compatibility.
    NULL_BYTE = b'\0'

    # Define the bit fields used in header bytes.
    RECORD_TYPE = _BitField(7, 1)
    POINTER_LENGTH = _BitField(4, 3)
    NAME_SIZE_LENGTH = _BitField(3, 1)
    CONTENT_FLAG = _BitField(2, 1)
    DIRECTORY_TYPE_CODE = _BitField(0, 2)
    FILE_DATA_SIZE_LENGTH = _BitField(0, 3)

    @abc.abstractmethod
    def __init__(self, stream):
        """Initialize the _Common instance's attributes."""
        self._stream = stream
        self._header = None

    @classmethod
    def _int_to_bytes(cls, integer):
        """Convert a number into a byte string of variable length."""
        if integer:
            array = bytearray()
            while integer:
                array.insert(0, integer & cls.BYTE_MASK)
                integer >>= cls.BYTE_WIDTH
            return bytes(array)
        return cls.NULL_BYTE

    @classmethod
    def _bytes_to_int(cls, array):
        """Convert a byte string of variable length into a number."""
        integer = 0
        for byte in array:
            integer <<= cls.BYTE_WIDTH
            integer |= byte
        return integer

    @staticmethod
    def _write(file, buffer):
        """Write buffer to file until it is completely written."""
        while True:
            written = file.write(buffer)
            if written is None:
                raise IOError('nothing could be written to the file')
            if written == len(buffer):
                break
            buffer = buffer[written:]


class Serializer(_Common):
    """Serializer(destination) -> Serializer instance"""

    def __init__(self, destination):
        """Initialize the Serializer instance's attributes."""
        super().__init__(destination)
        self._started = False
        self._pointer = None

    def run(self, source, keep_zombies=True):
        """Dump the source file or directory contents onto the destination."""
        path = pathlib.Path(source).resolve()
        zombies = []
        if path.is_dir():
            self._prime_run()
            self._acquire_dir(path, self.NULL_BYTE, keep_zombies, zombies)
        elif path.is_file():
            self._prime_run()
            self._acquire_file(path, self.NULL_BYTE, keep_zombies, zombies)
        else:
            raise ValueError('source must be a dir or a file')
        return zombies

    def _prime_run(self):
        """Reset some attributes before a serialization run."""
        self._pointer = 0
        if self._started:
            self._write(self._stream, self.NULL_BYTE)
        else:
            self._started = True

    def _acquire_dir(self, source, parent, keep_zombies, zombies):
        """Serialize a directory."""
        try:
            paths = tuple(source.iterdir())
        except OSError:
            zombies.append(source)
            if not keep_zombies:
                return
            paths = ()
        self._write_complete_dir_header(source, parent, bool(paths))
        if paths:
            self._pointer += 1
            parent = self._int_to_bytes(self._pointer)
            for path in paths:
                if path.is_dir():
                    self._acquire_dir(path, parent, keep_zombies, zombies)
                elif path.is_file():
                    self._acquire_file(path, parent, keep_zombies, zombies)

    def _write_complete_dir_header(self, source, parent, content):
        """Record all directory information except its contents."""
        name = source.name.encode(self.NAME_ENCODING)
        name_size = self._int_to_bytes(len(name))
        self._write_dir_header_byte(parent, name_size, content)
        self._write(self._stream, parent)
        self._write(self._stream, name_size)
        self._write(self._stream, name)

    def _write_dir_header_byte(self, pointer, name_size, content):
        """Record the directory header byte using the correct format."""
        self._header = 0
        self._set_bits(_RecordType.DIRECTORY, self.RECORD_TYPE)
        self._set_bits(len(pointer) - 1, self.POINTER_LENGTH)
        self._set_bits(len(name_size) - 1, self.NAME_SIZE_LENGTH)
        self._set_bits(content, self.CONTENT_FLAG)
        self._set_bits(_DirectoryTypeCode.GENUINE, self.DIRECTORY_TYPE_CODE)
        self._write(self._stream, bytes([self._header]))

    def _set_bits(self, integer, bit_field):
        """Help build the header byte while checking certain arguments."""
        if not 0 <= integer < 1 << bit_field.width:
            raise ValueError('integer does not fit in width numbers of bits')
        self._header |= integer << bit_field.offset

    def _acquire_file(self, source, parent, keep_zombies, zombies):
        """Serialize a file."""
        restore_point = self._stream.tell()
        try:
            with source.open('rb') as file:
                file_length = file.seek(0, io.SEEK_END)
                self._write_complete_file_header(source, parent, file_length)
                future_data = file.seek(0, io.SEEK_END)
                if future_data != file_length:
                    raise OSError('source changed size after writing header')
                file.seek(0, io.SEEK_SET)
                while future_data:
                    buffer = file.read(min(future_data, self.BUFFER_SIZE))
                    if not buffer:
                        raise OSError('source file ended with remaining data')
                    self._write(self._stream, buffer)
                    future_data -= len(buffer)
                if file.seek(0, io.SEEK_END) != file_length:
                    raise OSError('file changed size during serialization')
        except OSError:
            self._stream.seek(restore_point, io.SEEK_SET)
            self._stream.truncate()
            zombies.append(source)
            if keep_zombies:
                self._write_complete_file_header(source, parent, 0)

    def _write_complete_file_header(self, source, parent, file_length):
        """Record all file information except its data."""
        name = source.name.encode(self.NAME_ENCODING)
        name_size = self._int_to_bytes(len(name))
        data_size = self._int_to_bytes(file_length)
        self._write_file_header_byte(parent, name_size, data_size)
        self._write(self._stream, parent)
        self._write(self._stream, name_size)
        self._write(self._stream, name)
        self._write(self._stream, data_size)

    def _write_file_header_byte(self, pointer, name_size, data_size):
        """Record the file header byte using the correct format."""
        self._header = 0
        self._set_bits(_RecordType.FILE, self.RECORD_TYPE)
        self._set_bits(len(pointer) - 1, self.POINTER_LENGTH)
        self._set_bits(len(name_size) - 1, self.NAME_SIZE_LENGTH)
        self._set_bits(len(data_size) - 1, self.FILE_DATA_SIZE_LENGTH)
        self._write(self._stream, bytes([self._header]))


class Deserializer(_Common):
    """Deserializer(source) -> Deserializer instance"""

    def __init__(self, source):
        """Initialize the Deserializer instance's attributes."""
        super().__init__(source)
        self._finished = False
        self._parents = None

    @property
    def finished(self):
        """Check if the object has reached the end of the file yet."""
        return self._finished

    def run(self, destination):
        """Load the source file-like object onto the destination directory."""
        if self._finished:
            raise EOFError('end of file was found')
        self._parents = [pathlib.Path(destination).resolve()]
        starting_run = True
        while True:
            byte = self._stream.read(1)
            if not byte:
                self._finished = True
                if starting_run:
                    raise IOError('unexpected file termination detected')
                break
            self._header = byte[0]
            if self._get_bits(self.RECORD_TYPE) == _RecordType.FILE:
                self._release_file()
            else:
                type_code = self._get_bits(self.DIRECTORY_TYPE_CODE)
                if type_code == _DirectoryTypeCode.GENUINE:
                    self._release_dir()
                elif type_code == _DirectoryTypeCode.SEPARATOR:
                    if starting_run:
                        raise IOError('empty record detected')
                    break
                else:
                    raise IOError('reserved directory type code detected')
            starting_run = False

    def _get_bits(self, bit_field):
        """Extract width number of bits from header starting at offset."""
        return self._header >> bit_field.offset & (1 << bit_field.width) - 1

    def _release_dir(self):
        """Deserialize a directory."""
        pointer_length = self._get_bits(self.POINTER_LENGTH) + 1
        name_size_length = self._get_bits(self.NAME_SIZE_LENGTH) + 1
        content_flag = bool(self._get_bits(self.CONTENT_FLAG))
        # After decoding the header byte, read and process the remaining data.
        pointer = self._bytes_to_int(self._read(pointer_length))
        name_size = self._bytes_to_int(self._read(name_size_length))
        name = self._read(name_size).decode(self.NAME_ENCODING)
        path = self._parents[pointer] / name
        path.mkdir()
        if content_flag:
            self._parents.append(path)

    def _release_file(self):
        """Deserialize a file."""
        pointer_length = self._get_bits(self.POINTER_LENGTH) + 1
        name_size_length = self._get_bits(self.NAME_SIZE_LENGTH) + 1
        data_size_length = self._get_bits(self.FILE_DATA_SIZE_LENGTH) + 1
        # After decoding the header byte, read and process the remaining data.
        pointer = self._bytes_to_int(self._read(pointer_length))
        name_size = self._bytes_to_int(self._read(name_size_length))
        name = self._read(name_size).decode(self.NAME_ENCODING)
        with (self._parents[pointer] / name).open('wb') as destination:
            future_data = self._bytes_to_int(self._read(data_size_length))
            while future_data:
                buffer = self._stream.read(min(future_data, self.BUFFER_SIZE))
                if not buffer:
                    raise IOError('end of file was found')
                self._write(destination, buffer)
                future_data -= len(buffer)

    def _read(self, future_data):
        """Read at least as many bytes from the source as requested."""
        if future_data:
            buffer = bytearray()
            while future_data:
                data = self._stream.read(future_data)
                if not data:
                    raise IOError('end of file was found')
                buffer.extend(data)
                future_data -= len(data)
            return buffer
        raise IOError('request for zero bytes found')
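For completeness, a minimal sketch of driving daf_stream without the GUI, mirroring what Archive3.py does in compress_directory and decompress_file; 'my_directory', 'restored', and 'backup.caa' are placeholder names, and the destination directory is assumed to already exist:

import bz2
import daf_stream

# Serialize a directory tree into a BZ2-compressed archive.
with bz2.open('backup.caa', 'wb') as destination:
    daf_stream.Serializer(destination).run('my_directory')

# Deserialize the archive back out under the destination directory.
with bz2.open('backup.caa', 'rb') as source:
    daf_stream.Deserializer(source).run('restored')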

Endless container iterator with backward/forward movement support

Is there a container in the standard library with endless forward/backward movement support, like itertools.cycle? Or how would I implement a one-liner for it?
Current code (github):
def __init__(self, ...):
    self.__weapons = [Weapon()("Blaster"), Weapon()("Laser"), Weapon()("UM")]
    self.__weapon = self.__weapons[0]
    ...

def next_weapon(self):
    ind = self.__weapons.index(self.__weapon)
    if ind < len(self.__weapons) - 1:
        self.__weapon = self.__weapons[ind + 1]
    else:
        self.__weapon = self.__weapons[0]
And the prev_weapon method is almost the same code.
I want to iterate over an endless container in both directions =)
Thanks in advance,
Paul
I decided that the best solution is to extend list:
class InfList(list):
    """Infinite list container"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._index = 0

    def current(self):
        return self[self._index]

    def next(self):
        self._index = (self._index + 1) % len(self)
        return self[self._index]

    def prev(self):
        self._index = (self._index - 1) % len(self)
        return self[self._index]
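A short usage sketch, reusing the weapon names from the question:

weapons = InfList(["Blaster", "Laser", "UM"])
print(weapons.current())  # Blaster
print(weapons.next())     # Laser
print(weapons.next())     # UM
print(weapons.next())     # Blaster (wrapped forward)
print(weapons.prev())     # UM (wrapped backward)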
