Python: stream a tarfile to S3 using multipart upload

I would like to create a .tar file in an S3 bucket from Python code running in an AWS Lambda function. Lambda functions are very memory- and disk-constrained. I want to create a .tar file that contains multiple files that are too large to fit in the Lambda function's memory or disk space.
Using "S3 multipart upload," it is possible to upload a large file by uploading chunks of 5MB or more in size. I have this figured out and working. What I need to figure out is how to manage a buffer of bytes in memory that won't grow past the limits of the Lambda function's runtime environment.
I think the solution is to create an io.BytesIO() object and manage both a read pointer and a write pointer. I can then write into the buffer (from files that I want to add to the .tar file) and every time the buffer exceeds some limit (like 5MB) I can read off a chunk of data and send another file part to S3.
What I haven't quite wrapped my head around is how to truncate the part of the buffer that has been read and is no longer needed in memory. I need to trim the head of the buffer, not the tail, so the truncate() function of BytesIO won't work for me.
Is the 'correct' solution to create a new BytesIO buffer, populating it with the contents of the existing buffer from the read pointer to the end of the buffer, when I truncate? Is there a better way to truncate the head of the BytesIO buffer? Is there a better solution than using BytesIO?

For the random Google-r who stumbles onto this question six years in the future and thinks, "man, that describes my problem exactly!", here's what I came up with:
import io
import struct
from tarfile import BLOCKSIZE

#This class was designed to write a .tar file to S3 using multipart upload
#in a memory- and disk-constrained environment, such as AWS Lambda Functions.
#
#Much of this code is copied or adapted from the Python source code tarfile.py
#file at https://github.com/python/cpython/blob/3.10/Lib/tarfile.py
#
#No warranties expressed or implied. Your mileage may vary. Lather, rinse, repeat
class StreamingTarFileWriter:
    #Various constants from tarfile.py that we need
    GNU_FORMAT = 1
    NUL = b"\0"
    BLOCKSIZE = 512
    RECORDSIZE = BLOCKSIZE * 20

    class MemoryByteStream:
        def __init__(self, bufferFullCallback=None, bufferFullByteCount=0):
            self.buf = io.BytesIO()
            self.readPointer = 0
            self.writePointer = 0
            self.bufferFullCallback = bufferFullCallback
            self.bufferFullByteCount = bufferFullByteCount

        def write(self, buf: bytes):
            self.buf.seek(self.writePointer)
            self.writePointer += self.buf.write(buf)
            bytesAvailableToRead = self.writePointer - self.readPointer
            if self.bufferFullByteCount > 0 and bytesAvailableToRead > self.bufferFullByteCount:
                if self.bufferFullCallback:
                    self.bufferFullCallback(self, bytesAvailableToRead)

        def read(self, byteCount=None):
            self.buf.seek(self.readPointer)
            if byteCount:
                chunk = self.buf.read(byteCount)
            else:
                chunk = self.buf.read()
            self.readPointer += len(chunk)
            self._truncate()
            return chunk

        def size(self):
            return self.writePointer - self.readPointer

        def _truncate(self):
            #Discard the already-read head of the buffer by copying the
            #unread tail into a fresh BytesIO object
            self.buf.seek(self.readPointer)
            self.buf = io.BytesIO(self.buf.read())
            self.readPointer = 0
            self.writePointer = self.buf.seek(0, 2)

    def stn(self, s, length, encoding, errors):
        #Convert a string to a null-terminated bytes object.
        s = s.encode(encoding, errors)
        return s[:length] + (length - len(s)) * self.NUL

    def itn(self, n, digits=8, format=GNU_FORMAT):
        #Convert a python number to a number field.
        # POSIX 1003.1-1988 requires numbers to be encoded as a string of
        # octal digits followed by a null-byte, this allows values up to
        # (8**(digits-1))-1. GNU tar allows storing numbers greater than
        # that if necessary. A leading 0o200 or 0o377 byte indicate this
        # particular encoding, the following digits-1 bytes are a big-endian
        # base-256 representation. This allows values up to (256**(digits-1))-1.
        # A 0o200 byte indicates a positive number, a 0o377 byte a negative
        # number.
        original_n = n
        n = int(n)
        if 0 <= n < 8 ** (digits - 1):
            s = bytes("%0*o" % (digits - 1, n), "ascii") + self.NUL
        elif format == self.GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
            if n >= 0:
                s = bytearray([0o200])
            else:
                s = bytearray([0o377])
                n = 256 ** digits + n
            for i in range(digits - 1):
                s.insert(1, n & 0o377)
                n >>= 8
        else:
            raise ValueError("overflow in number field")
        return s

    def calc_chksums(self, buf):
        """Calculate the checksum for a member's header by summing up all
        characters except for the chksum field which is treated as if
        it was filled with spaces. According to the GNU tar sources,
        some tars (Sun and NeXT) calculate chksum with signed char,
        which will be different if there are chars in the buffer with
        the high bit set. So we calculate two checksums, unsigned and
        signed.
        """
        unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
        signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
        return unsigned_chksum, signed_chksum

    def __init__(self, bufferFullCallback=None, bufferFullByteCount=0):
        self.buf = self.MemoryByteStream(bufferFullCallback, bufferFullByteCount)
        self.expectedFileSize = 0
        self.fileBytesWritten = 0
        self.offset = 0

    def addFileRecord(self, filename, filesize):
        REGTYPE = b"0"               # regular file
        encoding = "utf-8"
        LENGTH_NAME = 100
        GNU_MAGIC = b"ustar  \0"     # magic gnu tar string (note: two spaces)
        errors = "surrogateescape"

        #Copied from TarInfo.tobuf()
        tarinfo = {
            "name": filename,
            "mode": 0o644,
            "uid": 0,
            "gid": 0,
            "size": filesize,
            "mtime": 0,
            "chksum": 0,
            "type": REGTYPE,
            "linkname": "",
            "uname": "",
            "gname": "",
            "devmajor": 0,
            "devminor": 0,
            "magic": GNU_MAGIC
        }

        buf = b""
        if len(tarinfo["name"].encode(encoding, errors)) > LENGTH_NAME:
            raise Exception("Filename is too long for tar file header.")

        devmajor = self.stn("", 8, encoding, errors)
        devminor = self.stn("", 8, encoding, errors)

        parts = [
            self.stn(tarinfo.get("name", ""), 100, encoding, errors),
            self.itn(tarinfo.get("mode", 0) & 0o7777, 8, self.GNU_FORMAT),
            self.itn(tarinfo.get("uid", 0), 8, self.GNU_FORMAT),
            self.itn(tarinfo.get("gid", 0), 8, self.GNU_FORMAT),
            self.itn(tarinfo.get("size", 0), 12, self.GNU_FORMAT),
            self.itn(tarinfo.get("mtime", 0), 12, self.GNU_FORMAT),
            b"        ",  # checksum field (8 spaces)
            tarinfo.get("type", REGTYPE),
            self.stn(tarinfo.get("linkname", ""), 100, encoding, errors),
            tarinfo.get("magic", GNU_MAGIC),
            self.stn(tarinfo.get("uname", ""), 32, encoding, errors),
            self.stn(tarinfo.get("gname", ""), 32, encoding, errors),
            devmajor,
            devminor,
            self.stn(tarinfo.get("prefix", ""), 155, encoding, errors)
        ]

        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
        chksum = self.calc_chksums(buf[-BLOCKSIZE:])[0]
        buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
        self.buf.write(buf)
        self.expectedFileSize = filesize
        self.fileBytesWritten = 0
        self.offset += len(buf)

    def addFileData(self, buf):
        self.buf.write(buf)
        self.fileBytesWritten += len(buf)
        self.offset += len(buf)

    def completeFileRecord(self):
        if self.fileBytesWritten != self.expectedFileSize:
            raise Exception(f"Expected {self.expectedFileSize:,} bytes but {self.fileBytesWritten:,} were written.")
        #pad the file data out to a full block with NULs
        blocks, remainder = divmod(self.fileBytesWritten, BLOCKSIZE)
        if remainder > 0:
            self.buf.write(self.NUL * (BLOCKSIZE - remainder))
            self.offset += BLOCKSIZE - remainder

    def completeTarFile(self):
        #write the end-of-archive marker: two NUL blocks, padded out to a
        #full record
        self.buf.write(self.NUL * (BLOCKSIZE * 2))
        self.offset += (BLOCKSIZE * 2)
        blocks, remainder = divmod(self.offset, self.RECORDSIZE)
        if remainder > 0:
            self.buf.write(self.NUL * (self.RECORDSIZE - remainder))
An example use of the class is:
OUTPUT_CHUNK_SIZE = 1024 * 1024 * 5

f_out = open("test.tar", "wb")

def get_file_block(blockNum):
    block = f"block_{blockNum:010,}"
    block += "0123456789abcdef" * 31
    return bytes(block, 'ascii')

def buffer_full_callback(x: StreamingTarFileWriter.MemoryByteStream, bytesAvailable: int):
    while x.size() > OUTPUT_CHUNK_SIZE:
        buf = x.read(OUTPUT_CHUNK_SIZE)
        #This is where you would write the chunk to S3
        f_out.write(buf)

x = StreamingTarFileWriter(buffer_full_callback, OUTPUT_CHUNK_SIZE)

import random
numFiles = random.randint(3, 8)
print(f"Creating {numFiles:,} files.")

for fileIdx in range(numFiles):
    minSize = 1025  #1kB plus 1 byte
    maxSize = 10 * 1024 * 1024 * 1024 + 5  #10GB plus 5 bytes
    numBytes = random.randint(minSize, maxSize)
    print(f"Creating file {str(fileIdx)} with {numBytes:,} bytes.")
    blocks, remainder = divmod(numBytes, 512)
    x.addFileRecord(f"File{str(fileIdx)}", numBytes)
    for block in range(blocks):
        x.addFileData(get_file_block(block))
    x.addFileData(bytes(("X" * remainder), 'ascii'))
    x.completeFileRecord()

#Finalize the archive and drain whatever is still buffered
x.completeTarFile()
f_out.write(x.buf.read())
f_out.close()
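For the S3 side, this is roughly how the same callback can feed boto3's multipart upload calls. A minimal sketch, assuming a placeholder bucket and key; real code also needs error handling and abort_multipart_upload on failure:

import boto3

s3 = boto3.client("s3")
mpu = s3.create_multipart_upload(Bucket="my-bucket", Key="output.tar")
parts = []

def s3_buffer_full_callback(stream, bytesAvailable):
    #Every part except the last must be at least 5MB
    while stream.size() > OUTPUT_CHUNK_SIZE:
        chunk = stream.read(OUTPUT_CHUNK_SIZE)
        part_number = len(parts) + 1
        resp = s3.upload_part(Bucket="my-bucket", Key="output.tar",
                              PartNumber=part_number,
                              UploadId=mpu["UploadId"], Body=chunk)
        parts.append({"PartNumber": part_number, "ETag": resp["ETag"]})

#After completeTarFile(), upload the remaining buffer as the final
#(possibly short) part in the same way, then seal the object with:
#s3.complete_multipart_upload(Bucket="my-bucket", Key="output.tar",
#                             UploadId=mpu["UploadId"],
#                             MultipartUpload={"Parts": parts})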

Related

Image compression in python

For my image compression, I am using the Pillow library to get every pixel in rgb (for ex: (100, 0, 200)). Using Huffman encoding, I have already converted to binary to reduce the number of bits. For now, I have to save the sequence of bits into a text or binary file. The compressed file should be consistently smaller than the original, but for now my txt file is larger than the original. What should I do?
And after that, how can I read the file and decompress it? Here is the instruction:
Your code should read in an image file, compute how many bits are required for a fixed length encoding
and then apply a compression algorithm to create a smaller encoding – you need to implement the
compression, you cannot use a compression library. You should output how many bits are required to store the image in your compressed format as well as the compression ratio achieved. When it comes
to saving your compressed image, you won’t be able to save it as a standard image format, since you will
have created your own encoding, but you can save the sequence of bits into a text or binary file.
Your code should also be able to prompt the user for the filename of a text file containing a compressed
sequence of bits and then decompress that file into the original image – you can assume that the file
uses the same compression format as the last file you compressed. So, for example, if you compressed pacificat.bmp into a series of bits stored in pacificat.txt and then the user asked you to decompress alt_encode.txt, you could assume that alt_pacificat.txt used the same compression data structure as encode.txt (it might be a subset of the data from the original image, for example).
There are a number of libraries that can help you store formatted data into a file from Python. If you research the options and find a way to store your compression data structure into a file, such that the user can select both a bit file and a data structure file and use the data structure to decompress the bit file
Just use my current image: flag2.bmp. Here is my code:
from PIL import Image
import sys, string
import copy
import time

codes = {}

def sortFreq(freqs):
    letters = freqs.keys()
    tuples = []
    for let in letters:
        tuples.append((freqs[let], let))
    tuples.sort()
    return tuples

def buildTree(tuples):
    while len(tuples) > 1:
        leastTwo = tuple(tuples[0:2])  # get the 2 to combine
        theRest = tuples[2:]           # all the others
        combFreq = leastTwo[0][0] + leastTwo[1][0]  # the branch points freq
        tuples = theRest + [(combFreq, leastTwo)]   # add branch point to the end
        tuples.sort()                  # sort it into place
    return tuples[0]  # Return the single tree inside the list

def trimTree(tree):
    # Trim the freq counters off, leaving just the letters
    p = tree[1]  # ignore freq count in [0]
    if type(p) == type(""): return p  # if just a leaf, return it
    else: return (trimTree(p[0]), trimTree(p[1]))  # trim left then right and recombine

def assignCodes(node, pat=''):
    global codes
    if type(node) == type(""):
        codes[node] = pat  # A leaf. set its code
    else:
        assignCodes(node[0], pat + "0")  # Branch point. Do the left branch
        assignCodes(node[1], pat + "1")  # then do the right branch.

start = time.time()

dictionary = {}
table = {}
image = Image.open('flag2.bmp')
#image.show()
width, height = image.size
px = image.load()
totalpixel = width * height
print("Total pixel: " + str(totalpixel))

for x in range(width):
    for y in range(height):
        # print(px[x, y])
        for i in range(3):
            if dictionary.get(str(px[x, y][i])) is None:
                dictionary[str(px[x, y][i])] = 1
            else:
                dictionary[str(px[x, y][i])] = dictionary[str(px[x, y][i])] + 1

table = copy.deepcopy(dictionary)

def encode2(str):
    global codes
    output = ""
    for ch in str: output += codes[ch]
    return output

def decode(tree, str):
    output = ""
    p = tree
    for bit in str:
        if bit == '0': p = p[0]  # Head up the left branch
        else: p = p[1]           # or up the right branch
        if type(p) == type(""):
            output += p  # found a character. Add to output
            p = tree     # and restart for next character
    return output

combination = len(dictionary)
for value in table:
    table[value] = table[value] / (totalpixel * combination) * 100

print(table)
print(dictionary)

sortdic = sortFreq(dictionary)
tree = buildTree(sortdic)
print("tree")
print(tree)
trim = trimTree(tree)
print("trim")
print(trim)
print("assign 01")
assignCodes(trim)
print(codes)

empty_tuple = ()
f = open("answer.txt", "w")

for x in range(width):
    for y in range(height):
        list = []
        list.append(codes[str(px[x, y][0])])
        list.append(codes[str(px[x, y][1])])
        list.append(codes[str(px[x, y][2])])
        print(str(px[x, y]) + ": " + str(list))
        f.write(str(list))

print("decode test:", str(decode(trim, "1100")))

stop = time.time()
times = (stop - start) * 1000
print("Run time takes %d milliseconds" % times)
Code Cleanup
Let's try to refactor your code a little, taking advantage of algorithms provided by the Python standard library, while keeping to the spirit of your approach to Huffman tree calculation and image encoding.
Calculating Symbol Counts
First of all, we can refactor the symbol counting into a function and rewrite it in a more concise way:
Use Image.getdata() to iterate over all the pixels in the image
Since each pixel is represented by a tuple, use itertools.chain.from_iterable to get a flattened view of intensities.
Take advantage of collections.Counter to get the symbol (intensity) counts
Additionally, we can change it to return a list of (symbol, count) pairs, sorted in ascending order by (count, symbol). To do so, we can combine it with a rewritten version of your sortFreq(...) function, taking advantage of:
Python sorted(...) function (which allows us to define the key to sort by), together with
Tuple slicing to reverse the (symbol, count) tuples for sorting
Implementation:
from collections import Counter
from itertools import chain

def count_symbols(image):
    pixels = image.getdata()
    values = chain.from_iterable(pixels)
    counts = Counter(values).items()
    return sorted(counts, key=lambda x: x[::-1])
Building the Tree
Only a small change is needed here -- since we already have the symbol counts sorted, we just need to reverse the tuples to let your existing tree-building algorithm work. We can use a list comprehension together with tuple slicing to express this concisely.
Implementation:
def build_tree(counts):
    nodes = [entry[::-1] for entry in counts]  # Reverse each (symbol, count) tuple
    while len(nodes) > 1:
        leastTwo = tuple(nodes[0:2])  # get the 2 to combine
        theRest = nodes[2:]           # all the others
        combFreq = leastTwo[0][0] + leastTwo[1][0]  # the branch points freq
        nodes = theRest + [(combFreq, leastTwo)]    # add branch point to the end
        nodes.sort()                  # sort it into place
    return nodes[0]  # Return the single tree inside the list
Trimming the Tree
Again, just two small changes from your original implementation:
Change the test to check for tuple (node), to be independent of how a symbol is represented.
Get rid of the unnecessary else
Implementation:
def trim_tree(tree):
    p = tree[1]  # Ignore freq count in [0]
    if type(p) is tuple:  # Node, trim left then right and recombine
        return (trim_tree(p[0]), trim_tree(p[1]))
    return p  # Leaf, just return it
Assigning Codes
The most important change here is to eliminate the reliance on a global codes variable. To resolve it, we can split the implementation into two functions, one which handles the recursive code assignment, and a wrapper which creates a new local codes dictionary, dispatches the recursive function on it, and returns the output.
Let's also switch the representation of codes from strings to lists of bits (integers in range [0,1]) -- the usefulness of this will be apparent later.
Once more, we'll change the test to check for tuples (for same reason as when trimming).
Implementation:
def assign_codes_impl(codes, node, pat):
    if type(node) == tuple:
        assign_codes_impl(codes, node[0], pat + [0])  # Branch point. Do the left branch
        assign_codes_impl(codes, node[1], pat + [1])  # then do the right branch.
    else:
        codes[node] = pat  # A leaf. set its code

def assign_codes(tree):
    codes = {}
    assign_codes_impl(codes, tree, [])
    return codes
Encoding
Let's make a small detour, and talk about encoding of the data.
First of all, let's observe that a raw RGB pixel is represented by 3 bytes (one for each colour channel). That's 24 bits per pixel, and forms our baseline.
Now, your current algorithm encodes the first pixel as the following ASCII string:
['000', '0010', '0011']
That's 23 bytes in total (or 184 bits). That's much, much worse than raw. Let's examine why:
There are two spaces, which just make it more readable to a human. Those carry no information. (2 bytes)
Each of the three codes is delimited by two apostrophes. Since the codes only consist of 0s and 1s, the apostrophes are unnecessary for parsing, and thus also carry no information. (6 bytes)
Each of the codes is a prefix code, therefore they can be parsed unambiguously, and thus the two commas used for code separation are also unnecessary. (2 bytes)
We know there are three codes per pixel, so we don't need the braces ([,]) to delimit pixels either (for same reason as above). (2 bytes)
In total, that's 12 bytes per pixel that carry no information at all. The remaining 11 bytes (in this particular case) do carry some information... but how much?
Notice that the only two possible symbols in the output alphabet are 0 and 1. That means that each symbol carries 1 bit of information. Since you store each symbol as an ASCII character (a byte), you use 8 bits for each 1 bit of information.
Put together, in this particular case, you used 184 bits to represent 11 bits of information -- ~16.7x more than necessary, and ~7.67x worse than just storing the pixels in raw format.
Obviously, using a naive text representation of the encoded data will not yield any compression. We will need a better approach.
Bitstreams
From our earlier analysis, it becomes evident that in order to perform compression (and decompression) effectively, we need to be able to treat our output (or input) as a stream of individual bits. The standard Python libraries do not provide a direct solution to do this -- at the lowest granularity, we can only read or write a file one byte at a time.
Since we want to encode values that may consist of multiple bits, it's essential to decide how they shall be ordered based on significance. Let's order them from the most significant to the least significant.
Bit I/O Utilities
As mentioned earlier, we shall represent a sequence of bits as a list of integers in range [0,1]. Let's start by writing some simple utility functions:
A function that converts an integer into the shortest sequence of bits that uniquely represents it (i.e. at least 1 bit, but otherwise no leading zeros).
A function that converts a sequence of bits into an integer.
A function that zero-extends (adds zeros to most significant positions) a sequence of bits (to allow fixed-length encoding).
Implementation:
def to_binary_list(n):
    """Convert integer into a list of bits"""
    return [n] if (n <= 1) else to_binary_list(n >> 1) + [n & 1]

def from_binary_list(bits):
    """Convert list of bits into an integer"""
    result = 0
    for bit in bits:
        result = (result << 1) | bit
    return result

def pad_bits(bits, n):
    """Prefix list of bits with enough zeros to reach n digits"""
    assert(n >= len(bits))
    return ([0] * (n - len(bits)) + bits)
Example Usage:
>>> to_binary_list(14)
[1, 1, 1, 0]
>>> from_binary_list([1,1,1,0])
14
>>> pad_bits(to_binary_list(14),8)
[0, 0, 0, 0, 1, 1, 1, 0]
Output Bitstream
Since the file I/O API allows us to save only whole bytes, we need to create a wrapper class that will buffer the bits written into a stream in memory.
Let's provide means to write a single bit, as well as a sequence of bits.
Each write command (of 1 or more bits) will first add the bits into the buffer. Once the buffer contains at least 8 bits, groups of 8 bits are removed from the front, converted to an integer in range [0-255] and saved to the output file. This is done until the buffer contains fewer than 8 bits.
Finally, let's provide a way to "flush" the stream -- when the buffer is non-empty, but doesn't contain enough bits to make a whole byte, add zeros to the least significant position until there are 8 bits, and then write the byte. We need this when we're closing the bitstream (and there are some other benefits that we'll see later).
Implementation:
class OutputBitStream(object):
    def __init__(self, file_name):
        self.file_name = file_name
        self.file = open(self.file_name, 'wb')
        self.bytes_written = 0
        self.buffer = []

    def write_bit(self, value):
        self.write_bits([value])

    def write_bits(self, values):
        self.buffer += values
        while len(self.buffer) >= 8:
            self._save_byte()

    def flush(self):
        if len(self.buffer) > 0:  # Add trailing zeros to complete a byte and write it
            self.buffer += [0] * (8 - len(self.buffer))
            self._save_byte()
        assert(len(self.buffer) == 0)

    def _save_byte(self):
        bits = self.buffer[:8]
        self.buffer[:] = self.buffer[8:]

        byte_value = from_binary_list(bits)
        self.file.write(bytes([byte_value]))
        self.bytes_written += 1

    def close(self):
        self.flush()
        self.file.close()
Input Bitstream
The input bitstream follows a similar theme. We want to read 1 or more bits at a time. To do so, we load bytes from the file, convert each byte to a list of bits and add it to the buffer, until there are enough to satisfy the read request.
The flush command in this case purges the buffer (assuring it contains only zeros).
Implementation:
class InputBitStream(object):
    def __init__(self, file_name):
        self.file_name = file_name
        self.file = open(self.file_name, 'rb')
        self.bytes_read = 0
        self.buffer = []

    def read_bit(self):
        return self.read_bits(1)[0]

    def read_bits(self, count):
        while len(self.buffer) < count:
            self._load_byte()
        result = self.buffer[:count]
        self.buffer[:] = self.buffer[count:]
        return result

    def flush(self):
        assert(not any(self.buffer))
        self.buffer[:] = []

    def _load_byte(self):
        value = ord(self.file.read(1))
        self.buffer += pad_bits(to_binary_list(value), 8)
        self.bytes_read += 1

    def close(self):
        self.file.close()
Compressed Format
Next we need to define the format of our compressed bitstream. There are three essential chunks of information that are needed to decode the image:
The shape of the image (height and width), with the assumption that it's a 3-channel RGB image.
Information necessary to reconstruct the Huffman codes on the decode side
Huffman-encoded pixel data
Let's make our compressed format as follows:
Header
Image height (16 bits, unsigned)
Image width (16 bits, unsigned)
Huffman table (beginning aligned to whole byte)
See this answer (https://stackoverflow.com/a/759766/3962537) for the algorithm.
Pixel codes (beginning aligned to whole byte)
width * height * 3 Huffman codes in sequence
Compression
Implementation:
from PIL import Image

def compressed_size(counts, codes):
    header_size = 2 * 16  # height and width as 16 bit values

    tree_size = len(counts) * (1 + 8)  # Leafs: 1 bit flag, 8 bit symbol each
    tree_size += len(counts) - 1       # Nodes: 1 bit flag each
    if tree_size % 8 > 0:              # Padding to next full byte
        tree_size += 8 - (tree_size % 8)

    # Sum for each symbol of count * code length
    pixels_size = sum([count * len(codes[symbol]) for symbol, count in counts])
    if pixels_size % 8 > 0:  # Padding to next full byte
        pixels_size += 8 - (pixels_size % 8)

    return (header_size + tree_size + pixels_size) / 8

def encode_header(image, bitstream):
    height_bits = pad_bits(to_binary_list(image.height), 16)
    bitstream.write_bits(height_bits)
    width_bits = pad_bits(to_binary_list(image.width), 16)
    bitstream.write_bits(width_bits)

def encode_tree(tree, bitstream):
    if type(tree) == tuple:  # Node - write 0 and encode children
        bitstream.write_bit(0)
        encode_tree(tree[0], bitstream)
        encode_tree(tree[1], bitstream)
    else:  # Leaf - write 1, followed by 8 bit symbol
        bitstream.write_bit(1)
        symbol_bits = pad_bits(to_binary_list(tree), 8)
        bitstream.write_bits(symbol_bits)

def encode_pixels(image, codes, bitstream):
    for pixel in image.getdata():
        for value in pixel:
            bitstream.write_bits(codes[value])

def compress_image(in_file_name, out_file_name):
    print('Compressing "%s" -> "%s"' % (in_file_name, out_file_name))
    image = Image.open(in_file_name)
    print('Image shape: (height=%d, width=%d)' % (image.height, image.width))
    size_raw = raw_size(image.height, image.width)
    print('RAW image size: %d bytes' % size_raw)

    counts = count_symbols(image)
    print('Counts: %s' % counts)
    tree = build_tree(counts)
    print('Tree: %s' % str(tree))
    trimmed_tree = trim_tree(tree)
    print('Trimmed tree: %s' % str(trimmed_tree))
    codes = assign_codes(trimmed_tree)
    print('Codes: %s' % codes)

    size_estimate = compressed_size(counts, codes)
    print('Estimated size: %d bytes' % size_estimate)

    print('Writing...')
    stream = OutputBitStream(out_file_name)
    print('* Header offset: %d' % stream.bytes_written)
    encode_header(image, stream)
    stream.flush()  # Ensure next chunk is byte-aligned
    print('* Tree offset: %d' % stream.bytes_written)
    encode_tree(trimmed_tree, stream)
    stream.flush()  # Ensure next chunk is byte-aligned
    print('* Pixel offset: %d' % stream.bytes_written)
    encode_pixels(image, codes, stream)
    stream.close()

    size_real = stream.bytes_written
    print('Wrote %d bytes.' % size_real)
    print('Estimate is %scorrect.' % ('' if size_estimate == size_real else 'in'))
    print('Compression ratio: %0.2f' % (float(size_raw) / size_real))
Decompression
Implementation:
from PIL import Image

def decode_header(bitstream):
    height = from_binary_list(bitstream.read_bits(16))
    width = from_binary_list(bitstream.read_bits(16))
    return (height, width)

# https://stackoverflow.com/a/759766/3962537
def decode_tree(bitstream):
    flag = bitstream.read_bits(1)[0]
    if flag == 1:  # Leaf, read and return symbol
        return from_binary_list(bitstream.read_bits(8))
    left = decode_tree(bitstream)
    right = decode_tree(bitstream)
    return (left, right)

def decode_value(tree, bitstream):
    bit = bitstream.read_bits(1)[0]
    node = tree[bit]
    if type(node) == tuple:
        return decode_value(node, bitstream)
    return node

def decode_pixels(height, width, tree, bitstream):
    pixels = bytearray()
    for i in range(height * width * 3):
        pixels.append(decode_value(tree, bitstream))
    return Image.frombytes('RGB', (width, height), bytes(pixels))

def decompress_image(in_file_name, out_file_name):
    print('Decompressing "%s" -> "%s"' % (in_file_name, out_file_name))

    print('Reading...')
    stream = InputBitStream(in_file_name)
    print('* Header offset: %d' % stream.bytes_read)
    height, width = decode_header(stream)
    stream.flush()  # Ensure next chunk is byte-aligned
    print('* Tree offset: %d' % stream.bytes_read)
    trimmed_tree = decode_tree(stream)
    stream.flush()  # Ensure next chunk is byte-aligned
    print('* Pixel offset: %d' % stream.bytes_read)
    image = decode_pixels(height, width, trimmed_tree, stream)
    stream.close()
    print('Read %d bytes.' % stream.bytes_read)

    print('Image size: (height=%d, width=%d)' % (height, width))
    print('Trimmed tree: %s' % str(trimmed_tree))
    image.save(out_file_name)
Test Run
from PIL import ImageChops
import time

def raw_size(width, height):
    header_size = 2 * 16  # height and width as 16 bit values
    pixels_size = 3 * 8 * width * height  # 3 channels, 8 bits per channel
    return (header_size + pixels_size) / 8

def images_equal(file_name_a, file_name_b):
    image_a = Image.open(file_name_a)
    image_b = Image.open(file_name_b)
    diff = ImageChops.difference(image_a, image_b)
    return diff.getbbox() is None

if __name__ == '__main__':
    start = time.time()

    compress_image('flag.png', 'answer.txt')
    print('-' * 40)
    decompress_image('answer.txt', 'flag_out.png')

    stop = time.time()
    times = (stop - start) * 1000

    print('-' * 40)
    print('Run time takes %d milliseconds' % times)
    print('Images equal = %s' % images_equal('flag.png', 'flag_out.png'))
I ran the script with the sample image you provided.
Console Output:
Compressing "flag.png" -> "answer.txt"
Image shape: (height=18, width=23)
RAW image size: 1246 bytes
Counts: [(24, 90), (131, 90), (215, 90), (59, 324), (60, 324), (110, 324)]
Tree: (1242, ((594, ((270, ((90, 215), (180, ((90, 24), (90, 131))))), (324, 59))), (648, ((324, 60), (324, 110)))))
Trimmed tree: (((215, (24, 131)), 59), (60, 110))
Codes: {215: [0, 0, 0], 24: [0, 0, 1, 0], 131: [0, 0, 1, 1], 59: [0, 1], 60: [1, 0], 110: [1, 1]}
Estimated size: 379 bytes
Writing...
* Header offset: 0
* Tree offset: 4
* Pixel offset: 12
Wrote 379 bytes.
Estimate is correct.
Compression ratio: 3.29
----------------------------------------
Decompressing "answer.txt" -> "flag_out.png"
Reading...
* Header offset: 0
* Tree offset: 4
* Pixel offset: 12
Read 379 bytes.
Image size: (height=18, width=23)
Trimmed tree: (((215, (24, 131)), 59), (60, 110))
----------------------------------------
Run time takes 32 milliseconds
Images equal = True
Potential Improvements
Huffman table per colour channel
Palette image support
Transformation filter (delta coding per channel, or more sophisticated predictor; see the sketch after this list)
Model to handle repetitions (RLE, LZ...)
Canonical Huffman tables
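To give a taste of the transformation-filter idea, here is a minimal sketch of per-channel delta coding (the helper names are mine): differences between neighbouring intensities cluster near zero, so a Huffman table built over the deltas tends to code them more cheaply than the raw values.

def delta_encode(values):
    #Keep the first value, then store successive differences mod 256
    out = [values[0]]
    for prev, cur in zip(values, values[1:]):
        out.append((cur - prev) % 256)
    return out

def delta_decode(deltas):
    #Inverse filter: a running sum mod 256 restores the original values
    out = [deltas[0]]
    for d in deltas[1:]:
        out.append((out[-1] + d) % 256)
    return out

assert delta_decode(delta_encode([10, 12, 11, 200])) == [10, 12, 11, 200]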

Python Image Compression

I am using the Pillow library of Python to read in image files. How can I compress and decompress using Huffman encoding? Here is the instruction:
You have been given a set of example images and your goal is to compress them as much as possible without losing any perceptible information – upon decompression they should appear identical to the original images. Images are essentially stored as a series of points of color, where each point is represented as a combination of red, green, and blue (rgb). Each component of the rgb value ranges between 0-255, so for example: (100, 0, 200) would represent a shade of purple. Using a fixed-length encoding, each component of the rgb value requires 8 bits to encode (2^8 = 256), meaning that the entire rgb value requires 24 bits to encode. You could use a compression algorithm like Huffman encoding to reduce the number of bits needed for more common values and thereby reduce the total number of bits needed to encode your image.
# For my current code I just read the image, get all the rgb and build the tree
from PIL import Image
import sys, string
import copy

codes = {}

def sortFreq(freqs):
    letters = freqs.keys()
    tuples = []
    for let in letters:
        tuples.append((freqs[let], let))  # append a (freq, symbol) tuple
    tuples.sort()
    return tuples

def buildTree(tuples):
    while len(tuples) > 1:
        leastTwo = tuple(tuples[0:2])  # get the 2 to combine
        theRest = tuples[2:]           # all the others
        combFreq = leastTwo[0][0] + leastTwo[1][0]  # the branch points freq
        tuples = theRest + [(combFreq, leastTwo)]   # add branch point to the end
        tuples.sort()                  # sort it into place
    return tuples[0]  # Return the single tree inside the list

def trimTree(tree):
    # Trim the freq counters off, leaving just the letters
    p = tree[1]  # ignore freq count in [0]
    if type(p) == type(""):
        return p  # if just a leaf, return it
    else:
        return (trimTree(p[0]), trimTree(p[1]))  # trim left then right and recombine

def assignCodes(node, pat=''):
    global codes
    if type(node) == type(""):
        codes[node] = pat  # A leaf. Set its code
    else:
        assignCodes(node[0], pat + "0")  # Branch point. Do the left branch
        assignCodes(node[1], pat + "1")  # then do the right branch.

dictionary = {}
table = {}
image = Image.open('fall.bmp')
#image.show()
width, height = image.size
px = image.load()
totalpixel = width * height
print("Total pixel: " + str(totalpixel))

for x in range(width):
    for y in range(height):
        # print(px[x, y])
        for i in range(3):
            if dictionary.get(str(px[x, y][i])) is None:
                dictionary[str(px[x, y][i])] = 1
            else:
                dictionary[str(px[x, y][i])] = dictionary[str(px[x, y][i])] + 1

table = copy.deepcopy(dictionary)
#combination = len(dictionary)
#for value in table:
#    table[value] = table[value] / (totalpixel * combination) * 100
#print(table)
print(dictionary)

sortdic = sortFreq(dictionary)
tree = buildTree(sortdic)
trim = trimTree(tree)
print(trim)
assignCodes(trim)
print(codes)
The HuffmanCoding class takes the complete path of the text file to be compressed as a parameter (its data members store data specific to the input file).
The compress() function returns the path of the output compressed file.
The decompress() function requires the path of the file to be decompressed (and decompress() is to be called on the same object created for compression, so as to get the code mapping from its data members).
import heapq
import os

class HeapNode:
    def __init__(self, char, freq):
        self.char = char
        self.freq = freq
        self.left = None
        self.right = None

    # Python 3 removed the __cmp__ hook; heapq compares nodes with "<",
    # so define __lt__ instead
    def __lt__(self, other):
        if other is None or not isinstance(other, HeapNode):
            return NotImplemented
        return self.freq < other.freq

class HuffmanCoding:
    def __init__(self, path):
        self.path = path
        self.heap = []
        self.codes = {}
        self.reverse_mapping = {}

    # functions for compression:

    def make_frequency_dict(self, text):
        frequency = {}
        for character in text:
            if not character in frequency:
                frequency[character] = 0
            frequency[character] += 1
        return frequency

    def make_heap(self, frequency):
        for key in frequency:
            node = HeapNode(key, frequency[key])
            heapq.heappush(self.heap, node)

    def merge_nodes(self):
        while len(self.heap) > 1:
            node1 = heapq.heappop(self.heap)
            node2 = heapq.heappop(self.heap)

            merged = HeapNode(None, node1.freq + node2.freq)
            merged.left = node1
            merged.right = node2

            heapq.heappush(self.heap, merged)

    def make_codes_helper(self, root, current_code):
        if root is None:
            return
        if root.char is not None:
            self.codes[root.char] = current_code
            self.reverse_mapping[current_code] = root.char
            return
        self.make_codes_helper(root.left, current_code + "0")
        self.make_codes_helper(root.right, current_code + "1")

    def make_codes(self):
        root = heapq.heappop(self.heap)
        current_code = ""
        self.make_codes_helper(root, current_code)

    def get_encoded_text(self, text):
        encoded_text = ""
        for character in text:
            encoded_text += self.codes[character]
        return encoded_text

    def pad_encoded_text(self, encoded_text):
        extra_padding = 8 - len(encoded_text) % 8
        for i in range(extra_padding):
            encoded_text += "0"

        padded_info = "{0:08b}".format(extra_padding)
        encoded_text = padded_info + encoded_text
        return encoded_text

    def get_byte_array(self, padded_encoded_text):
        if len(padded_encoded_text) % 8 != 0:
            print("Encoded text not padded properly")
            exit(0)

        b = bytearray()
        for i in range(0, len(padded_encoded_text), 8):
            byte = padded_encoded_text[i:i+8]
            b.append(int(byte, 2))
        return b

    def compress(self):
        filename, file_extension = os.path.splitext(self.path)
        output_path = filename + ".bin"

        with open(self.path, 'r+') as file, open(output_path, 'wb') as output:
            text = file.read()
            text = text.rstrip()

            frequency = self.make_frequency_dict(text)
            self.make_heap(frequency)
            self.merge_nodes()
            self.make_codes()

            encoded_text = self.get_encoded_text(text)
            padded_encoded_text = self.pad_encoded_text(encoded_text)

            b = self.get_byte_array(padded_encoded_text)
            output.write(bytes(b))

        print("Compressed")
        return output_path

    """ functions for decompression: """

    def remove_padding(self, padded_encoded_text):
        padded_info = padded_encoded_text[:8]
        extra_padding = int(padded_info, 2)

        padded_encoded_text = padded_encoded_text[8:]
        encoded_text = padded_encoded_text[:-1*extra_padding]

        return encoded_text

    def decode_text(self, encoded_text):
        current_code = ""
        decoded_text = ""

        for bit in encoded_text:
            current_code += bit
            if current_code in self.reverse_mapping:
                character = self.reverse_mapping[current_code]
                decoded_text += character
                current_code = ""

        return decoded_text

    def decompress(self, input_path):
        filename, file_extension = os.path.splitext(self.path)
        output_path = filename + "_decompressed" + ".txt"

        with open(input_path, 'rb') as file, open(output_path, 'w') as output:
            bit_string = ""

            byte = file.read(1)
            while len(byte) > 0:  # read(1) returns b"" at end of file
                byte = ord(byte)
                bits = bin(byte)[2:].rjust(8, '0')
                bit_string += bits
                byte = file.read(1)

            encoded_text = self.remove_padding(bit_string)
            decompressed_text = self.decode_text(encoded_text)

            output.write(decompressed_text)

        print("Decompressed")
        return output_path
Running the program:
Save the above code in a file huffman.py.
Create a sample text file, or download a sample file from sample.txt (right click, save as).
Save the code below in the same directory as the above code, and run it (edit the path variable below before running: initialize it to your text file's path).
UseHuffman.py
from huffman import HuffmanCoding
#input file path
path = "/home/ubuntu/Downloads/sample.txt"
h = HuffmanCoding(path)
output_path = h.compress()
h.decompress(output_path)
The compressed .bin file and the decompressed file are both saved in the same directory as the input file.
Result
Running it on the sample text file linked above:
Initial Size: 715.3 kB
Compressed file Size: 394.0 kB
Plus, the decompressed file comes out to be exactly the same as the original file, without any data loss.
And that is all for the Huffman coding implementation, with compression and decompression. This was fun to code.
The above program requires the decompression function to be run using the same object that created the compressed file (because the code mapping is stored in its data members). We can also make the compression and decompression functions run independently if, during compression, we store the mapping info in the compressed file as well (at the beginning). Then, during decompression, we first read the mapping info from the file, and then use it to decompress the rest of the file.
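A minimal sketch of that idea, assuming a JSON dump of the code mapping is acceptable as a header (the 4-byte length-prefix layout here is made up for illustration):

import json
import struct

def write_with_mapping(output, byte_array, reverse_mapping):
    #Hypothetical header layout: 4-byte big-endian length, then a JSON
    #dump of the code->character mapping, then the padded encoded bytes
    header = json.dumps(reverse_mapping).encode('utf-8')
    output.write(struct.pack('>I', len(header)))
    output.write(header)
    output.write(bytes(byte_array))

def read_mapping(file):
    #Reads the header back and leaves the file positioned at the start
    #of the encoded data
    (header_len,) = struct.unpack('>I', file.read(4))
    return json.loads(file.read(header_len).decode('utf-8'))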

struct.error: argument for 's' must be a bytes object

I'm trying to upgrade an old Python 2.x class to work with 3.x (written by someone else, and not maintained). Here is a fragment.
def getByte(self):
    if (self.available < 1):
        self.request(4096)
    self.available = self.available - 1
    result = ord(self.bytes[0])
    self.bytes = self.bytes[1:]
    return result

def request(self, size):
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect((self.host, self.port))
    contentSize = len(self.user) + len(self.password) + 6
    contentString = "!BHB" + str(len(self.user)) + "sB" + str(len(self.password)) + "sL"
    data = pack(contentString, 0, contentSize, len(self.user), self.user, len(self.password), self.password, size)
    sock.sendall(data)
    print("Sent!")
    data = sock.recv(6)
    fields = unpack("!BBL", str(data))
    if (fields[0] != 0):
        raise(ServiceDeniedError(fields[0], fields[1]))
    self.bytes = sock.recv(fields[2])
    self.available = len(self.bytes)
    print("Received: ")
    print(self.available)
    sock.close()
Imported and called with print(rand.getByte()).
Traceback (relevant lines):
print (rand.getByte())
self.request(4096);
data = pack(contentString, 0, contentSize, len(self.user), self.user, len(self.password), self.password, size)
struct.error: argument for 's' must be a bytes object
I figured it out. Two arguments in the pack() function had to be converted to bytes.
data = pack(contentString, 0, contentSize, len(self.user), bytes(self.user, 'utf-8'), len(self.password), bytes(self.password, 'utf-8'), size)
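As a quick illustration (not from the original post), this is what the 's' format does with str versus bytes in Python 3:

>>> from struct import pack
>>> pack("5s", "hello")
struct.error: argument for 's' must be a bytes object
>>> pack("5s", bytes("hello", "utf-8"))
b'hello'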

Python - binary to num and num to binary - Wrong Output

While working on some complex project, I came across an interesting bug:
One program reads a file, converts each byte to a bit string, converts that to an integer, and writes the integers to a file.
Another program reads this file, converts the integers back to binary and writes the bytes to a file.
Ideally, the input file and the reconstructed file should be the same. But that is not happening.
Please find the code below:
# read file -> convert to binary -> binary to num -> write file
def bits(f):
    byte = (ord(b) for b in f.read())
    for b in byte:
        bstr = []
        for i in range(8):
            bstr.append((b >> i) & 1)
        yield bstr

def binaryToNum(S):
    bits = len(S)
    if (S == ''): return 0
    elif (S[0] == '0'): return binaryToNum(S[1:])
    elif (S[0] == '1'): return ((2**(bits-1))) + binaryToNum(S[1:])

bstr = []
for b in bits(open('input_test', 'r')):
    bstr.append(b)

dstr = ''
for i in bstr:
    b_num = str(binaryToNum(''.join(str(e) for e in i))).zfill(6)
    dstr = dstr + b_num

ter = open('im1', 'w')
for item in dstr:
    ter.write(item)
ter.close()
This part seems correct; I checked manually for a-z, A-Z and 0-9.
The code on the other machine does this:
def readDecDataFromFile(filename):
    data = []
    with open(filename) as f:
        data = data + f.readlines()
    chunks, chunk_size = len(data[0]), 6
    return [data[0][i:i+chunk_size] for i in range(0, chunks, chunk_size)]

def numToBinary(N):
    return str(int(bin(int(N))[2:]))

ddata = readDecDataFromFile('im1')
bytes = []
for d in ddata:
    bits = numToBinary(d)
    bytes.append(int(bits[::-1], 2).to_bytes(1, 'little'))

f = open('orig_input', 'wb')
for b in bytes:
    f.write(b)
f.close()
And here is the output:
input_test: my name is XYZ
orig_input: my7ameisY-
The bytes list in the last code yields:
[b'm', b'y', b'\x01', b'7', b'a', b'm', b'e', b'\x01', b'i', b's', b'\x01', b'\x0b', b'Y', b'-', b'\x05']
What could be the potential error?
Two modifications are required.
While reading the bits, the current order is little-endian. To fix it,
reversed(range(8))
should be used in the bits function.
While converting from bits to bytes at the time of writing, the bit string is reversed again. That is not needed, so the code changes from
bytes.append(int(bits[::-1], 2).to_bytes(1, 'little'))
to
bytes.append(int(bits, 2).to_bytes(1, 'little'))
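With the first change applied, the corrected bits function from the first script reads (same code as in the question, only the loop order changes so the most significant bit comes first):

def bits(f):
    byte = (ord(b) for b in f.read())
    for b in byte:
        bstr = []
        for i in reversed(range(8)):  #most significant bit first
            bstr.append((b >> i) & 1)
        yield bstr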

How to read binary-protobuf gz files in Spark / Spark Streaming?

I have to read a gz file from local / HDFS / Kafka, and decompress it and parse it. Does anyone have any experience with this?
Or with other types like bin.tar.gz?
You can use sc.binaryFiles to read binary files and do whatever you like with the content bytes.
As for tar.gz, see Read whole text files from a compression in Spark
This is what I did:
1. Read binary data:
data = sc.binaryFiles(path)
2. Extract content:
import io
import gzip

def ungzip(df):
    compressed_file = io.BytesIO(df)
    decompressed_file = gzip.GzipFile(fileobj=compressed_file)
    return decompressed_file.read()

data = (data
        .map(lambda x: (x[0], ungzip(x[1])))
        )
3. Parse messages:
def _VarintDecoder(mask):
    local_ord = ord

    def DecodeVarint(buffer, pos):
        result = 0
        shift = 0
        while 1:
            if pos > len(buffer) - 1:
                raise NotEnoughDataExcption("Not enough data to decode varint")
            b = local_ord(buffer[pos])
            result |= ((b & 0x7f) << shift)
            pos += 1
            if not (b & 0x80):
                result &= mask
                return (result, pos)
            shift += 7
            if shift >= 64:
                raise ValueError('Too many bytes when decoding varint.')

    return DecodeVarint

def parse_binary(data):
    decoder = _VarintDecoder((1 << 64) - 1)
    next_pos, pos = 0, 0
    messages = []
    try:
        while 1:
            next_pos, pos = decoder(data[1], pos)
            messages.append((data[0], data[1][pos:pos + next_pos]))
            pos += next_pos
    except:
        return messages

data = (data
        .flatMap(lambda x: parse_binary(x))
        )
After this you have your protobuf messages, one per row, and you can apply your protobuf parsing function in parallel.
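For the last step, here is a minimal sketch, assuming a protoc-generated module my_proto_pb2 with a message type MyMessage (both names are placeholders for whatever your .proto actually defines):

import my_proto_pb2  #hypothetical module generated by protoc

def parse_message(record):
    #record is a (filename, serialized_bytes) pair produced by parse_binary
    filename, raw = record
    msg = my_proto_pb2.MyMessage()
    msg.ParseFromString(raw)  #standard protobuf Python deserialization call
    return (filename, msg)

parsed = data.map(parse_message)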
