struct.error: argument for 's' must be a bytes object - python-3.x

I'm trying to upgrade an old Python 2.x class to work with 3.x (written by someone else, and not maintained). Here is a fragment.
def getByte(self):
    if self.available < 1:
        self.request(4096)
    self.available = self.available - 1
    result = ord(self.bytes[0])
    self.bytes = self.bytes[1:]
    return result

def request(self, size):
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect((self.host, self.port))
    contentSize = len(self.user) + len(self.password) + 6
    contentString = "!BHB" + str(len(self.user)) + "sB" + str(len(self.password)) + "sL"
    data = pack(contentString, 0, contentSize, len(self.user), self.user, len(self.password), self.password, size)
    sock.sendall(data)
    print("Sent!")
    data = sock.recv(6)
    fields = unpack("!BBL", str(data))
    if fields[0] != 0:
        raise ServiceDeniedError(fields[0], fields[1])
    self.bytes = sock.recv(fields[2])
    self.available = len(self.bytes)
    print("Received: ")
    print(self.available)
    sock.close()
Imported and called with print(rand.getByte()). Abridged traceback:
print (rand.getByte())
self.request(4096);
data = pack(contentString, 0, contentSize, len(self.user), self.user, len(self.password), self.password, size)
struct.error: argument for 's' must be a bytes object

I figured it out. Two arguments in the pack() function had to be converted to bytes.
data = pack(contentString, 0, contentSize, len(self.user), bytes(self.user, 'utf-8'), len(self.password), bytes(self.password, 'utf-8'), size)
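For anyone hitting the same error: in Python 3 the 's' format code packs bytes, not str, so every string argument must be encoded first. A minimal demonstration:

from struct import pack

user = "alice"
# pack("!B5s", 0, user)  # struct.error: argument for 's' must be a bytes object
data = pack("!B5s", 0, user.encode("utf-8"))  # works once the str is encoded

Note that the unpack("!BBL", str(data)) call in request() has the mirror-image problem: unpack() also wants bytes in Python 3, and sock.recv() already returns bytes, so passing data directly should work.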

Related

Python: stream a tarfile to S3 using multipart upload

I would like to create a .tar file in an S3 bucket from Python code running in an AWS Lambda function. Lambda functions are very memory- and disk-constrained. I want to create a .tar file that contains multiple files that are too large to fit in the Lambda function's memory or disk space.
Using "S3 multipart upload," it is possible to upload a large file by uploading chunks of 5MB or more in size. I have this figured out and working. What I need to figure out is how to manage a buffer of bytes in memory that won't grow past the limits of the Lambda function's runtime environment.
I think the solution is to create an io.BytesIO() object and manage both a read pointer and a write pointer. I can then write into the buffer (from files that I want to add to the .tar file) and every time the buffer exceeds some limit (like 5MB) I can read off a chunk of data and send another file part to S3.
What I haven't quite wrapped my head around is how to truncate the part of the buffer that has been read and is no longer needed in memory. I need to trim the head of the buffer, not the tail, so the truncate() function of BytesIO won't work for me.
Is the 'correct' solution to create a new BytesIO buffer, populating it with the contents of the existing buffer from the read pointer to the end of the buffer, when I truncate? Is there a better way to truncate the head of the BytesIO buffer? Is there a better solution than using BytesIO?
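For concreteness, here is a minimal sketch of the copy-on-truncate idea I am asking about, assuming a read pointer is tracked alongside the buffer (the class in my answer below uses the same trick):

import io

def truncate_head(buf: io.BytesIO, read_pointer: int) -> io.BytesIO:
    # Copy the unread tail into a fresh BytesIO; the consumed head is dropped.
    buf.seek(read_pointer)
    return io.BytesIO(buf.read())

This costs one copy of the unread tail per truncation, which stays small if the buffer is truncated right after each chunk is read.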
For the random Google-r who stumbles onto this question six years in the future and thinks, "man, that describes my problem exactly!", here's what I came up with:
import io
import struct
from tarfile import BLOCKSIZE
#This class was designed to write a .tar file to S3 using multipart upload
#in a memory- and disk-constrained environment, such as AWS Lambda Functions.
#
#Much of this code is copied or adapted from the Python source code tarfile.py
#file at https://github.com/python/cpython/blob/3.10/Lib/tarfile.py
#
#No warranties expressed or implied. Your mileage may vary. Lather, rinse, repeat
class StreamingTarFileWriter:
    # Various constants from tarfile.py that we need
    GNU_FORMAT = 1
    NUL = b"\0"
    BLOCKSIZE = 512
    RECORDSIZE = BLOCKSIZE * 20

    class MemoryByteStream:
        def __init__(self, bufferFullCallback=None, bufferFullByteCount=0):
            self.buf = io.BytesIO()
            self.readPointer = 0
            self.writePointer = 0
            self.bufferFullCallback = bufferFullCallback
            self.bufferFullByteCount = bufferFullByteCount

        def write(self, buf: bytes):
            self.buf.seek(self.writePointer)
            self.writePointer += self.buf.write(buf)
            bytesAvailableToRead = self.writePointer - self.readPointer
            if self.bufferFullByteCount > 0 and bytesAvailableToRead > self.bufferFullByteCount:
                if self.bufferFullCallback:
                    self.bufferFullCallback(self, bytesAvailableToRead)

        def read(self, byteCount=None):
            self.buf.seek(self.readPointer)
            if byteCount:
                chunk = self.buf.read(byteCount)
            else:
                chunk = self.buf.read()
            self.readPointer += len(chunk)
            self._truncate()
            return chunk

        def size(self):
            return self.writePointer - self.readPointer

        def _truncate(self):
            # Copy the unread tail into a fresh BytesIO so the consumed head
            # can be garbage-collected, then reset both pointers.
            self.buf.seek(self.readPointer)
            self.buf = io.BytesIO(self.buf.read())
            self.readPointer = 0
            self.writePointer = self.buf.seek(0, 2)

    def stn(self, s, length, encoding, errors):
        # Convert a string to a null-terminated bytes object.
        s = s.encode(encoding, errors)
        return s[:length] + (length - len(s)) * self.NUL

    def itn(self, n, digits=8, format=GNU_FORMAT):
        # Convert a python number to a number field.
        # POSIX 1003.1-1988 requires numbers to be encoded as a string of
        # octal digits followed by a null-byte, this allows values up to
        # (8**(digits-1))-1. GNU tar allows storing numbers greater than
        # that if necessary. A leading 0o200 or 0o377 byte indicate this
        # particular encoding, the following digits-1 bytes are a big-endian
        # base-256 representation. This allows values up to (256**(digits-1))-1.
        # A 0o200 byte indicates a positive number, a 0o377 byte a negative
        # number.
        original_n = n
        n = int(n)
        if 0 <= n < 8 ** (digits - 1):
            s = bytes("%0*o" % (digits - 1, n), "ascii") + self.NUL
        elif format == self.GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
            if n >= 0:
                s = bytearray([0o200])
            else:
                s = bytearray([0o377])
                n = 256 ** digits + n
            for i in range(digits - 1):
                s.insert(1, n & 0o377)
                n >>= 8
        else:
            raise ValueError("overflow in number field")
        return s

    def calc_chksums(self, buf):
        """Calculate the checksum for a member's header by summing up all
        characters except for the chksum field which is treated as if
        it was filled with spaces. According to the GNU tar sources,
        some tars (Sun and NeXT) calculate chksum with signed char,
        which will be different if there are chars in the buffer with
        the high bit set. So we calculate two checksums, unsigned and
        signed.
        """
        unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
        signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
        return unsigned_chksum, signed_chksum

    def __init__(self, bufferFullCallback=None, bufferFullByteCount=0):
        self.buf = self.MemoryByteStream(bufferFullCallback, bufferFullByteCount)
        self.expectedFileSize = 0
        self.fileBytesWritten = 0
        self.offset = 0

    def addFileRecord(self, filename, filesize):
        REGTYPE = b"0"  # regular file
        encoding = "utf-8"
        LENGTH_NAME = 100
        GNU_MAGIC = b"ustar  \0"  # magic gnu tar string (two spaces before the NUL)
        errors = "surrogateescape"
        # Copied from TarInfo.tobuf()
        tarinfo = {
            "name": filename,
            "mode": 0o644,
            "uid": 0,
            "gid": 0,
            "size": filesize,
            "mtime": 0,
            "chksum": 0,
            "type": REGTYPE,
            "linkname": "",
            "uname": "",
            "gname": "",
            "devmajor": 0,
            "devminor": 0,
            "magic": GNU_MAGIC
        }
        buf = b""
        if len(tarinfo["name"].encode(encoding, errors)) > LENGTH_NAME:
            raise Exception("Filename is too long for tar file header.")
        devmajor = self.stn("", 8, encoding, errors)
        devminor = self.stn("", 8, encoding, errors)
        parts = [
            self.stn(tarinfo.get("name", ""), 100, encoding, errors),
            self.itn(tarinfo.get("mode", 0) & 0o7777, 8, self.GNU_FORMAT),
            self.itn(tarinfo.get("uid", 0), 8, self.GNU_FORMAT),
            self.itn(tarinfo.get("gid", 0), 8, self.GNU_FORMAT),
            self.itn(tarinfo.get("size", 0), 12, self.GNU_FORMAT),
            self.itn(tarinfo.get("mtime", 0), 12, self.GNU_FORMAT),
            b"        ",  # checksum field (eight spaces for now)
            tarinfo.get("type", REGTYPE),
            self.stn(tarinfo.get("linkname", ""), 100, encoding, errors),
            tarinfo.get("magic", GNU_MAGIC),
            self.stn(tarinfo.get("uname", ""), 32, encoding, errors),
            self.stn(tarinfo.get("gname", ""), 32, encoding, errors),
            devmajor,
            devminor,
            self.stn(tarinfo.get("prefix", ""), 155, encoding, errors)
        ]
        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
        chksum = self.calc_chksums(buf[-BLOCKSIZE:])[0]
        buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
        self.buf.write(buf)
        self.expectedFileSize = filesize
        self.fileBytesWritten = 0
        self.offset += len(buf)

    def addFileData(self, buf):
        self.buf.write(buf)
        self.fileBytesWritten += len(buf)
        self.offset += len(buf)

    def completeFileRecord(self):
        if self.fileBytesWritten != self.expectedFileSize:
            raise Exception(f"Expected {self.expectedFileSize:,} bytes but {self.fileBytesWritten:,} were written.")
        # write the end-of-file marker
        blocks, remainder = divmod(self.fileBytesWritten, BLOCKSIZE)
        if remainder > 0:
            self.buf.write(self.NUL * (BLOCKSIZE - remainder))
            self.offset += BLOCKSIZE - remainder

    def completeTarFile(self):
        self.buf.write(self.NUL * (BLOCKSIZE * 2))
        self.offset += (BLOCKSIZE * 2)
        blocks, remainder = divmod(self.offset, self.RECORDSIZE)
        if remainder > 0:
            self.buf.write(self.NUL * (self.RECORDSIZE - remainder))
An example use of the class is:
OUTPUT_CHUNK_SIZE = 1024 * 1024 * 5
f_out = open("test.tar", "wb")

def get_file_block(blockNum):
    block = f"block_{blockNum:010,}"
    block += "0123456789abcdef" * 31
    return bytes(block, 'ascii')

def buffer_full_callback(x: StreamingTarFileWriter.MemoryByteStream, bytesAvailable: int):
    while x.size() > OUTPUT_CHUNK_SIZE:
        buf = x.read(OUTPUT_CHUNK_SIZE)
        # This is where you would write the chunk to S3
        f_out.write(buf)

x = StreamingTarFileWriter(buffer_full_callback, OUTPUT_CHUNK_SIZE)

import random
numFiles = random.randint(3, 8)
print(f"Creating {numFiles:,} files.")
for fileIdx in range(numFiles):
    minSize = 1025  # 1kB plus 1 byte
    maxSize = 10 * 1024 * 1024 * 1024 + 5  # 10GB plus 5 bytes
    numBytes = random.randint(minSize, maxSize)
    print(f"Creating file {str(fileIdx)} with {numBytes:,} bytes.")
    blocks, remainder = divmod(numBytes, 512)
    x.addFileRecord(f"File{str(fileIdx)}", numBytes)
    for block in range(blocks):
        x.addFileData(get_file_block(block))
    x.addFileData(bytes(("X" * remainder), 'ascii'))
    x.completeFileRecord()
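To write the chunks to S3 instead of a local file, the callback can feed S3's multipart-upload API. A rough sketch using boto3 (the bucket and key names are placeholders; error handling and abort-on-failure logic are omitted):

import boto3

s3 = boto3.client("s3")
bucket, key = "my-bucket", "archive.tar"  # placeholders
mpu = s3.create_multipart_upload(Bucket=bucket, Key=key)
parts = []

def buffer_full_callback(x, bytesAvailable):
    while x.size() > OUTPUT_CHUNK_SIZE:
        chunk = x.read(OUTPUT_CHUNK_SIZE)
        resp = s3.upload_part(Bucket=bucket, Key=key, UploadId=mpu["UploadId"],
                              PartNumber=len(parts) + 1, Body=chunk)
        parts.append({"PartNumber": len(parts) + 1, "ETag": resp["ETag"]})

# After completeTarFile(), upload whatever is left in the buffer as the final
# (possibly smaller than 5MB) part, then finish the upload:
s3.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=mpu["UploadId"],
                             MultipartUpload={"Parts": parts})

Every part except the last must be at least 5MB, which is exactly the OUTPUT_CHUNK_SIZE threshold used above.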

How to pass values from one thread to another thread Python?

I am new to Python and do not have much experience with its syntax.
I want to send a value from the RFID reader: if it saves successfully, the PIR motion sensor should be disabled for a moment and then resume after a specified time.
Likewise, when the temperature is measured, the value should be sent to the RFID reader function to record the temperature, and the PIR motion sensor should stop working then as well. (One way to do this with queue.Queue and threading.Event is sketched after the code below.)
class Security:
    global dbHost
    global dbUser
    global dbPass
    global dbNameEmp
    global dbNameSecure
    dbHost = 'localhost'
    dbUser = 'root'
    dbPass = 'iTdev#2020'
    dbNameEmp = 'empinfo'
    dbNameSecure = 'secureinfo'

    def __init__(self):
        self.msg = ""

    def Temperature(self):
        while True:
            GPIO.output(TRIG, False)
            time.sleep(0.5)
            GPIO.output(TRIG, True)
            time.sleep(0.01)
            GPIO.output(TRIG, False)
            while GPIO.input(ECHO) == 0:
                pulse_start = time.time()
            while GPIO.input(ECHO) == 1:
                pulse_end = time.time()
            pulse_duration = pulse_end - pulse_start
            distance = pulse_duration * 11150
            distance = round(distance, 2)
            if distance > 0 and distance < 3:
                buzzer.on()
                time.sleep(0.1)
                buzzer.off()
                print("Distance: %scm" % (distance))
                # print "Ambient Temperature :", sensor.get_ambient()
                print("Temperature: %.1fc" % (sensor.get_object_1()))
                msgTemp = ("%.1f" % (sensor.get_object_1()))
                w, h = textsize(msgTemp, font=proportional(CP437_FONT))
                if w <= device.width:
                    x = round((device.width - w) / 2)
                    with canvas(device) as draw:
                        text(draw, (x, 0), msgTemp, fill="white", font=proportional(CP437_FONT))
                else:
                    show_message(device, msgTemp, fill="white", font=proportional(CP437_FONT), scroll_delay=0.04)
                time.sleep(1)
                device.clear()

    def rfid_callback(self, state, dev):
        rfid_presented = ""
        keys = "X^1234567890XXXXqwertzuiopXXXXasdfghjklXXXXXyxcvbnmXXXXXXXXXXXXXXXXXXXXXXX"
        while True:
            r, w, x = select([dev], [], [])
            for event in dev.read():
                if event.type == 1 and event.value == 1:
                    if event.code == 28:
                        rfid_presented = rfid_presented.replace("X", "")
                        travel = state.replace("Thread-", "")
                        dbConnEmp = mysql.connect(host=dbHost, user=dbUser, passwd=dbPass, db=dbNameEmp)
                        curEmp = dbConnEmp.cursor()
                        curEmp.execute("Select RFidEmp FROM RFidMaster WHERE (RFidStatus = 1) AND RFidNumber = '%s'" % (rfid_presented))
                        resultRFid = curEmp.fetchone()
                        if curEmp.rowcount != 1:
                            # print("Access Denied." + travel)
                            with canvas(device) as draw:
                                text(draw, (0, 0), "None", fill="white", font=proportional(CP437_FONT))
                            time.sleep(0.5)
                            device.clear()
                        else:
                            # print("Unlocking Door." + travel)
                            dbConnSecure = mysql.connect(host=dbHost, user=dbUser, passwd=dbPass, db=dbNameSecure)
                            curSecure = dbConnSecure.cursor()
                            curSecure.execute("SELECT EntraId,DATE_FORMAT(CreateDate, '%Y%m%d') AS CreateDate FROM entranlog ORDER BY EntraId DESC, CreateDate DESC LIMIT 1")
                            resultKey = curSecure.fetchone()
                            KeyDate = time.strftime("%Y%m%d")
                            if curSecure.rowcount != 1:
                                KeyId = KeyDate + "0001"
                            else:
                                if resultKey[1] == KeyDate:
                                    iSum = int(resultKey[0])
                                    def sum(x, y):
                                        return x + y
                                    KeyId = ('%d' % sum(iSum, 1))
                                else:
                                    KeyId = KeyDate + "0001"
                            create_date = time.strftime('%Y-%m-%d %H:%M:%S')
                            insertSecure = "INSERT INTO entranlog (EntraId,EntraAction,EntraStatus,CreateBy,CreateDate) VALUES (%s,%s,%s,%s,%s)"
                            valSecure = (KeyId, travel, 1, resultRFid[0], create_date)
                            curSecure.execute(insertSecure, valSecure)
                            dbConnSecure.commit()
                            # print("Welcome: " + resultRFid[0])
                            with canvas(device) as draw:
                                text(draw, (0, 0), "Hello", fill="white", font=proportional(CP437_FONT))
                            print(curSecure.rowcount, "record inserted.")
                            time.sleep(0.8)
                            device.clear()
                        rfid_presented = ""
                    else:
                        rfid_presented += keys[event.code]

    def PirSensor(self):
        while True:
            led_blue.on()
            time.sleep(0.1)
            current_state = GPIO.input(pir_sensor)
            if current_state == 1:
                print("GPIO pin %s is %s" % (pir_sensor, current_state))
                GPIO.output(relay_alarm, True)
                led_blue.off()
                led_red.blink(0.1, 0.2)
                # buzzer.beep(0.5, 0.5, 3)
                time.sleep(5)
                GPIO.output(relay_alarm, False)
                led_red.off()
                led_yellow.blink(0.5, 0.5)
                time.sleep(5)
                led_yellow.off()

    def main(self):
        devIn = threading.Thread(target=self.rfid_callback, args=['1', InputDevice('/dev/input/event0')])
        devIn.start()
        devOut = threading.Thread(target=self.rfid_callback, args=['2', InputDevice('/dev/input/event1')])
        devOut.start()
        Pir = threading.Thread(target=self.PirSensor)
        Pir.start()
        Temp = threading.Thread(target=self.Temperature)
        Temp.start()

if __name__ == "__main__":
    g = Security()
    g.main()
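A common way to pass values between threads like these is the standard library's queue.Queue together with a threading.Event: the temperature thread posts readings to a queue, the RFID thread consumes them, and an event flag pauses the PIR thread for a while. A minimal sketch of the pattern (the sensor reads and timings are placeholders, not taken from the code above):

import threading
import queue
import time

temp_queue = queue.Queue()      # Temperature thread -> RFID thread
pir_paused = threading.Event()  # set = PIR sensor temporarily disabled

def temperature_worker():
    while True:
        temp_queue.put(36.5)  # placeholder for a real sensor reading
        time.sleep(1)

def rfid_worker():
    while True:
        temp = temp_queue.get()  # blocks until a reading arrives
        # ... save the RFID record together with temp here ...
        pir_paused.set()         # disable the PIR sensor for a moment
        time.sleep(5)            # placeholder for "the specified time"
        pir_paused.clear()       # let the PIR sensor resume

def pir_worker():
    while True:
        if not pir_paused.is_set():
            pass  # ... normal PIR polling goes here ...
        time.sleep(0.1)

for fn in (temperature_worker, rfid_worker, pir_worker):
    threading.Thread(target=fn, daemon=True).start()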

I want to trace the recursive code that I have written

I am new to recursion. I have written code to find the path to a given node; when I trace it by hand (following the call stack) it gives the right answer, but when I run it on the machine it does not produce the expected output. Can someone please help me trace the code (e.g., using the call stack)?
class NewNode:
    def __init__(self, data):
        self.data = data
        self.left = self.right = None

arr = [1, 2, 3, 4, 5, 6, 7]
q = []

def create_level_order_binary_tree(i):
    root = None
    if i < len(arr):
        root = NewNode(arr[i])
        root.left = create_level_order_binary_tree(2 * i + 1)
        root.right = create_level_order_binary_tree(2 * i + 2)
    return root

def dfs(root, p, temp_path, path):
    print(temp_path)
    if root is None:
        return path
    if root.data == p:
        if len(temp_path) == 0:
            path.append(root.data)
            return path
        else:
            temp_path.append(root.data)
            path.append(temp_path)
            return path
    temp_path.append(root.data)
    path = dfs(root.left, 6, temp_path, path)
    if len(path) == 0:
        path = dfs(root.right, 6, temp_path, path)
    return path

root_node = create_level_order_binary_tree(0)
path_to_node = dfs(root_node, 6, [], [])
print(path_to_node)
The following are two approaches to solving your problem. While I haven't timed the routines, I suspect the non-recursive approach will be faster, since it doesn't use the call stack as much.
First, a non-recursive approach employing a simple stack (Last In, First Out) data structure.
from copy import deepcopy

def nrc_dfs(nde, p):
    stck = [(nde, [])]  # LIFO stack implementation
    while stck:
        nd, pth = stck.pop()  # pop the last entry
        if nd:
            pth.append(nd.data)
            if nd.data == p:
                return pth
            stck.append((nd.right, deepcopy(pth)))
            stck.append((nd.left, deepcopy(pth)))
    return []
The second approach, using a recursive technique.
def rc_dfs(nde, p):
    def dfs_sch(nde, p, path):
        if nde:
            path.append(nde.data)
            if nde.data == p:
                return path
            pl = dfs_sch(nde.left, p, [])
            if pl:
                path.extend(pl)
                return path
            pr = dfs_sch(nde.right, p, [])
            if pr:
                path.extend(pr)
                return path
        return []
    return dfs_sch(nde, p, [])
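Both functions work directly on the NewNode tree from the question, for example:

root_node = create_level_order_binary_tree(0)  # the tree built in the question
print(nrc_dfs(root_node, 6))  # [1, 3, 6]
print(rc_dfs(root_node, 6))   # [1, 3, 6]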

Python Image Compression

I am using the Pillow library in Python to read in image files. How can I compress and decompress them using Huffman encoding? Here is the instruction:
You have been given a set of example images and your goal is to compress them as much as possible without losing any perceptible information – upon decompression they should appear identical to the original images. Images are essentially stored as a series of points of color, where each point is represented as a combination of red, green, and blue (RGB). Each component of the RGB value ranges between 0-255, so for example: (100, 0, 200) would represent a shade of purple. Using a fixed-length encoding, each component of the RGB value requires 8 bits to encode (2^8 = 256), meaning that the entire RGB value requires 24 bits to encode. You could use a compression algorithm like Huffman encoding to reduce the number of bits needed for more common values and thereby reduce the total number of bits needed to encode your image.
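For intuition, here is a small toy sketch (separate from my actual code below) showing how Huffman coding gives the most frequent value the shortest code; the values and counts are made up:

import heapq
from collections import Counter

values = [200] * 6 + [0] * 2 + [100] + [255]  # 200 is by far the most common
# Heap entries are (frequency, tiebreaker, tree); a tree is a value or a (left, right) pair.
heap = [(f, i, v) for i, (v, f) in enumerate(Counter(values).items())]
heapq.heapify(heap)
next_id = len(heap)
while len(heap) > 1:
    f1, _, t1 = heapq.heappop(heap)  # merge the two rarest trees
    f2, _, t2 = heapq.heappop(heap)
    heapq.heappush(heap, (f1 + f2, next_id, (t1, t2)))
    next_id += 1

codes = {}
def walk(tree, prefix=""):
    if isinstance(tree, tuple):
        walk(tree[0], prefix + "0")
        walk(tree[1], prefix + "1")
    else:
        codes[tree] = prefix or "0"

walk(heap[0][2])
print(codes)  # 200 ends up with a 1-bit code instead of a fixed 8 bits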
# For my current code I just read the image, get all the rgb and build the tree
from PIL import Image
import sys, string
import copy

codes = {}

def sortFreq(freqs):
    letters = freqs.keys()
    tuples = []
    for let in letters:
        tuples.append((freqs[let], let))  # append a (freq, letter) tuple
    tuples.sort()
    return tuples

def buildTree(tuples):
    while len(tuples) > 1:
        leastTwo = tuple(tuples[0:2])  # get the 2 to combine
        theRest = tuples[2:]  # all the others
        combFreq = leastTwo[0][0] + leastTwo[1][0]  # the branch points freq
        tuples = theRest + [(combFreq, leastTwo)]  # add branch point to the end
        tuples.sort()  # sort it into place
    return tuples[0]  # Return the single tree inside the list

def trimTree(tree):
    # Trim the freq counters off, leaving just the letters
    p = tree[1]  # ignore freq count in [0]
    if type(p) == type(""):
        return p  # if just a leaf, return it
    else:
        return (trimTree(p[0]), trimTree(p[1]))  # trim left then right and recombine

def assignCodes(node, pat=''):
    global codes
    if type(node) == type(""):
        codes[node] = pat  # A leaf. Set its code
    else:
        assignCodes(node[0], pat + "0")  # Branch point. Do the left branch
        assignCodes(node[1], pat + "1")  # then do the right branch.

dictionary = {}
table = {}
image = Image.open('fall.bmp')
# image.show()
width, height = image.size
px = image.load()
totalpixel = width * height
print("Total pixel: " + str(totalpixel))

for x in range(width):
    for y in range(height):
        # print(px[x, y])
        for i in range(3):
            if dictionary.get(str(px[x, y][i])) is None:
                dictionary[str(px[x, y][i])] = 1
            else:
                dictionary[str(px[x, y][i])] = dictionary[str(px[x, y][i])] + 1

table = copy.deepcopy(dictionary)
# combination = len(dictionary)
# for value in table:
#     table[value] = table[value] / (totalpixel * combination) * 100
# print(table)
print(dictionary)

sortdic = sortFreq(dictionary)
tree = buildTree(sortdic)
trim = trimTree(tree)
print(trim)
assignCodes(trim)
print(codes)
The HuffmanCoding class takes the complete path of the text file to be compressed as a parameter (its data members store data specific to the input file).
The compress() function returns the path of the output compressed file.
The decompress() function requires the path of the file to be decompressed (and decompress() is to be called on the same object that was used for compression, so as to get the code mapping from its data members).
import heapq
import os

class HeapNode:
    def __init__(self, char, freq):
        self.char = char
        self.freq = freq
        self.left = None
        self.right = None

    def __lt__(self, other):
        # Python 3's heapq compares nodes with <, so define __lt__ (not __cmp__)
        return self.freq < other.freq

class HuffmanCoding:
    def __init__(self, path):
        self.path = path
        self.heap = []
        self.codes = {}
        self.reverse_mapping = {}

    # functions for compression:

    def make_frequency_dict(self, text):
        frequency = {}
        for character in text:
            if not character in frequency:
                frequency[character] = 0
            frequency[character] += 1
        return frequency

    def make_heap(self, frequency):
        for key in frequency:
            node = HeapNode(key, frequency[key])
            heapq.heappush(self.heap, node)

    def merge_nodes(self):
        while len(self.heap) > 1:
            node1 = heapq.heappop(self.heap)
            node2 = heapq.heappop(self.heap)
            merged = HeapNode(None, node1.freq + node2.freq)
            merged.left = node1
            merged.right = node2
            heapq.heappush(self.heap, merged)

    def make_codes_helper(self, root, current_code):
        if root is None:
            return
        if root.char is not None:
            self.codes[root.char] = current_code
            self.reverse_mapping[current_code] = root.char
            return
        self.make_codes_helper(root.left, current_code + "0")
        self.make_codes_helper(root.right, current_code + "1")

    def make_codes(self):
        root = heapq.heappop(self.heap)
        current_code = ""
        self.make_codes_helper(root, current_code)

    def get_encoded_text(self, text):
        encoded_text = ""
        for character in text:
            encoded_text += self.codes[character]
        return encoded_text

    def pad_encoded_text(self, encoded_text):
        extra_padding = 8 - len(encoded_text) % 8
        for i in range(extra_padding):
            encoded_text += "0"
        padded_info = "{0:08b}".format(extra_padding)
        encoded_text = padded_info + encoded_text
        return encoded_text

    def get_byte_array(self, padded_encoded_text):
        if len(padded_encoded_text) % 8 != 0:
            print("Encoded text not padded properly")
            exit(0)
        b = bytearray()
        for i in range(0, len(padded_encoded_text), 8):
            byte = padded_encoded_text[i:i+8]
            b.append(int(byte, 2))
        return b

    def compress(self):
        filename, file_extension = os.path.splitext(self.path)
        output_path = filename + ".bin"
        with open(self.path, 'r+') as file, open(output_path, 'wb') as output:
            text = file.read()
            text = text.rstrip()
            frequency = self.make_frequency_dict(text)
            self.make_heap(frequency)
            self.merge_nodes()
            self.make_codes()
            encoded_text = self.get_encoded_text(text)
            padded_encoded_text = self.pad_encoded_text(encoded_text)
            b = self.get_byte_array(padded_encoded_text)
            output.write(bytes(b))
        print("Compressed")
        return output_path

    # functions for decompression:

    def remove_padding(self, padded_encoded_text):
        padded_info = padded_encoded_text[:8]
        extra_padding = int(padded_info, 2)
        padded_encoded_text = padded_encoded_text[8:]
        encoded_text = padded_encoded_text[:-1*extra_padding]
        return encoded_text

    def decode_text(self, encoded_text):
        current_code = ""
        decoded_text = ""
        for bit in encoded_text:
            current_code += bit
            if current_code in self.reverse_mapping:
                character = self.reverse_mapping[current_code]
                decoded_text += character
                current_code = ""
        return decoded_text

    def decompress(self, input_path):
        filename, file_extension = os.path.splitext(self.path)
        output_path = filename + "_decompressed" + ".txt"
        with open(input_path, 'rb') as file, open(output_path, 'w') as output:
            bit_string = ""
            byte = file.read(1)
            while len(byte) > 0:  # read(1) returns b"" (not "") at EOF
                byte = ord(byte)
                bits = bin(byte)[2:].rjust(8, '0')
                bit_string += bits
                byte = file.read(1)
            encoded_text = self.remove_padding(bit_string)
            decompressed_text = self.decode_text(encoded_text)
            output.write(decompressed_text)
        print("Decompressed")
        return output_path
Running the program:
Save the above code in a file named huffman.py.
Create a sample text file, or download a sample file from sample.txt (right click, save as).
Save the code below in the same directory as the above code, and run it (edit the path variable below before running; initialize it to your text file's path).
UseHuffman.py
from huffman import HuffmanCoding
#input file path
path = "/home/ubuntu/Downloads/sample.txt"
h = HuffmanCoding(path)
output_path = h.compress()
h.decompress(output_path)
The compressed .bin file and the decompressed file are both saved in the same directory as the input file.
Result
On running on the above linked sample text file:
Initial Size: 715.3 kB
Compressed file Size: 394.0 kB
Plus, the decompressed file comes out to be exactly the same as the original file, without any data loss.
And that is all for Huffman Coding implementation, with compression and decompression. This was fun to code.
The above program requires the decompression function to be run using the same object that created the compressed file (because the code mapping is stored in its data members). We can also make the compression and decompression functions run independently if, during compression, we store the mapping info in the compressed file as well (at the beginning). Then, during decompression, we first read the mapping info from the file, and then use it to decompress the rest of the file.
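A rough sketch of that idea, assuming we serialize reverse_mapping with pickle and prefix it with a fixed-size length field (the names follow the class above):

import pickle
import struct

def write_with_header(output, reverse_mapping, byte_array):
    header = pickle.dumps(reverse_mapping)
    output.write(struct.pack(">I", len(header)))  # 4-byte big-endian header length
    output.write(header)
    output.write(bytes(byte_array))

def read_header(file):
    (header_len,) = struct.unpack(">I", file.read(4))
    reverse_mapping = pickle.loads(file.read(header_len))
    return reverse_mapping  # the rest of the file is the padded encoded text

With that header in place, decompress() can rebuild reverse_mapping from the file itself instead of relying on the object that compressed it.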

Python sendto telling me it needs an int

from socket import *
import packets

image = "testfile.bmp"
# open image
bufferSize = 2048
myfile = open(image, 'rb')
sequenceNumber = 0
totalBytes = 0
serverName = "127.0.0.1"
serverSendPort = 12000
serverSendPort = int(serverSendPort)
serverListenPort = 12001
clientSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket = socket(AF_INET, SOCK_DGRAM)

while (1):
    if (sequenceNumber == 0):
        data = packets.mkepckt(myfile.read(bufferSize), 0,
                               packets.calculateChecksum(myfile.read(bufferSize), bufferSize))
        clientSocket.sendto(bytearray(data), (serverName, serverSendPort))
        sequenceNumber = 1
        ACK, = serverSocket.recvfrom(bufferSize)
        ACKchecksum = packets.calculateChecksum(ACK[1], bufferSize)
        while (ACK[0] is not 0 or ACKchecksum is not ACK[2]):
            clientSocket.sendto(data, (serverName, serverSendPort))
            ACK, = serverSocket.recvfrom(bufferSize)
            ACKchecksum = packets.calculateChecksum(ACK[1], bufferSize)
    elif (sequenceNumber == 1):
        data = packets.mkepckt(myfile.read(bufferSize), 1,
                               packets.calculateChecksum(myfile.read(bufferSize), bufferSize))
        clientSocket.sendto(bytearray(data), (serverName, serverSendPort))
        sequenceNumber = 0
        ACK, = serverSocket.recvfrom(bufferSize)
        ACKchecksum = packets.checksum(bufferSize, ACK[1])
        while (ACK[0] is not 1 or ACKchecksum is not ACK[2]):
            clientSocket.sendto(data, (serverName, serverSendPort))
            ACK, = serverSocket.recvfrom(bufferSize)
            ACKchecksum = packets.calculateChecksum(ACK[1], bufferSize)

myfile.close()
clientSocket.close()
serverSocket.close()
Not sure I needed to put my whole code, but better safe than sorry.
For some reason, despite the fact that I have an int in the sendto, I still get this error. I have also tried all sorts of typecasting, from typecasting each individual element to typecasting the whole thing as an int.
Thanks to help from @RemyLebeau, I was able to solve this problem by importing pickle and using it to dump my tuple into a variable, then sending that variable as the data. The error about an int most likely came from bytearray(data): passing a tuple to bytearray() tries to interpret every element as an integer in the range 0-255, which fails on the bytes fields.
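For anyone else hitting this, a minimal sketch of that fix (the packet tuple layout here is just illustrative):

import pickle
from socket import socket, AF_INET, SOCK_DGRAM

packet = (b"payload bytes", 0, 12345)  # (data, sequence number, checksum)
wire = pickle.dumps(packet)            # serialize the whole tuple to bytes
sock = socket(AF_INET, SOCK_DGRAM)
sock.sendto(wire, ("127.0.0.1", 12000))
# Receiver side: data, seq, checksum = pickle.loads(received_bytes)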
