Assigning textvariable to Entry causes validation to be disabled on DoubleVar

I am modifying tk.Entry() to validate a numeric input and notify a user if there was an error in applying it.
Beginning with the code below:
import tkinter as tk


class FloatEntry(tk.Entry):
    def __init__(self, parent, *args, **kwargs):
        tk.Entry.__init__(self, parent, validate="focusout", validatecommand=self.validate, *args, **kwargs)

    def validate(self):
        try:
            print(float(self.get()))
            return True
        except ValueError as e:
            print(e)
            return False


if __name__ == "__main__":
    root = tk.Tk()
    root.bind_all("<1>", lambda event: event.widget.focus_set())  # make all widgets focusable
    var = tk.DoubleVar()
    frame = tk.Frame()
    frame.pack(fill="both", expand=True)
    FloatEntry(frame, textvariable=var).pack()
    tk.Label(frame, textvariable=var).pack()
    root.mainloop()
This results in the exception being triggered, printing "could not convert string to float:". After this, neither the try nor the except branch in validate() is triggered again, so I assume it somehow returned None and disabled validation (I could be wrong here).
If I change var = tk.DoubleVar() to var = tk.StringVar(), the validation works as expected, printing the float if the string can be parsed as a float and printing the exception otherwise.
Finally, if I check the value returned by get() before the try/except block, validation also works as expected:
def validate(self):
    try:
        val = self.get()
        if(val is not ''):
            print(float(self.get()))
        return True
    except ValueError as e:
        print(e)
        return False
What is causing validation to be disabled on creation of a FloatEntry object? (Or, if that isn't what's happening, what is?)

I'll be honest and say I don't fully understand why this is happening. However, a simple workaround seems to be to configure the validatecommand after you've called the __init__ of the superclass:
class FloatEntry(tk.Entry):
    def __init__(self, parent, *args, **kwargs):
        tk.Entry.__init__(self, parent, validate="focusout", *args, **kwargs)
        self.configure(validatecommand=self.validate)
When I make the above change, I see validate being called every time the widget loses focus.
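One defensive variant (a sketch of mine building on the workaround above, not a confirmed explanation of the root cause): the Tk manual notes that the validate option resets itself to none if the entry widget is edited from within the validate callback, and recommends re-enabling it with after idle. In tkinter that looks roughly like this:

import tkinter as tk


class FloatEntry(tk.Entry):
    def __init__(self, parent, *args, **kwargs):
        tk.Entry.__init__(self, parent, validate="focusout", *args, **kwargs)
        self.configure(validatecommand=self.validate)

    def validate(self):
        try:
            print(float(self.get()))
            ok = True
        except ValueError as e:
            print(e)
            ok = False
        # Tk may have switched validation off while this callback ran;
        # re-arm it once the callback has returned.
        self.after_idle(lambda: self.configure(validate="focusout"))
        return ok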

Related

Is this really encapsulated?

I wondered whether it is actually impossible to make a protected Python class; there always seems to be a way of getting around it, but I can't find one for this:
I attempted to write this properly encapsulated class. Challenge:
Attempt setting somevalue to the value 69, without:
changing the code from lines 1-32
polymorphism
from sbNative.debugtools import log  # module not necessary, imported only for logging and debugging purposes
from inspect import stack
import traceback


class Protected:
    def __init__(self):
        self._somevalue = "Unset"
        log(self._somevalue)

    def __setattr__(self, name, value):
        if isinstance(stack()[1][0].f_locals.get("self"), Protected) or not name.startswith("_"):
            super(Protected, self).__setattr__(name, value)
        else:
            raise AttributeError("Protected class from setting")

    def __getattribute__(self, name):
        if isinstance(stack()[1][0].f_locals.get("self"), Protected) or not name.startswith("_"):
            return super(Protected, self).__getattribute__(name)
        else:
            raise AttributeError("Protected class from getting")

    @property
    def somevalue(self):
        return self._somevalue

    @somevalue.setter
    def somevalue(self, value):
        if value == 69:
            raise ValueError(f"{value} is an illegal value.")
        self._somevalue = value


log("Instantiates without a problem:")
p = Protected()
print("\n")

log("Fails because it is not allowed to set to this value:")
try:
    p.somevalue = 69
except ValueError:
    traceback.print_exc()
print("\n")

log("Fails because it attempts setting and getting directly:")
try:
    p._somevalue = 69
except AttributeError:
    traceback.print_exc()
print("")

try:
    log(p._somevalue)
except AttributeError:
    traceback.print_exc()
print("\n")

log("Succeeds because it is allowed to set and get this value:")
p.somevalue = 420
log(p.somevalue)
print("ⁿᶦᶜᵉ ˡᶦⁿᵉ ⁿᵘᵐᵇᵉʳ ᵇᵗʷ")

How could I create a docstring decorator in the presence of properties?

I have a collection of ever more specialized classes which correspond to collections of the same kind of data (temperature, density, etc.) but for different drifts; for example, one subclass has dimensions (nx, ny) and a different subclass has dimensions (ncv), and I want to reflect that in the docstrings to get better documentation with Sphinx.
After reading many very useful threads here on Stack Overflow, I have arrived at this model:
import numpy as np
from functools import wraps


def class_decorator(cls):
    import ipdb; ipdb.set_trace()
    clsdict = {}
    mro = cls.mro()
    mro.reverse()
    for tmp in mro[1:]:  # ignore the object parent class
        clsdict.update(tmp.__dict__)
    for name, method in clsdict.items():
        if hasattr(method, '__og_doc__'):
            try:
                method.__doc__ = method.__og_doc__.format(**clsdict)
            except:
                pass
        else:
            try:
                method.__og_doc__ = method.__doc__
                method.__doc__ = method.__doc__.format(**clsdict)
            except:
                pass
    return cls


def mark_documentation(fn):
    if not hasattr(fn, '__og_doc__'):
        try:
            fn.__og_doc__ = fn.__doc__
        except:
            pass

    @wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapped


def documented_property(fn):
    if not hasattr(fn, '__og_doc__'):
        try:
            fn.__og_doc__ = fn.__doc__
        except:
            pass

    @wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)

    prp = property(wrapped)
    prp.__og_doc__ = fn.__og_doc__
    return prp


@class_decorator
class Base(object):
    _GRID_DIM = 'nx, ny'
    _TYPE = 'BaseData'

    def __init__(self, name):
        self.name = name

    def shape(self):
        """ This docstring contains the type '{_TYPE}' of class."""
        print('Simple')

    def operation(self, a, b, oper=np.sum, **kwargs):
        """ Test for functions with args and kwargs in {_TYPE}"""
        return oper([a, b])

    @classmethod
    def help(cls, var):
        try:
            print(getattr(cls, var).__doc__)
        except:
            print("No docstring yet.")


@class_decorator
class Advanced(Base):
    _GRID_DIM = 'ncv'
    _TYPE = 'AdvancedData'

    def __init__(self, name):
        super().__init__(name)

    @property
    @mark_documentation
    # @documented_property
    def arkansas(self):
        """({_GRID_DIM}, ns): Size of Arkansaw."""
        return 'Yeah'
I am aiming to get the correctly formatted docstring when I call the help method or use Sphinx, so that:
> adv = Advanced('ADV')
> adv.help("arkansas")
(ncv, ns): Size of Arkansaw.
> adv.help("operation")
Test for functions with args and kwargs in AdvancedData
I have managed to make it work so far, except for properties, because I assigned __og_doc__ to the function, but the property does not have that attribute. My last attempt at monkeypatching this, documented_property, fails because property is immutable (as expected), and I cannot come up with any way to avoid this roadblock.
Is there any way around this problem?
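One possible direction (a sketch of mine, not a confirmed solution): instead of trying to attach __og_doc__ to the immutable property object, let the class decorator detect property attributes and rebuild them, since property(fget, fset, fdel, doc) accepts the docstring explicitly and getattr(cls, name).__doc__ then returns the formatted text.

def class_decorator(cls):
    clsdict = {}
    for base in reversed(cls.mro()[:-1]):  # most-derived class wins; skip object
        clsdict.update(base.__dict__)
    for name, attr in clsdict.items():
        if isinstance(attr, property) and attr.fget is not None:
            og = getattr(attr.fget, '__og_doc__', attr.fget.__doc__)
            if not og:
                continue
            attr.fget.__og_doc__ = og  # keep the unformatted template on the getter
            try:
                new_doc = og.format(**clsdict)
            except (KeyError, IndexError, ValueError):
                continue
            # property objects are immutable, so install a fresh one with the new doc
            setattr(cls, name, property(attr.fget, attr.fset, attr.fdel, new_doc))
        else:
            og = getattr(attr, '__og_doc__', getattr(attr, '__doc__', None))
            if not og:
                continue
            try:
                attr.__og_doc__ = og
                attr.__doc__ = og.format(**clsdict)
            except Exception:
                pass
    return cls

With this variant, the property only needs a plain docstring on its getter, so neither mark_documentation nor documented_property should be required for the arkansas example: getattr(Advanced, "arkansas") is the rebuilt property carrying the formatted text.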

Python unittest class issue

I'm trying to write a unit test class in Python but feel like I'm missing something fundamental, as it's not doing what I would expect. Here is my class:
from unittest import TestCase


class MyTestClass(TestCase):
    def __init__(self):
        self.file_name = None

    def setUp(self):
        self.file_name = 'give this file a name'
        return self.file_name

    def test_a_file_name(self):
        assert self.file_name == 'give this file a name', 'fail'


tester = MyTestClass()
tester.setUp()
tester.test_a_file_name()
I would expect the test to pass when running this, but instead I'm getting an __init__() takes 1 positional argument but 2 were given error, and I can't see why.
When running unittest.main, your class that inherits from TestCase gets handed the name of the test method to call. As such, you need to allow your class to accept that argument and pass it on to the parent class's __init__.
from unittest import TestCase, main


class MyTestClass(TestCase):
    # accept arbitrary positional and keyword arguments
    def __init__(self, *args, **kwargs):
        self.file_name = None
        # pass them on to the parent
        super().__init__(*args, **kwargs)

    def setUp(self):
        self.file_name = 'give this file a name'
        return self.file_name

    def test_a_file_name(self):
        assert self.file_name == 'give this file a name', 'fail'


if __name__ == '__main__':
    main()
As you noticed, you also don't need to handle instantiation and method calling. unittest.main() will do that for you.
In the future, if you ever get an error about arguments, a helpful debugging tip is throwing in *args, **kwargs and printing them to see what is being passed that you're not handling.
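For example (a small illustrative sketch, not part of the original answer), printing those extra arguments shows that the test runner passes in the name of the test method it is about to run:

from unittest import TestCase


class MyTestClass(TestCase):
    def __init__(self, *args, **kwargs):
        # when run via unittest, this typically prints ('test_a_file_name',) {}
        print(args, kwargs)
        super().__init__(*args, **kwargs)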

PySide2: How to re-implement QFormLayout.takeRow()?

I've noticed that QFormLayout in PySide2 does not have the takeRow method of its PyQt5 counterpart. I've attempted to subclass QFormLayout to incorporate a similar method, but I've run into RuntimeErrors, as the removal behavior of the LabelRole item is different from that of the FieldRole item. Another issue is that the LabelRole item does not actually get taken off the row even when the row itself is removed.
The following is the test sample I've been working with using Python 3.8.6:
from PySide2.QtWidgets import *
import sys


class MyFormLayout(QFormLayout):
    def __init__(self, *args, **kwargs):
        super(MyFormLayout, self).__init__(*args, **kwargs)
        self.cache = []
        print(f"Formlayout's identity: {self=}\nwith parent {self.parent()=}")

    def takeRow(self, row: int):
        print(f"Called {self.takeRow.__name__}")
        print(f"{self.rowCount()=}")
        label_item = self.itemAt(row, QFormLayout.LabelRole)
        field_item = self.itemAt(row, QFormLayout.FieldRole)
        print(f"{label_item=}\n{field_item=}")
        self.removeItem(label_item)
        self.removeItem(field_item)
        self.removeRow(row)  ## <-- This seems necessary to make the rowCount() decrement. Alternative?
        label_item.widget().setParent(None)  ## <-- Runtime Error Here?
        field_item.layout().setParent(None)
        self.cache.append(label_item.widget(), field_item)
        print(f"{self.rowCount()=}")
        print(f"{self.cache=}")
        print(self.cache[0])
        print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
        return label_item, field_item

    def restoreRow(self, insert_idx: int):
        print(f"Called {self.restoreRow.__name__}")
        print(f"{self.rowCount()=}")
        print(f"{self.cache=}")
        to_insert = self.cache.pop()
        self.insertRow(insert_idx, to_insert[0], to_insert[1])
        print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")


class MyWindow(QWidget):
    def __init__(self):
        super(MyWindow, self).__init__()
        self.mainlay = MyFormLayout(self)
        self.cmb = QComboBox()
        self.cmb.addItems(["Placeholder", "Remove 1 and 2"])
        self.cmb.currentTextChanged.connect(self.remove_rows_via_combo)
        self.current_text = self.cmb.currentText()
        self.hlay1, self.le1, self.btn1 = self.le_and_btn(placeholderText="1")
        self.hlay2, self.le2, self.btn2 = self.le_and_btn(placeholderText="2")
        self.hlay3, self.le3, self.btn3 = self.le_and_btn(placeholderText="3")
        self.hlay4, self.le4, self.btn4 = self.le_and_btn(placeholderText="4")
        self.remove_btn = QPushButton("Remove", clicked=self.remove_row_via_click)
        self.restore_btn = QPushButton("Restore", clicked=self.restore_a_row_via_click)
        self.mainlay.addRow("Combobox", self.cmb)
        for ii, hlayout in zip(range(1, 5), [self.hlay1, self.hlay2, self.hlay3, self.hlay4]):
            self.mainlay.addRow(f"Row {ii}", hlayout)
        self.mainlay.addRow(self.remove_btn)
        self.mainlay.addRow(self.restore_btn)

    @staticmethod
    def le_and_btn(**kwargs):
        hlay, le, btn = QHBoxLayout(), QLineEdit(**kwargs), QPushButton()
        hlay.addWidget(le)
        hlay.addWidget(btn)
        return hlay, le, btn

    def remove_row_via_click(self):
        self.mainlay.takeRow(1)

    def restore_a_row_via_click(self):
        self.mainlay.restoreRow(1)

    def remove_rows_via_combo(self, text):
        print(f"{self.remove_rows_via_combo.__name__} received the text: {text}")
        if text == "Remove 1 and 2":
            self.mainlay.takeRow(1)
            self.mainlay.takeRow(1)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = MyWindow()
    win.show()
    sys.exit(app.exec_())
I would like to understand why the behavior of the role items is different and how the method may be properly re-implemented.
The problem is that the label was created internally by Qt from a string, rather than by explicitly creating a QLabel in Python. This means that when the row is removed, the last remaining reference is also removed, which deletes the label on the C++ side. After that, all that's left on the Python side is an empty wrapper object, so when you try to call setParent on it, a RuntimeError is raised because the underlying C++ object no longer exists.
Your example can therefore be fixed by getting Python references to the label/field objects before the layout item is removed:
class MyFormLayout(QFormLayout):
    ...
    def takeRow(self, row: int):
        print(f"Called {self.takeRow.__name__}")
        print(f"{self.rowCount()=}")
        label_item = self.itemAt(row, QFormLayout.LabelRole)
        field_item = self.itemAt(row, QFormLayout.FieldRole)
        print(f"{label_item=}\n{field_item=}")
        # get refs before removal
        label = label_item.widget()
        field = field_item.layout() or field_item.widget()
        self.removeItem(label_item)
        self.removeItem(field_item)
        self.removeRow(row)
        label.setParent(None)
        field.setParent(None)
        self.cache.append((label, field))
        print(f"{self.rowCount()=}")
        print(f"{self.cache=}")
        print(self.cache[0])
        print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
        return label, field
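As a follow-up (my own sketch, untested): because the cache now stores (label, field) tuples, restoreRow can stay essentially as it was, since QFormLayout.insertRow has an overload that takes a label widget together with a field widget or layout:

class MyFormLayout(QFormLayout):
    ...
    def restoreRow(self, insert_idx: int):
        label, field = self.cache.pop()
        # insertRow(row, label, field) re-parents both the label and the field
        self.insertRow(insert_idx, label, field)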

Inheritance in iterable implementation of python's multiprocessing.Queue

I found the default implementation of Python's multiprocessing.Queue lacking, in that it's not iterable like any other collection. So I went about the effort of creating a 'subclass' of it, adding the feature in. As you can see from the code below, it's not a proper subclass, as multiprocessing.Queue isn't a direct class itself but a factory function, and the real underlying class is multiprocessing.queues.Queue. I don't have the understanding, nor the effort to expend, necessary to mimic the factory function just so I can inherit from the class properly, so I simply had the new class create its own instance from the factory and treat it as the superclass. Here is the code:
from multiprocessing import Queue, Value, Lock
import queue


class QueueClosed(Exception):
    pass


class IterableQueue:
    def __init__(self, maxsize=0):
        self.closed = Value('b', False)
        self.close_lock = Lock()
        self.queue = Queue(maxsize)

    def close(self):
        with self.close_lock:
            self.closed.value = True
            self.queue.close()

    def put(self, elem, block=True, timeout=None):
        with self.close_lock:
            if self.closed.value:
                raise QueueClosed()
            else:
                self.queue.put(elem, block, timeout)

    def put_nowait(self, elem):
        self.put(elem, False)

    def get(self, block=True):
        if not block:
            return self.queue.get_nowait()
        elif self.closed.value:
            try:
                return self.queue.get_nowait()
            except queue.Empty:
                return None
        else:
            val = None
            while not self.closed.value:
                try:
                    val = self.queue.get_nowait()
                    break
                except queue.Empty:
                    pass
            return val

    def get_nowait(self):
        return self.queue.get_nowait()

    def join_thread(self):
        return self.queue.join_thread()

    def __iter__(self):
        return self

    def __next__(self):
        val = self.get()
        if val == None:
            raise StopIteration()
        else:
            return val

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
This allows me to instantiate an IterableQueue object just like a normal multiprocessing.Queue, put elements into it like normal, and then, inside child consumers, simply loop over it like so:
from iterable_queue import IterableQueue
from multiprocessing import Process, cpu_count
import os


def fib(n):
    if n < 2:
        return n
    return fib(n-1) + fib(n-2)


def consumer(queue):
    print(f"[{os.getpid()}] Consuming")
    for i in queue:
        print(f"[{os.getpid()}] < {i}")
        n = fib(i)
        print(f"[{os.getpid()}] {i} > {n}")
    print(f"[{os.getpid()}] Closing")


def producer():
    print("Enqueueing")
    with IterableQueue() as queue:
        procs = [Process(target=consumer, args=(queue,)) for _ in range(cpu_count())]
        [p.start() for p in procs]
        [queue.put(i) for i in range(36)]
    print("Finished")


if __name__ == "__main__":
    producer()
and it works almost seamlessly; the consumers exit the loop once the queue has been closed, but only after exhausting all remaining elements. However, I was unsatisfied with the lack of inherited methods. In an attempt to mimic actual inheritance behavior, I tried adding the following __getattr__ fallback to the class:
def __getattr__(self, name):
    if name in self.__dict__:
        return self.__dict__[name]
    else:
        return getattr(self.queue, name)
However, this fails when instances of the IterableQueue class are manipulated inside child multiprocessing.Process workers, as the class's __dict__ property is not preserved within them. I attempted to remedy this in a hacky manner by replacing the class's default __dict__ with a multiprocessing.Manager().dict(), like so:
def __init__(self, maxsize=0):
    self.closed = Value('b', False)
    self.close_lock = Lock()
    self.queue = Queue(maxsize)
    self.__dict__ = Manager().dict(self.__dict__)
However, on doing so I received an error stating RuntimeError: Synchronized objects should only be shared between processes through inheritance. So my question is: how should I go about inheriting from the Queue class properly, such that the subclass has inherited access to all of its properties? In addition, while the queue is empty but not closed, the consumers all sit in a busy loop instead of a true IO block, taking up valuable CPU resources. If you have any suggestions on concurrency and race-condition issues I might run into with this code, or on how I might solve the busy-loop issue, I'd be willing to take those as well.
Based on code provided by MisterMiyagi, I created this general-purpose IterableQueue class, which can accept arbitrary input, blocks properly, and does not hang on queue close:
from multiprocessing.queues import Queue
from multiprocessing import get_context


class QueueClosed(Exception):
    pass


class IterableQueue(Queue):
    def __init__(self, maxsize=0, *, ctx=None):
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )

    def close(self):
        super().put((None, False))
        super().close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except QueueClosed:
            raise StopIteration

    def get(self, *args, **kwargs):
        result, is_open = super().get(*args, **kwargs)
        if not is_open:
            super().put((None, False))
            raise QueueClosed
        return result

    def put(self, val, *args, **kwargs):
        super().put((val, True), *args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
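For illustration, here is a minimal producer/consumer sketch of my own using this class (it assumes IterableQueue is defined in, or imported into, the running module):

from multiprocessing import Process


def consumer(queue):
    # the for-loop blocks in get() and ends cleanly once close() has been called
    for item in queue:
        print("consumed", item)


if __name__ == "__main__":
    q = IterableQueue()
    worker = Process(target=consumer, args=(q,))
    worker.start()
    for i in range(5):
        q.put(i)
    q.close()  # enqueues the (None, False) marker that ends iteration
    worker.join()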
The multiprocessing.Queue wrapper only serves to use the default context:
def Queue(self, maxsize=0):
    '''Returns a queue object'''
    from .queues import Queue
    return Queue(maxsize, ctx=self.get_context())
When inheriting, you can replicate this in the __init__ method. This allows you to inherit the entire Queue behaviour. You only need to add the iterator methods:
from multiprocessing.queues import Queue
from multiprocessing import get_context
import time  # needed for the flush wait in close()


class IterableQueue(Queue):
    """
    ``multiprocessing.Queue`` that can be iterated to ``get`` values

    :param sentinel: signal that no more items will be received
    """
    def __init__(self, maxsize=0, *, ctx=None, sentinel=None):
        self.sentinel = sentinel
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )

    def close(self):
        self.put(self.sentinel)
        # wait until buffer is flushed...
        while self._buffer:
            time.sleep(0.01)
        # ...before shutting down the sender
        super().close()

    def __iter__(self):
        return self

    def __next__(self):
        result = self.get()
        if result == self.sentinel:
            # re-queue sentinel for other listeners
            self.put(result)
            raise StopIteration
        return result
Note that the sentinel to indicate end-of-queue is compared by equality, because identity is not preserved across processes. The often-used queue.Queue sentinel object() does not work properly with this.

Resources