I have a class which caches some values to avoid computing them many times, for instance
class A(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self._value = None

    @property
    def value(self):
        if self._value is None:
            self._value = ...  # <complex code that produces value>
        return self._value
In this way, self._value is computed only once and all the other times the precomputed value is returned. So far so good.
Now, let's suppose I want to subclass A with class B. In this case, class B will have its own way of computing self._value, but it will sometimes need A's value, as in this example:
class B(A):
    def __init__(self, a, b):
        super().__init__(a, b)

    @property
    def value(self):
        if self._value is None:
            self._value = ...  # <complex code that produces B's version of value>
        return self._value

    def get_old_value(self):
        return super().value  # here comes the trouble
Now, the trouble is clear: if get_old_value() is called before value(), it will cache A's value forever. Likewise, if value() is called before get_old_value(), then get_old_value() will always return B's cached value instead of A's.
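To make the clobbering concrete, here is a small sketch of the above with stand-in computations (7 for A and 17 for B are just hypothetical placeholders for the real complex code):

class A(object):
    def __init__(self):
        self._value = None

    @property
    def value(self):
        if self._value is None:
            self._value = 7  # stand-in for A's complex computation
        return self._value

class B(A):
    @property
    def value(self):
        if self._value is None:
            self._value = 17  # stand-in for B's complex computation
        return self._value

    def get_old_value(self):
        return super().value

b = B()
print(b.get_old_value())  # 7 -- caches A's result in the shared self._value
print(b.value)            # 7 -- B's own computation never runs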
Of course, one could simply reuse A's <complex code that produces value> in the implementation of get_old_value(), but that would duplicate code (which would pretty much defeat the point of subclassing). Alternatively, one could wrap <complex code that produces value> in a separate method in A and call that method from get_old_value(), but then that path would not be cached at all.
Another way could be the following:
def get_old_value(self):
    result = super().value
    self._value = None  # reset the cache so B's value() can still compute its own
    return result
but that would still throw away the cached result of A's version of value, and it does not look clean at all. Is there a better way to accomplish this?
One thing I want to add is that in my code A and B really make sense as a superclass and subclass; otherwise I would consider composition.
What you need to do is use name-mangling -- this will allow each class/subclass to maintain a private version of the variable so they don't clobber each other:
class A(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.__value = None

    @property
    def value(self):
        if self.__value is None:
            self.__value = 7
        return self.__value
class B(A):
    def __init__(self, a, b):
        super().__init__(a, b)
        self.__value = None

    @property
    def value(self):
        if self.__value is None:
            self.__value = 17
        return self.__value

    def get_old_value(self):
        return super().value  # no more trouble here
And in use:
>>> b = B(1, 2)
>>> print(b.value)
17
>>> print(b.get_old_value())
7
Please note you now need to set __value in B's __init__ as well.
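To see what the mangling buys you (a quick check with the classes above), the instance ends up holding two separate cache slots, one per class:

>>> b = B(1, 2)
>>> b.value
17
>>> b.get_old_value()
7
>>> b.__dict__
{'a': 1, 'b': 2, '_A__value': 7, '_B__value': 17}

A's code reads and writes _A__value while B's code reads and writes _B__value, which is why the two cached values no longer interfere.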
See also this answer for a couple more tidbits about name-mangling.
Related
I have this code:
class A:
    def __init__(self, vals: list):
        self._vals = vals

    def __len__(self) -> int:
        # some side effects like logging maybe
        return len(self._vals)

    def print_len(self) -> None:
        # some function that uses the len above
        print(len(self))

class B(A):
    def __len__(self) -> int:
        return 0
The issue is, I want print_len to always call A.__len__. I can do this:
class A:
    def __init__(self, vals: list):
        self._vals = vals

    def __len__(self) -> int:
        return len(self._vals)

    def print_len(self) -> None:
        print(A.__len__(self))

class B(A):
    def __len__(self) -> int:
        return 0
But it feels wrong. Basically I want B to lie about __len__ to outside callers, but internally use the correct len specified in A.
So
a = A([1, 2, 3])
print(len(a)) # print 3
a.print_len() # print 3 - no surprises there
b = B([1, 2, 3])
print(len(b)) # print 0 - overload the __len__
b.print_len() # want this to be 3 using A's __len__, not 0 using B's __len__
Is there any way to ensure a class always uses its own version of a method rather than a subclass' version? I thought name mangling of dunder methods would help here.
I think your approach is a good one. The zen of Python states that "There should be one-- and preferably only one --obvious way to do it." and I think you've found it.
That being said, you can do this via name mangling. You just need to prefix the method with double underscores (without adding them to the end, as magic methods do). This creates a private name which won't ever be overridden by subclasses.
I think this might be self-defeating since you're now putting the computation in a different method.
class A:
    def __init__(self, vals: list):
        self._vals = vals

    def __len__(self) -> int:
        return self.__length()

    def __length(self) -> int:
        return len(self._vals)

    def print_len(self) -> None:
        print(self.__length())
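For illustration, here is how that behaves with the B from the question left unchanged; since __length is mangled to _A__length inside class A, B's __len__ override never enters the picture within print_len:

class B(A):
    def __len__(self) -> int:
        return 0

b = B([1, 2, 3])
print(len(b))   # 0 -- outside callers see B's override
b.print_len()   # 3 -- A.print_len calls self._A__length(), which B does not override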
I'm having trouble removing duplicate objects from a set. What I did was create a test class that stores two string variables. I need to store the objects I create in a set, treating any object where (t.a, t.b) is the same as (t.b, t.a) as a duplicate. However, whenever I add such pairs to my set, I'm having trouble getting the reversed case removed. Is there a way to do this in Python?
class Test:
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.variables = [a, b]

    def __hash__(self):
        return hash((self.a, self.b))

    def __eq__(self, other: "Test"):
        return (type(self) is type(other) and self.endpoint() == other.endpoint()
                or self.endpoint() == other.endpoint()[::-1])

    def endpoint(self):
        return (self.a, self.b)

T = Test('A', 'B')
T2 = Test('B', 'A')
result = set()
result.add(T)
result.add(T2)
However, result contains both objects rather than just one. Is there a way to fix this? Thanks
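For what it's worth, a set consults __hash__ before it ever calls __eq__, and hash((self.a, self.b)) differs between ('A', 'B') and ('B', 'A'), so the two objects land in different buckets and __eq__ is never consulted. One possible sketch of a fix (only a sketch; there are other ways) is to make both __hash__ and __eq__ order-insensitive:

class Test:
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __hash__(self):
        # frozenset ignores order, so ('A', 'B') and ('B', 'A') hash the same
        return hash(frozenset((self.a, self.b)))

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return {self.a, self.b} == {other.a, other.b}

result = {Test('A', 'B'), Test('B', 'A')}
print(len(result))  # 1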
I am trying to make a class that has a bunch of children that all have their own respective methods but share common methods through the parent. The problem is that I need to create an instance of the child class in a parent method, but I am not sure how to go about it.
My code so far looks like this:
def filterAttribute(self, attribute, value):
    newlist = []
    for thing in self._things:
        if thing._attributes[attribute] == value:
            newlist.append(thing)
    return self.__init__(newlist)
The class constructor takes in a list as its sole argument. Does anyone know if there is a standard way of doing this? My code is returning a NoneType object.
Here are a few examples of classes I have made.
This is the parent class:
class _DataGroup(object):
    def __init__(self, things=None):
        self._things = things

    def __iter__(self):
        for x in self._things:
            yield x

    def __getitem__(self, key):
        return self._things[key]

    def __len__(self):
        return len(self._things)

    def extend(self, datagroup):
        if isinstance(datagroup, self.__class__):
            self._things.extend(datagroup._things)
            self._things = list(set(self._things))

    def filterAttribute(self, attribute, value):
        newlist = []
        for thing in self._things:
            if thing._attributes[attribute] == value:
                newlist.append(thing)
        #return self.__init__(newlist)
        return self.__init__(newlist)
This is one of the child classes:
class _AuthorGroup(_DataGroup):
    def __init__(self, things=None):
        self._things = things

    def getIDs(self):
        return [x.id for x in self._things]

    def getNames(self):
        return [x.name for x in self._things]

    def getWDs(self):
        return [x.wd for x in self._things]

    def getUrns(self):
        return [x.urn for x in self._things]

    def filterNames(self, names, incl_none=False):
        newlist = []
        for thing in self._things:
            if (thing is not None or (thing is None and incl_none)) and thing.name in names:
                newlist.append(thing)
        return _AuthorGroup(newlist)
The functionality I am looking for is to be able to use the parent class's methods with the child classes and have them create instances of the child classes instead of the overall _DataGroup parent class.
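One note on the NoneType symptom: __init__ always returns None, so return self.__init__(newlist) can only evaluate to None. A minimal sketch of the usual idiom, assuming every subclass constructor accepts the same single list argument, is to build the result through type(self), which is the child class when the method is called on a child instance:

def filterAttribute(self, attribute, value):
    newlist = []
    for thing in self._things:
        if thing._attributes[attribute] == value:
            newlist.append(thing)
    # type(self) is _AuthorGroup when called on an _AuthorGroup, and so on
    return type(self)(newlist)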
So if I correctly understand what you are trying to accomplish:
You want a Base Class 'DataGroup' which has a set of defined attributes and methods;
You want one or more child classes with the ability to inherit both methods and attributes from the base class, as well as the ability to override base class methods if necessary; and
You want to invoke the child class without also having to manually invoke the base class.
If this in fact is your problem, this is how I would proceed:
Note: I have modified several functions, since I think you have several other issues with your code. For example, in the base class self._things is set up as a list, but in the functions __getitem__ and filterAttribute you are assuming self._things is a dictionary structure. I have modified the functions so all assume a dict structure for self._things.
class _DataGroup:
    def __init__(self, things=None):
        if things is None:
            self._things = dict()  # sets up a default empty dict
        else:
            self._things = things

    def __iter__(self):
        for x in self._things.keys():
            yield x

    def __len__(self):
        return len(self._things)

    def extend(self, datagroup):
        # assumes datagroup is another _DataGroup whose dict values are lists
        for k, v in datagroup._things.items():
            nv = self._things.pop(k, [])
            nv.append(v)
            self._things[k] = nv
# This class utilizes the methods and attributes of DataGroup
# and adds new methods, unique to the child class
class AttributeGroup(_DataGroup):
    def __init__(self, things=None):
        super().__init__(things)

    def getIDs(self):
        return [x for x in self._things]

    def getNames(self):
        return [x.name for x in self._things]

    def getWDs(self):
        return [x.wd for x in self._things]

    def getUrns(self):
        return [x.urn for x in self._things]
# This class over-rides a DataGroup method and adds a new attribute
class NewChild(_DataGroup):
    def __init__(self, newAttrib, things=None):
        self._newattrib = newAttrib
        super().__init__(things)

    def __len__(self):
        return max(len(self._newattrib), len(self._things))
These examples are simplified, since I am not absolutely sure of what you really want.
I am very puzzled by the fact that __setattr__ is being called in an attempt to set an attribute which has already been set to an instance of another class. Consider the following code:
class A:
    def __init__(self):
        self.a = 42

    def __add__(self, value):
        print("Incrementing a of A by {}".format(value))
        self.a += value

class B:
    def __init__(self):
        self.a = A()
        self.b = 10

    def __setattr__(self, attr, value):
        print("Setting {} of B to {}".format(attr, value))
        super(B, self).__setattr__(attr, value)
b = B()
print(b.b)
print(b.a)
b.b = 11
b.a += 1
print(b.b)
print(b.a)
When run, the code above produces the following output:
Setting a of B to <__main__.A object at 0x7f5e25438410>
Setting b of B to 10
10
<__main__.A object at 0x7f5e25438410>
Setting b of B to 11
Incrementing a of A by 1
Setting a of B to None
11
None
Obviously, b.a is correctly looked up and incremented. However, after the successful lookup, Python is attempting to create a new attribute of b called a. Why is that happening?
Answering my own question based on Jan Willems' comment:
Having __add__(self, value) return self fixes the described issue, even though the reason is not obvious: since A defines no __iadd__, b.a += 1 falls back to b.a = b.a.__add__(1), so whatever __add__ returns gets assigned back to b.a through B.__setattr__, and with no return statement that value is None. The documentation for the method (__add__ documentation) does not spell out that __add__ should return the resulting value, which is where my confusion stemmed from.
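For completeness, a sketch of that fix with the question's classes otherwise unchanged:

class A:
    def __init__(self):
        self.a = 42

    def __add__(self, value):
        print("Incrementing a of A by {}".format(value))
        self.a += value
        return self  # the augmented assignment rebinds b.a to this same instance

class B:
    def __init__(self):
        self.a = A()
        self.b = 10

    def __setattr__(self, attr, value):
        print("Setting {} of B to {}".format(attr, value))
        super(B, self).__setattr__(attr, value)

b = B()
b.a += 1      # __setattr__ now receives the A instance rather than None
print(b.a.a)  # 43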
I'm wondering if there's a way to have construction of a class, A, actually return an instance of one of its subclasses, based on some evaluation done in A.__new__()? The following doesn't work, but conveys the idea:
class A:
    def __new__(self, subclass):
        if subclass == "B":
            return B()
        else:
            return C()

class B(A):
    def __new__(self):
        print("B initialized")

class C(A):
    def __new__(self):
        print("C initialized")
If I try the above and assign a new instance to a variable, I get None assigned.
>>>x = A("B")
B initialized
>>>print(type(x))
<class 'NoneType'>
>>>print(isinstance(x, B))
False
But I want x to be an instance of B (and A).
In theory, yes, you can do this. But in practice it's going to be fiddly, and an alternative constructor is a much better way to do this than making it a feature of the base class's __new__ method.
First off, the reason your current code doesn't work is that your __new__ methods in B and C don't do what they need to do, which is create an instance of those subclasses and return it. Unfortunately fixing them is not entirely trivial, since the natural way to create an instance (by calling super().__new__) won't work because A.__new__ is already involved in the process. You can bypass it, or maybe do an alternative approach.
Here's a mostly conventional way to do things, but with __new__ methods in the subclasses that bypass the base class to construct instances of themselves.
class A:
    def __new__(cls, subclass):
        if subclass == "B":
            return B()
        else:
            return C()

class B(A):
    def __new__(cls):
        print("B initialized")
        return object.__new__(cls)  # we need to bypass A.__new__ here, so no super()

class C(A):
    def __new__(cls):
        print("C initialized")
        return object.__new__(cls)  # here too
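With those __new__ methods in place, construction behaves the way the question hoped:

>>> x = A("B")
B initialized
>>> type(x)
<class '__main__.B'>
>>> isinstance(x, B), isinstance(x, A)
(True, True)

(No explicit __init__ runs afterwards, since neither subclass defines one; object.__init__ simply ignores the extra argument because __new__ has been overridden.)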
An alternative might be to make the subclasses not do anything with regard to construction (no __new__ methods, only __init__), and for A.__new__ to take care of creating the subclass instances directly. This is a bit awkward though because the __init__ method of the subclasses needs to accept the same arguments as the parent class __new__ method, even though it probably doesn't need to be told that it was the chosen subclass:
class A:
    def __new__(cls, subclass):
        if subclass == "B":
            cls = B
        else:
            cls = C
        return super().__new__(cls)  # directly build the subclass instances ourself

class B(A):
    def __init__(self, subclass):  # need to take subclass here, not anything else
        super().__init__()
        print("B initialized")

class C(A):
    def __init__(self, subclass):
        super().__init__()
        print("C initialized")
A much nicer approach would be to make the normal creation process of your classes work normally, and add a separate, alternative constructor as a classmethod. That way it won't get in the way of the normal process of inheritance of __new__ and __init__.
class A:
    @classmethod
    def construct_subclass(cls, subclass):  # alternative constructor
        if subclass == "B":
            return B(1, 2)
        else:
            return C("foo")

    # we could have an __init__ or __new__ method too, but it would operate as normal

class B(A):
    def __init__(self, x, y):  # these methods now can take alternative arguments
        super().__init__()
        self.x = x
        self.y = y
        print("B initialized:", x, y)

class C(A):
    def __init__(self, foo):
        super().__init__()
        self.foo = foo
        print("C initialized:", foo)