I'm currently working on a small project where I have written a proxy format converter, as well as tracking the availability of the proxies that are in use.
Currently I have ended up doing something like this:
import random
import threading
import time
from typing import Dict

from loguru import logger


class ProxyManager(object):
    def __init__(self, proxy_file_path=None):
        self.proxies = self.load_proxies(proxy_file_path) if proxy_file_path else {}

    @staticmethod
    def load_proxies(proxy_file_path):
        with open(proxy_file_path) as proxy_file:
            proxies = {'https': [], 'http': []}
            for proxy in proxy_file.readlines():
                if proxy.count(':') > 1:
                    ip, port, username, password = proxy.rstrip().split(':')
                    url = f'http://{ip}:{port}:{username}@{password}'
                else:
                    ip, port = proxy.rstrip().split(':')
                    url = f'http://{ip}:{port}'
                proxies['https'].append(url)
            return proxies

    def random_proxy_https(self):
        return random.choice(self.proxies['https'])

    def all_proxies_https(self):
        return [proxy for proxy in self.proxies['https']]
# -------------------------------------------------------------------------
# Proxies path
# -------------------------------------------------------------------------
proxies: Dict[str, ProxyManager] = {
    "rotating": ProxyManager("./proxies/rotating.txt"),
    "test": ProxyManager("./proxies/test.txt")
}
# -------------------------------------------------------------------------
# Proxies available
# -------------------------------------------------------------------------
all_proxies = proxies["rotating"].all_proxies_https()
proxy_dict = dict(zip(all_proxies, ['available'] * len(all_proxies)))
proxy_lock = threading.Lock()
# -------------------------------------------------------------------------
# Initialize availability of proxies
# -------------------------------------------------------------------------
class AvailableProxies:
    def __enter__(self):
        proxy_lock.acquire()
        self.proxy = None
        while not self.proxy:
            available = [
                att for att, value in proxy_dict.items() if "available" in value
            ]
            if available:
                self.proxy = random.choice(available)
                proxy_dict[self.proxy] = "busy"
                break
            else:
                logger.info("Waiting ... no proxies available")
                time.sleep(.2)
        proxy_lock.release()
        return self.proxy

    def __exit__(self, exc_type, exc_val, exc_tb):
        proxy_dict[self.proxy] = "available"
The idea is that I will have multiple scripts that will need the ProxyManager class but not AvailableProxies. That means that I do not need to set the variables
# -------------------------------------------------------------------------
# Proxies available
# -------------------------------------------------------------------------
all_proxies = proxies["rotating"].all_proxies_https()
proxy_dict = dict(zip(all_proxies, ['available'] * len(all_proxies)))
proxy_lock = threading.Lock()
if a specific script does not require the class. I wonder if it's possible to set the variables inside the class AvailableProxies, so that all_proxies, proxy_dict & proxy_lock are set only once, inside the class, when we import AvailableProxies into a script?
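One possible approach (a minimal sketch, not a drop-in replacement, and assuming the same module-level imports and ProxyManager instances shown above) is to move the three names into class attributes of AvailableProxies and build the availability map lazily, so the setup only runs the first time the class is actually used rather than in every script that merely imports the module:

class AvailableProxies:
    # Class-level state: created once per process and shared by every instance.
    _lock = threading.Lock()
    _proxy_dict: Dict[str, str] = {}

    @classmethod
    def _ensure_init(cls):
        # Lazy one-time setup; nothing happens until the class is first used.
        if not cls._proxy_dict:
            cls._proxy_dict = dict.fromkeys(
                proxies["rotating"].all_proxies_https(), "available"
            )

    def __enter__(self):
        self.proxy = None
        while not self.proxy:
            with AvailableProxies._lock:
                AvailableProxies._ensure_init()
                available = [
                    p for p, state in AvailableProxies._proxy_dict.items()
                    if state == "available"
                ]
                if available:
                    self.proxy = random.choice(available)
                    AvailableProxies._proxy_dict[self.proxy] = "busy"
            if not self.proxy:
                logger.info("Waiting ... no proxies available")
                time.sleep(.2)
        return self.proxy

    def __exit__(self, exc_type, exc_val, exc_tb):
        with AvailableProxies._lock:
            AvailableProxies._proxy_dict[self.proxy] = "available"

Scripts that only import ProxyManager never trigger the setup, while every user of AvailableProxies shares the same class-level dictionary and lock.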
How to run all tests in serial per host while each host is in a single thread.
Maybe some code will explain what I'm trying to do.
conftest.py
import logging

from paramiko import SSHClient, AutoAddPolicy
from shared import Host, HOSTS
from pytest import fixture
from _pytest.python import Metafunc
from typing import Dict

FMT = '[%(name)s %(levelname)-7s(%(lineno)-4d)][%(threadName)s] %(message)s'


@fixture(scope="session")
def _ssh_con(request) -> Dict[Host, SSHClient]:
    print('>>>>>>>> setup')
    cons: Dict[Host, SSHClient] = {}
    for item in request.session.items:
        host = item.callspec.params.get("host")
        if host not in cons:
            con = SSHClient()
            con.set_missing_host_key_policy(AutoAddPolicy())
            con.connect(host.ip, host.port, host.username, host.password)
            cons[host] = con
    print('>>>>>>>> setup done')
    yield cons
    print('<<<<<<<<<< teardown')
    for value in cons.values():
        value.close()
    print('<<<<<<<<<< teardown done')


@fixture(autouse=True)
def ssh(host: Host, _ssh_con: Dict[Host, SSHClient], logger) -> SSHClient:
    logger.info(f'yield {host}')
    yield _ssh_con[host]


def pytest_generate_tests(metafunc: Metafunc):
    metafunc.parametrize('host', HOSTS, ids=str)


@fixture(scope="session")
def logger() -> logging.Logger:
    logger = logging.getLogger('Tester')
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter(FMT)
    hdlr = logging.StreamHandler()
    hdlr.setFormatter(fmt)
    logger.addHandler(hdlr)
    return logger
shared.py
from dataclasses import dataclass, field
from typing import List


@dataclass()
class Host:
    name: str
    ip: str
    port: int = field(repr=False, default=22)
    username: str = 'myusername'
    password: str = field(repr=False, default='myuserpassword')

    def __hash__(self):
        return hash(self.ip)

    def __str__(self):
        return self.name


HOSTS: List[Host] = [
    Host('Host-1', '192.168.195.1'),
    Host('Host-2', '192.168.195.2'),
    Host('Host-3', '192.168.195.3'),
]
tests.py
from time import sleep

from paramiko import SSHClient
from shared import Host, HOSTS

SLEEP_TIME = 2


def test_ip(ssh: SSHClient, host: Host, logger):
    logger.info(f"test_ip[{host}][{ssh}]")
    command = "ip a s ens33 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1"
    sleep(SLEEP_TIME)
    _, stdout, _ = ssh.exec_command(command)
    output = stdout.read().decode('utf8').strip()
    assert output == host.ip, "got unexpected IP or didn't get any IP"


def test_machine_name(host: Host, ssh: SSHClient, logger):
    logger.info(f"test_machine_name[{host}][{ssh}]")
    command = f"ls /tmp | grep {host.name}"
    sleep(SLEEP_TIME)
    _, stdout, _ = ssh.exec_command(command)
    output = stdout.read().decode('utf8').strip()
    assert output, "didn't find file with host name"
What I want to achieve is the following:
Create all SSH connections for the session
Start pytest_runtestloop
Start Thread-1, Thread-2, Thread-3
For each thread, run all of its tests in sequential order
Tear down all SSH connections for the session
I tried pytest-parallel and pytest-xdist, but they don't fit my use case.
I also tried to write my own plugin, but I'm not able to get it right.
In the log output, the thread name reported is always MainThread.
from concurrent.futures import Future, ThreadPoolExecutor, wait
from typing import Dict, List

from shared import Host

WITH_SUB_THREAD = True


def _run_hosts_tests(session, host, tests):
    if WITH_SUB_THREAD:
        with ThreadPoolExecutor(1, f"Worker_{host}") as executor:
            for test_idx in tests:
                item = session.items[test_idx]
                executor.submit(item.runtest)
    else:
        for test_idx in tests:
            item = session.items[test_idx]
            item.runtest()


def _done_callback(future: Future):
    try:
        result = future.result()
        print(f"[\033[92;1mOK\033[0m] {result}")
        return result
    except Exception as e:
        print(f"[\033[91;1mERR\033[0m] {e}")
        raise e


class ParallelRunner:
    def __init__(self):
        self._tests_mapper: Dict[Host, List[int]] = dict()

    def pytest_collection_finish(self, session):
        for idx, item in enumerate(session.items):
            host = item.callspec.getparam('host')
            if host not in self._tests_mapper:
                self._tests_mapper[host] = []
            self._tests_mapper[host].append(idx)

    def pytest_runtestloop(self, session):
        if (
            session.testsfailed
            and not session.config.option.continue_on_collection_errors
        ):
            raise session.Interrupted(
                "%d error%s during collection"
                % (session.testsfailed, "s" if session.testsfailed != 1 else "")
            )

        if session.config.option.collectonly:
            return True

        with ThreadPoolExecutor(len(self._tests_mapper), 'Worker') as executor:
            for host, tests in self._tests_mapper.items():
                executor.submit(_run_hosts_tests, session, host, tests)\
                    .add_done_callback(_done_callback)
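For completeness, here is a sketch of how such a plugin object could be registered so that its hooks actually run, assuming the class above lives in a (hypothetical) module named parallel_runner:

# conftest.py -- register the custom runner so its pytest_* hooks are called
from parallel_runner import ParallelRunner


def pytest_configure(config):
    config.pluginmanager.register(ParallelRunner(), "parallel-runner")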
The answer was to set the connections in a global variable.
Although I'm sure there is a better solution, in the meantime I will put this workaround here.
conftest.py
from threading import Lock

mutex = Lock()  # guards creation and teardown of the shared connections
_cons: Dict[Host, SSHClient] = dict()


@fixture(scope="session")
def _ssh_con(request: SubRequest) -> Dict[Host, SSHClient]:
    mutex.acquire()
    global _cons
    if not _cons:
        for item in request.session.items:
            host = item.callspec.params.get("host")
            if host not in _cons:
                con = SSHClient()
                con.set_missing_host_key_policy(AutoAddPolicy())
                con.connect(host.ip, host.port, host.username, host.password)
                _cons[host] = con
    mutex.release()
    return _cons


def pytest_sessionfinish(session, exitstatus):
    mutex.acquire()
    global _cons
    if _cons:
        for value in _cons.values():
            value.close()
    mutex.release()
How to call a variable within a function:
What is the right approach:
Should I define the variable before the class, or
can I define it under the class or function?
Sample Code:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time

timeStamp = time.strftime("%Y%m%d%H%M%S")  # <-- Is this the right approach?


class Scanner:
    INFO = 0
    DEBUG = 3
    timeStamp = time.strftime("%Y%m%d%H%M%S")  # <-- ?

    def __init__(self, config_file, verbose=False):
        """ Constructor """

    def ask_passwords(self):
        ...

    def ldap_init(self):
        ...

    def hosts_module_scanner(self):
        ...

    def users_module_scanner(self):
        ...

    def ldap_reporting(self, user_list):
        self.write_report(failed_users, "users_ldap_report-{}.txt".format(timeStamp))


def option_parser(prog_version):
    ...


if __name__ == '__main__':
    scanner.ask_passwords()
    scanner.ldap_init()
    scanner.hosts_module_scanner()
    scanner.users_module_scanner()
Note: In the above example it doesn't work if I define it under the class.
You can call variables within a class using the syntax:
self.variable_name = ...  # whatever you're assigning to the variable
As for whereabouts in the class, your best bet is within the __init__() method.
edit:
As a more verbose answer: you'll define the variables in the __init__ method as shown below. Using self.variable in a class, but outside the methods (class functions), will throw a "self is not defined" error.
class Scanner:
    def __init__(self, config_file, verbose=False):
        """ Constructor """
        self.INFO = 0
        self.DEBUG = 3
        self.timeStamp = time.strftime("%Y%m%d%H%M%S")
        # declare rest of variables here

    # other class methods go here
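To make the distinction concrete, here is a small illustrative snippet (the names are made up for the example) showing why a class-level name "doesn't work" when referenced bare inside a method: class attributes must be reached through self or the class name, while module-level names are found by normal scoping.

import time

time_stamp_module = time.strftime("%Y%m%d%H%M%S")   # module level: visible throughout the module


class Scanner:
    time_stamp_class = time.strftime("%Y%m%d%H%M%S")  # class level: shared by all instances

    def __init__(self):
        self.time_stamp_instance = time.strftime("%Y%m%d%H%M%S")  # per instance

    def report(self):
        # A bare `time_stamp_class` here would raise NameError; qualify it instead.
        print(time_stamp_module)           # module-level name
        print(self.time_stamp_class)       # class attribute via the instance
        print(Scanner.time_stamp_class)    # ... or via the class itself
        print(self.time_stamp_instance)    # instance attribute


Scanner().report()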
I'm trying to use the shioaji API to create a Taiwan stock market trading application.
However, I found a strange behavior during development.
Here's my code:
import tkinter as tk
import os

import shioaji as sj
from shioaji import BidAskFOPv1, Exchange
from dotenv import load_dotenv


class TouchOrderBuy:
    def __init__(self, api, contract):
        print(f"{contract.symbol} TouchOrder init ...")
        self.api = api
        self.contract = contract
        self.is_triggered = False
        self.api.quote.subscribe(
            contract=self.contract,
            quote_type=sj.constant.QuoteType.BidAsk,
            version=sj.constant.QuoteVersion.v1
        )
        print(f"self.quote_bidask_callback address: {hex(id(self.quote_bidask_callback))}")
        self.api.quote.set_on_bidask_fop_v1_callback(self.quote_bidask_callback)

    def quote_bidask_callback(self, exchange: Exchange, bidask: BidAskFOPv1):
        print(f"{bidask.code} quote_bidask_callback")
        print(f"self.is_triggered: {self.is_triggered}")
        print(f"self.is_triggered address: {hex(id(self.is_triggered))}")
        if bidask.code == 'TXO17500C2':
            print(f"set self.is_triggered as True")
            self.is_triggered = True


class TradingApp:
    def __init__(self):
        self.main_window = tk.Tk()
        self.main_window.wm_title('test subscription')
        self.initialize()
        self.create_gui()
        self.main_window.mainloop()

    def initialize(self):
        self.api = sj.Shioaji(simulation=False)
        login_info = self.api.login(
            person_id=os.getenv('SHIOAJI_USERID'),
            passwd=os.getenv("SHIOAJI_PASSWORD"),
            contracts_timeout=10000,
            contracts_cb=print,
            fetch_contract=True
        )
        print('Login Done')
        self.api.set_default_account(self.api.list_accounts()[2])
        resp = self.api.activate_ca(
            ca_path="Sinopac.pfx",
            ca_passwd=os.getenv('CA_PASSWORD'),
            person_id=os.getenv('SHIOAJI_USERID'),
        )

    def create_gui(self):
        sub_put_button = tk.Button(self.main_window, text='subs put', command=self.subscribe_put)
        sub_put_button.pack()
        sub_call_button = tk.Button(self.main_window, text='subs call', command=self.subscribe_call)
        sub_call_button.pack()

    def subscribe_call(self):
        t_call = TouchOrderBuy(self.api, self.api.Contracts.Options.TXO.TXO202203017500C)

    def subscribe_put(self):
        t_put = TouchOrderBuy(self.api, self.api.Contracts.Options.TXO.TXO202203017500P)


if __name__ == '__main__':
    load_dotenv()
    app = TradingApp()
As you can see from the above code, I can create two TouchOrderBuy objects by first clicking the subs put button and then clicking the subs call button.
I found that when t_call's self.is_triggered becomes True, t_put's self.is_triggered also becomes True.
Why does this happen?
Here's a snippet of the output log:
According to this article, the contents of an instance variable are completely independent from one object instance to another.
I tried to create a minimal reproducible example without using shioaji, but without success. I'm sorry about that.
Another question I have is why t_put's and t_call's is_triggered variables refer to the same memory address.
I guess it's because Python has something similar to the integer cache for boolean values.
I also created another version of the code to test whether self.is_triggered is shared between t_put and t_call:
import tkinter as tk
import os

import shioaji as sj
from shioaji import BidAskFOPv1, Exchange
from dotenv import load_dotenv


class TouchOrderBuy:
    def __init__(self, api, contract):
        print(f"{contract.symbol} TouchOrder init ...")
        self.api = api
        self.contract = contract
        self.is_triggered = False

    def get_is_triggered(self):
        print(f"is_triggered address: {hex(id(self.is_triggered))}")
        return self.is_triggered

    def set_is_triggered(self, value):
        self.is_triggered = value


if __name__ == '__main__':
    load_dotenv()
    api = sj.Shioaji(simulation=False)
    login_info = api.login(
        person_id=os.getenv('SHIOAJI_USERID'),
        passwd=os.getenv("SHIOAJI_PASSWORD"),
        contracts_timeout=10000,
        contracts_cb=print,
        fetch_contract=True
    )
    print('Login Done')
    api.set_default_account(api.list_accounts()[2])
    resp = api.activate_ca(
        ca_path="Sinopac.pfx",
        ca_passwd=os.getenv('CA_PASSWORD'),
        person_id=os.getenv('SHIOAJI_USERID'),
    )
    t_put = TouchOrderBuy(api, api.Contracts.Options.TXO.TXO202203017500P)
    t_call = TouchOrderBuy(api, api.Contracts.Options.TXO.TXO202203017500C)
    print(f"put is_triggered: {t_put.get_is_triggered()}")
    print(f"call is_triggered: {t_call.get_is_triggered()}")
    t_call.set_is_triggered(True)
    print(f"put is_triggered: {t_put.get_is_triggered()}")
    print(f"call is_triggered: {t_call.get_is_triggered()}")
In this version, when t_call's self.is_triggered is changed, t_put's self.is_triggered does not change.
Here's its output:
TXO202203017500P TouchOrder init ...
TXO202203017500C TouchOrder init ...
is_triggered address: 0x7ffb2a9fa970
put is_triggered: False
is_triggered address: 0x7ffb2a9fa970
call is_triggered: False
is_triggered address: 0x7ffb2a9fa970
put is_triggered: False
is_triggered address: 0x7ffb2a9fa950
call is_triggered: True
Why does this version of the code not show the weird behavior?
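For what it's worth, the matching addresses by themselves are expected: in CPython, True and False are singletons, so every attribute currently bound to False reports the same id(), and rebinding one instance's attribute never affects another instance. A tiny standalone snippet (independent of shioaji) shows this:

class Holder:
    def __init__(self):
        self.flag = False


a, b = Holder(), Holder()
print(hex(id(a.flag)), hex(id(b.flag)))  # same address: both names point at the single False object
a.flag = True                            # rebinds a.flag only; b is untouched
print(a.flag, b.flag)                    # True False
print(hex(id(a.flag)), hex(id(b.flag)))  # now different: the True and False singletons

So identical addresses alone do not mean the two instances share state.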
When executing the spider, data is extracted from the page, but when the pipeline starts something goes wrong. I get the following error:
Traceback (most recent call last):
  File "C:\Users\EAgnelli\AppData\Local\Continuum\anaconda3\envs\tensorflow\lib\site-packages\twisted\internet\defer.py", line 654, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
TypeError: close_spider() missing 1 required positional argument: 'reason'
I'm sending the request through Scrapy Splash to execute some JavaScript on the page and then extracting the link information; however, this is the first time I have gotten this error.
This is my spider
import scrapy
import scrapy_splash
from scrapy.linkextractors import LinkExtractor
from cointelegraph_spider.items import CointelegraphSpiderItem
import sqlite3 as sq3


class CointelegraphspiderSpider(scrapy.Spider):
    name = 'cointelegraphspider'
    allowed_domains = ['cointelegraph.com']
    start_urls = ['http://cointelegraph.com/']

    def start_requests(self):
        """
        Doc string
        """
        # Execute the Lua script for the "Load More" button
        script = """
        function main(splash, args)
            assert(splash:go(args.url))
            splash:wait(0.5)
            local num_clicks = 2
            local delay = 1.5
            local load_more = splash:jsfunc(
                [[
                    function ()
                    {
                        var el = document.getElementsByClassName('post-preview-list-navigation__btn post-preview-list-navigation__btn_load-more');
                        el[0].click();
                    }
                ]]
            )
            for _ = 1, num_clicks do
                load_more()
                splash:wait(delay)
            end
            return
            {
                html = splash:html(),
            }
        end
        """

        for url in self.start_urls:
            yield scrapy_splash.SplashRequest(
                url=url,
                callback=self.parse_main_page,
                args={
                    'wait': 3,
                    'lua_source': script,
                    #'timeout': 3600  # Here the max timeout is 60 -- to increase it, launch the Docker container with --max-timeout xxxxx
                },
                endpoint="execute",
            )

    def parse_main_page(self, response):
        """
        Doc string
        """
        # Convert Splash response into an html response object
        html = scrapy.Selector(response)

        # Check DB for existing records
        conn = sq3.connect("D:\\DCC\\Projects\\crypto_projects\\master_data.db")
        db_links = conn.execute("select link from cointelegraph").fetchall()  # list of tuples
        db_links = [elem[0] for elem in db_links]  # flattening list
        print("DB LINKS! ", db_links)
        #db_links = ["aaa",]
        conn.close()  # close connection

        # Extract all links to be followed
        news_links = LinkExtractor(restrict_xpaths=['//ul[@class="post-preview-list-cards"]/li/div/article/a',  # Main Body
                                                    '//div[@class="main-news-tabs__wrp"]/ul/li/div/a']  # "Editor's Choice" & "Hot Stories"
                                   ).extract_links(html.response)

        for link in news_links[:2]:
            # Follow only new links
            if link.url not in db_links:
                yield scrapy.Request(link.url, callback=self.parse_article)

    def parse_article(self, response):
        """
        Doc string
        """
        # Create Item for Pipeline
        item = CointelegraphSpiderItem()
        item['author'] = response.xpath('//div[@class="name"]/a/text()').extract_first().strip()
        item['timestamp'] = response.xpath('//div/@datetime').extract_first().split('t')[0]  # %Y-%m-%d
        item['title'] = response.xpath('//h1[@class="header"]/text()').extract_first().strip()
        item['body'] = ' '.join(response.xpath('//div[@class="post-full-text contents js-post-full-text"]/p//text()').extract())
        item['quotes'] = ';;;'.join(response.xpath('//div[@class="post-full-text contents js-post-full-text"]/blockquote//text()').extract())
        item['int_links'] = ';;;'.join(response.xpath('//div[@class="post-full-text contents js-post-full-text"]/p/a/@href').extract())
        _tmp = [elem.replace('#', '') for elem in response.xpath('//div[@class="tags"]/ul/li/a/text()').extract()]
        item['tags'] = ';;;'.join([elem.replace(' ', '') for elem in _tmp])
        item['link'] = response.url
        item['news_id'] = str(hash(item['link']))

        yield item
This is my pipeline
import sqlite3 as sq3
import sqlite3_functions as sq_f
import logging
from scrapy.exceptions import DropItem


class CointelegraphSpiderPipeline(object):
    """
    Doc string
    """

    def __init__(self, stats):
        """
        Doc string
        """
        self.stats = stats
        self.db_file = 'D:\\DCC\\Projects\\crypto_projects\\master_data.db'
        self.conn = sq3.connect(self.db_file)
        self.table_name = 'cointelegraph'
        self.commit_counter = 0

    @classmethod
    def from_crawler(cls, crawler):
        """
        Doc string
        """
        stats = crawler.stats
        return stats  # cls(crawler.stats)

    def open_spider(self, spider):
        """
        Doc string
        """
        print("I'm starting the pipeline")
        logging.INFO("Starting Pipeline...")

    def process_item(self, item, spider):
        """
        Doc string
        """
        item_checked = True
        try:
            # Sanity Check
            for key, value in item.items():
                print("Inside the loop!!!")
                if value == '':
                    item_checked = False
                    raise DropItem("Item '{0}:{1}' has empty data - Link: {3}".format(key, value, item['link']))
                else:
                    logging.INFO("Item check OK")
                    item_checked = True

            # Insert row and increase counter
            if item_checked:
                self.conn = sq_f.insert_row(self.db_file, table_name=self.table_name, conn=self.conn, **item)
                self.commit_counter += 1
                self.conn.commit()

            # Commit every 500 inserted rows
            if self.commit_counter % 500 == 0:
                self.conn.commit()
            print(item)
        except Exception as e:
            logging.WARNING(e)

    def close_spider(self, spider):
        """
        Doc string
        """
        logging.INFO("Commiting rows...")
        self.conn.commit()
        logging.INFO("Saving spider stats...")
        print(self.stats.get_stats())
        logging.INFO("Closing pipeline..")
        self.conn.close()
and my Settings:
BOT_NAME = 'cointelegraph_spider'
SPIDER_MODULES = ['cointelegraph_spider.spiders']
NEWSPIDER_MODULE = 'cointelegraph_spider.spiders'
# Splash Settings
SPLASH_URL = 'http://localhost:8050'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3699.0 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
#'cointelegraph_spider.middlewares.CointelegraphSpiderSpiderMiddleware': 543,
}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
#'cointelegraph_spider.middlewares.CointelegraphSpiderDownloaderMiddleware': 543,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'cointelegraph_spider.pipelines.CointelegraphSpiderPipeline': 300,
}
While a Scrapy pipeline is expected to have a close_spider(self, spider) method, the actual signal callback is expected to be close_spider(self, spider, reason).
Something in your code turned the pipeline's close_spider method into a direct signal callback. You can fix that by adjusting your method signature to include reason:
def close_spider(self, spider, reason):
    pass
See the signals documentation on spider_closed
and the Scrapy Pipeline.close_spider documentation.
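As a side note, Scrapy's documented from_crawler pattern is to return an instance of the pipeline class itself, whereas the pipeline above returns crawler.stats. A minimal sketch of the conventional shape:

class CointelegraphSpiderPipeline(object):
    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        # Build the pipeline instance and hand it the crawler's stats collector.
        return cls(crawler.stats)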
I'm trying to write a Blender add-on that creates custom nodes which can be operated like normal Blender built-in nodes, especially grouping them together; in my opinion that is a very important feature. Right now the code can add a custom node, but grouping still does not work, so any tips?
#
# architect.py -- the blender addon
#
import bpy
import nodeitems_utils
from nodeitems_utils import NodeCategory, NodeItem, NodeItemCustom
from bpy.types import NodeTree, ShaderNodeTree, Node, NodeGroup, NodeCustomGroup, NodeSocket
bl_info = {
    "name": "Architect",
    "author": "Lei Liu",
    "category": "Node"}
class ArchitectEngine(bpy.types.RenderEngine):
    bl_idname = 'ARCHITECT_RENDER'
    bl_label = "Architect"
    bl_use_preview = False
    bl_use_shading_nodes = False
    bl_use_exclude_layers = False
    bl_use_save_buffers = False
    draw_callbacks = {}

    def __init__(self):
        self.session = None

    def __del__(self):
        pass

    # main scene render
    def update(self, data, scene):
        pass

    def render(self, scene):
        pass


class ArchitectNodeTree(ShaderNodeTree):
    bl_idname = 'ArchitectNodeTree'
    bl_label = 'Architect Node Tree'
    bl_icon = 'NODETREE'
    nodetypes = {}

    @classmethod
    def poll(cls, context):
        return context.scene.render.engine == 'ARCHITECT_RENDER'
class ArchitectNodeGroup(NodeCustomGroup):
    bl_idname = 'ArchitectNodeGroup'
    bl_label = 'Architect Node Group'
    node_tree = ArchitectNodeTree

    @classmethod
    def poll(cls, context):
        return context.scene.render.engine == 'ARCHITECT_RENDER'
# Custom socket type
class ArchitectSocket(NodeSocket):
    # Description string
    '''Architect node socket type'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'ArchitectSocketType'
    # Label for nice name display
    bl_label = 'Architect Node Socket'

    # Enum items list
    my_items = [
        ("DOWN", "Down", "Where your feet are"),
        ("UP", "Up", "Where your head should be"),
        ("LEFT", "Left", "Not right"),
        ("RIGHT", "Right", "Not left")
    ]

    myEnumProperty = bpy.props.EnumProperty(name="Direction", description="Just an example", items=my_items, default='UP')

    # Optional function for drawing the socket input value
    def draw(self, context, layout, node, text):
        if self.is_output or self.is_linked:
            layout.label(text)
        else:
            layout.prop(self, "myEnumProperty", text=text)

    # Socket color
    def draw_color(self, context, node):
        return (1.0, 0.4, 0.216, 0.5)
class ArchitectTreeNode:
    @classmethod
    def poll(cls, ntree):
        return ntree.bl_idname == 'ArchitectNodeTree'


class DemoNode(Node, ArchitectTreeNode):
    bl_idname = 'DemoNodeType'
    bl_label = 'Demo Node'
    bl_icon = 'SOUND'
    typename = 'DemoNodeType'

    # === Custom Properties ===
    # These work just like custom properties in ID data blocks
    # Extensive information can be found under
    # http://wiki.blender.org/index.php/Doc:2.6/Manual/Extensions/Python/Properties
    myStringProperty = bpy.props.StringProperty()
    myFloatProperty = bpy.props.FloatProperty(default=3.1415926)

    # === Optional Functions ===
    # Initialization function, called when a new node is created.
    # This is the most common place to create the sockets for a node, as shown below.
    # NOTE: this is not the same as the standard __init__ function in Python, which is
    # a purely internal Python method and unknown to the node system!
    def init(self, context):
        self.inputs.new('ArchitectSocketType', "Hello")
        self.inputs.new('NodeSocketFloat', "World")
        self.inputs.new('NodeSocketVector', "!")
        self.outputs.new('NodeSocketColor', "How")
        self.outputs.new('NodeSocketColor', "are")
        self.outputs.new('NodeSocketFloat', "you")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("Node settings")
        layout.prop(self, "myFloatProperty")

    # Detail buttons in the sidebar.
    # If this function is not defined, the draw_buttons function is used instead
    def draw_buttons_ext(self, context, layout):
        layout.prop(self, "myFloatProperty")
        # myStringProperty button will only be visible in the sidebar
        layout.prop(self, "myStringProperty")

    # Optional: custom label
    # Explicit user label overrides this, but here we can define a label dynamically
    def draw_label(self):
        return "I am a custom node"
class ArchitectNodeCategory(NodeCategory):
    @classmethod
    def poll(cls, context):
        return (context.space_data.tree_type == 'ArchitectNodeTree')


# menu entry for node group tools
def group_tools_draw(self, layout, context):
    layout.operator("node.group_make")
    layout.operator("node.group_ungroup")
    layout.separator()


# maps node tree type to group node type
node_tree_group_type = {
    'CompositorNodeTree': 'CompositorNodeGroup',
    'ShaderNodeTree': 'ShaderNodeGroup',
    'TextureNodeTree': 'TextureNodeGroup',
    'ArchitectNodeTree': 'ArchitectNodeGroup',
}


# generic node group items generator for shader, compositor and texture node groups
def node_group_items(context):
    if context is None:
        return
    space = context.space_data
    if not space:
        return
    ntree = space.edit_tree
    if not ntree:
        return

    yield NodeItemCustom(draw=group_tools_draw)

    def contains_group(nodetree, group):
        if nodetree == group:
            return True
        else:
            for node in nodetree.nodes:
                if node.bl_idname in node_tree_group_type.values() and node.node_tree is not None:
                    if contains_group(node.node_tree, group):
                        return True
        return False

    for group in context.blend_data.node_groups:
        if group.bl_idname != ntree.bl_idname:
            continue
        # filter out recursive groups
        if contains_group(group, ntree):
            continue
        yield NodeItem(node_tree_group_type[group.bl_idname],
                       group.name,
                       {"node_tree": "bpy.data.node_groups[%r]" % group.name})


# only show input/output nodes inside node groups
def group_input_output_item_poll(context):
    return False


architect_node_categories = [
    ArchitectNodeCategory("ARCH_DEMO", "Demo", items=[
        NodeItem("DemoNodeType"),
    ]),
    ArchitectNodeCategory("ARCH_INPUT", "Input", items=[
        NodeItem("TextureNodeCurveTime"),
        NodeItem("TextureNodeCoordinates"),
        NodeItem("TextureNodeTexture"),
        NodeItem("TextureNodeImage"),
        NodeItem("NodeGroupInput", poll=group_input_output_item_poll),
    ]),
    ArchitectNodeCategory("ARCH_OUTPUT", "Output", items=[
        NodeItem("NodeGroupOutput", poll=group_input_output_item_poll),
    ]),
    ArchitectNodeCategory("ARCH_GROUP", "Group", items=node_group_items),
    ArchitectNodeCategory("ARCH_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
    ]),
]
def register():
    bpy.utils.register_class(ArchitectNodeTree)
    bpy.utils.register_class(ArchitectNodeGroup)
    bpy.utils.register_class(DemoNode)
    nodeitems_utils.register_node_categories('ARCHITECT', architect_node_categories)
    bpy.utils.register_module(__name__)


def unregister():
    nodeitems_utils.unregister_node_categories('ARCHITECT')
    bpy.utils.unregister_class(ArchitectNodeGroup)
    bpy.utils.unregister_class(ArchitectNodeTree)
    bpy.utils.unregister_class(DemoNode)
    bpy.utils.unregister_module(__name__)


if __name__ == "__main__":
    register()
Creating and editing node groups in a custom node tree needs to be implemented by the code that defines the custom node tree.
See this question for an example of implementing custom node groups.
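For orientation, here is a rough, untested sketch (using only the standard bpy data API; names are illustrative) of the kind of data-level work a custom "make group" has to do for the ArchitectNodeTree defined in the question:

import bpy


def add_empty_architect_group(ntree):
    # Create a new tree of the custom type to hold the group's contents.
    group = bpy.data.node_groups.new("ArchitectGroup", "ArchitectNodeTree")
    group.nodes.new("NodeGroupInput")
    group.nodes.new("NodeGroupOutput")

    # Drop a group node into the parent tree and point it at the new tree.
    group_node = ntree.nodes.new("ArchitectNodeGroup")
    group_node.node_tree = group
    return group_node

A real "make group from selection" operator additionally has to move the selected nodes and their links into the new tree and rebuild the interface sockets, which is the part that, as noted above, has to be implemented by the add-on itself.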