I need to disable parallel run for a single target. It is a test that verifies that the program doesn't create random or incorrectly named files. Any other file that is built in the meantime fails this test.
I found this advice on SCons FAQ:
Use the SideEffect() method and specify the same dummy file for each target that shouldn't be built in parallel. Even if the file doesn't exist, SCons will prevent the simultaneous execution of commands that affect the dummy file. See the linked method page for examples.
However, this is useless, as it would prevent parallel build of any two targets not only the test script.
Is there any way to prevent parallel build of one target while allowing it for all others?
We discussed this in the SCons discord, and came up with an example which will set up synchronous test runners which will make sure no other tasks are running when the test is run.
This is the example SConstruct from the github example repo:
import SCons
# A bound map of stream (as in stream of work) name to side-effect
# file. Since SCons will not allow tasks with a shared side-effect
# to execute concurrently, this gives us a way to limit link jobs
# independently of overall SCons concurrency.
# Maps a generated side-effect name to its node, so each target's lock
# node is created exactly once even if the emitter runs repeatedly.
node_map = dict()
# A list of nodes that have to be run synchronously.
# The shared side effects ensure the test runners are synchronous amongst
# themselves.
sync_nodes = list()
# This emitter adds a phony side effect per target.
# The test builders will share all the other side effects, making
# sure the tests only run when nothing else is running.
def sync_se_emitter(target, source, env):
    """Emitter injected into every ordinary builder: give the target its own
    dummy side-effect node and share it with all synchronous test runners."""
    name = str(target[0])
    # NOTE(review): str(hash(name)) is not stable across interpreter runs
    # (PYTHONHASHSEED), so lock-node names vary per invocation -- presumably
    # harmless since they are never written to disk; confirm.
    se_name = "#unique_node_" + str(hash(name))
    se_node = node_map.get(se_name, None)
    if not se_node:
        se_node = env.Entry(se_name)
        # This may not be necessary, but why chance it
        env.NoCache(se_node)
        node_map[se_name] = se_node
        # Share the new lock with every synchronous runner seen so far, so
        # this target can never build while a test is running.
        for sync_node in sync_nodes:
            env.SideEffect(se_name, sync_node)
    env.SideEffect(se_node, target)
    return (target, source)
# here we force all builders to use the emitter, so all
# targets will respect the shared sideeffect when being built.
# NOTE: that the builders which should be synchronous must be listed
# by name, as SynchronousTestRunner is in this example
original_create_nodes = SCons.Builder.BuilderBase._create_nodes

def always_emitter_create_nodes(self, env, target=None, source=None):
    """Patched BuilderBase._create_nodes that injects sync_se_emitter into
    every builder except the synchronous test runner itself."""
    if self.get_name(env) != "SynchronousTestRunner":
        # Fix: only attach the emitter once. The original wrapped
        # self.emitter on *every* _create_nodes call, nesting ListEmitters
        # and registering sync_se_emitter repeatedly for builders that are
        # invoked more than once.
        if isinstance(self.emitter, SCons.Builder.ListEmitter):
            if not any(e is sync_se_emitter for e in self.emitter):
                self.emitter.append(sync_se_emitter)
        elif self.emitter:
            self.emitter = SCons.Builder.ListEmitter([self.emitter, sync_se_emitter])
        else:
            self.emitter = SCons.Builder.ListEmitter([sync_se_emitter])
    return original_create_nodes(self, env, target, source)

SCons.Builder.BuilderBase._create_nodes = always_emitter_create_nodes
env = Environment()
env.Tool('textfile')
nodes = []
# This is a fake test runner which acts like it is running a test: it
# sleeps so the build visibly pauses, then writes a marker file.
env['BUILDERS']["SynchronousTestRunner"] = SCons.Builder.Builder(
    action=SCons.Action.Action([
        "sleep 1",
        "echo Starting test $TARGET",
        "sleep 5",
        "echo Finished test $TARGET",
        'echo done > $TARGET'],
        None))
# This emitter connects the test runners with the shared side effects.
def sync_test_emitter(target, source, env):
    # Claim every per-target lock created so far, then record the runner's
    # targets so locks created later also exclude them.
    for name in node_map:
        env.SideEffect(name, target)
    sync_nodes.append(target)
    return (target, source)

env['BUILDERS']["SynchronousTestRunner"].emitter = SCons.Builder.ListEmitter([sync_test_emitter])
# In this test we create two test runners and make them depend on various
# source files being generated. This is just to force the tests to be run in
# the middle of the build, demonstrating that all other jobs have paused so
# the test can be performed.
env.SynchronousTestRunner("test.out", "source10.c")
env.SynchronousTestRunner("test2.out", "source62.c")
for i in range(50):
    nodes.append(env.Textfile(f"source{i}.c", f"int func{i}(){{return {i};}}"))
for i in range(50, 76):
    node = env.Textfile(f"source{i}.c", f"int func{i}(){{return {i};}}")
    env.Depends(node, "test.out")
    nodes.append(node)
for i in range(76, 100):
    node = env.Textfile(f"source{i}.c", f"int func{i}(){{return {i};}}")
    env.Depends(node, "test2.out")
    nodes.append(node)
nodes.append(env.Textfile('main.c', 'int main(){return 0;}'))
env.Program('out', nodes)
This solution is based on dmoody256's answer.
The underlying concept is the same but the code should be easier to use and it's ready to be put in the site_scons directory to not obfuscate SConstruct itself.
site_scons/site_init.py:
# Allows using functions `SyncBuilder` and `Environment.SyncCommand`.
from SyncBuild import SyncBuilder
site_scons/SyncBuild.py:
from SCons.Builder import Builder, BuilderBase, ListEmitter
from SCons.Environment import Base as BaseEnvironment
# This code allows building some targets synchronously, which means there
# won't be anything else built at the same time even if SCons is run with
# flag `-j`.
#
# This is achieved by adding a different dummy value as a side effect of each
# target. (These files won't be created. They are only a way of enforcing
# constraints on SCons.)
# The files that need to be built synchronously then get every dummy value
# from the entire configuration added as a side effect, which effectively
# prevents them from being built along with any other file.
#
# To create a synchronous target use `SyncBuilder`.

# Targets that already received their per-target lock side effect.
__processed_targets = set()
# One dummy Value node (lock) per processed target.
__lock_values = []
# Target lists of every synchronous builder/command created so far.
__synchronous_nodes = []
def __add_emiter_to_builder(builder, emitter):
    """Attach `emitter` to `builder`, preserving any emitter already set and
    never attaching the same emitter object twice."""
    current = builder.emitter
    if not current:
        builder.emitter = ListEmitter([emitter])
        return
    if not isinstance(current, ListEmitter):
        builder.emitter = ListEmitter([current, emitter])
        return
    # Already a ListEmitter: append only if this exact emitter is absent.
    if all(existing is not emitter for existing in current):
        current.append(emitter)
def __individual_sync_locks_emiter(target, source, env):
    """Emitter added to every builder: create a per-target dummy lock value
    and share it with all synchronous nodes seen so far."""
    if not target or target[0] not in __processed_targets:
        # A Value node is used so no file is ever created for the lock.
        lock_value = env.Value(f'.#sync_lock_{len(__lock_values)}#')
        env.NoCache(lock_value)
        # The lock is shared by this target and every synchronous node,
        # preventing them from building concurrently.
        env.SideEffect(lock_value, target + __synchronous_nodes)
        # NOTE(review): the guard checks only target[0] but all targets are
        # recorded -- presumably fine for single-target builders; confirm for
        # multi-target ones.
        __processed_targets.update(target)
        __lock_values.append(lock_value)
    return target, source
# Monkeypatch BuilderBase._create_nodes so every builder -- including ones
# created after this module is imported -- picks up the per-target lock
# emitter before its nodes are created.
__original_create_nodes = BuilderBase._create_nodes

def __create_nodes_adding_emiter(self, *args, **kwargs):
    # Attaching is idempotent (identity check in __add_emiter_to_builder),
    # so doing it on every call is safe.
    __add_emiter_to_builder(self, __individual_sync_locks_emiter)
    return __original_create_nodes(self, *args, **kwargs)

BuilderBase._create_nodes = __create_nodes_adding_emiter
def _all_sync_locks_emitter(target, source, env):
    """Emitter for synchronous builders: share every existing lock value with
    `target` so it cannot be built alongside any other target."""
    env.SideEffect(__lock_values, target)
    # Remember these targets so locks created later also exclude them.
    __synchronous_nodes.append(target)
    return (target, source)
def SyncBuilder(*args, **kwargs):
    """Drop-in replacement for `Builder` whose targets are never built at the
    same time as any other target in the configuration."""
    builder = Builder(*args, **kwargs)
    __add_emiter_to_builder(builder, _all_sync_locks_emitter)
    return builder
def __SyncBuilder(self, *args, **kwargs):
    """Environment method: like `Environment.Builder`, but the resulting
    builder's targets are built with nothing else running."""
    builder = self.Builder(*args, **kwargs)
    __add_emiter_to_builder(builder, _all_sync_locks_emitter)
    return builder

BaseEnvironment.SyncBuilder = __SyncBuilder
def __SyncCommand(self, *args, **kwargs):
    """It works like the normal `Command` except it prevents the targets from
    being built at the same time as any other target."""
    target = self.Command(*args, **kwargs)
    # Command returns finished nodes rather than a builder we could hook, so
    # apply the synchronous emitter to the result directly.
    _all_sync_locks_emitter(target, [], self)
    return target

BaseEnvironment.SyncCommand = __SyncCommand
SConstruct (this is adapted dmoody256's test that does the same thing as the original):
env = Environment()
env.Tool('textfile')
nodes = []
# This is a fake test runner which acts like it is running a test;
# SyncBuilder (from site_scons) makes it exclusive of all other jobs.
env['BUILDERS']["SynchronousTestRunner"] = SyncBuilder(
    action=Action([
        "sleep 1",
        "echo Starting test $TARGET",
        "sleep 5",
        "echo Finished test $TARGET",
        'echo done > $TARGET'],
        None))
# Create two test runners and make them depend on various generated source
# files. This just forces the tests to run in the middle of the build,
# demonstrating that all other jobs pause while a test is performed.
env.SynchronousTestRunner("test.out", "source10.c")
env.SynchronousTestRunner("test2.out", "source62.c")
for i in range(50):
    nodes.append(env.Textfile(f"source{i}.c", f"int func{i}(){{return {i};}}"))
for i in range(50, 76):
    node = env.Textfile(f"source{i}.c", f"int func{i}(){{return {i};}}")
    env.Depends(node, "test.out")
    nodes.append(node)
for i in range(76, 100):
    node = env.Textfile(f"source{i}.c", f"int func{i}(){{return {i};}}")
    env.Depends(node, "test2.out")
    nodes.append(node)
nodes.append(env.Textfile('main.c', 'int main(){return 0;}'))
env.Program('out', nodes)
After creating site_scons/site_init.py and site_scons/SyncBuild.py, you can just use function SyncBuilder or method Environment.SyncCommand in any SConstruct or SConscript file in the project without any additional configuration.
Related
I'm trying to debug my usage of wired and pyramid_services as well as migrate from using named services to registering services with interfaces and context classes.
Is there a way to see everything that is registered with the current container? Both for debugging and also to create fixtures for pytest during testing. Sort of like the get_registrations line of this pseudo code for injecting tests into conftest.py for pytests:
def generate_service_fixture(reg):
    """Return a pytest fixture that resolves the service described by `reg`
    from a request object."""
    # NOTE(review): '#pytest.fixture()' is almost certainly a paste-mangled
    # '@pytest.fixture()' decorator -- restore the '@' before using this code.
    #pytest.fixture()
    def service_fixture(base_app_request):
        return base_app_request.find_service(iface=reg.iface, context=reg.context, name=reg.name)
    return service_fixture
def inject_service_fixture(reg):
    """Create a fixture for registration `reg` and publish it in this module's
    globals under a name derived from iface/context/name."""
    name_parts = [
        get_iface_name(reg.iface),
        get_context_name(reg.context),
        get_name(reg.name),
    ]
    # Make up a name that tests can use to pull in the appropriate fixture.
    fixture_name = '__'.join(part for part in name_parts if part) + '_service'
    globals()[fixture_name] = generate_service_fixture(reg)
def get_iface_name(iface):
    """Return the interface's class name, or None when no interface is given."""
    if not iface:
        return None
    return iface.__name__
def get_context_name(context):
    """Return the context's class name, or None when no context is given."""
    if not context:
        return None
    return context.__name__
def get_name(name):
    """Pass a non-empty service name through; map falsy names to None."""
    return name or None
def register_fixtures(container):
    """Inject one pytest fixture per service registration in `container`."""
    for registration in container.get_registrations():
        inject_service_fixture(registration)
Later on in tests I would do something like:
def test_service_factory(IRequest_service):
assert IRequest_service, "Factory failed to construct request."
This sort of works for debugging after the services have been declared. I'm just posting this half-answer for now. I don't have a clean solution for dynamic pytest fixture creation.
def includeme(config):
    # ...
    # Registrations only become visible to the introspector after commit.
    config.commit()
    introspector = config.registry.introspector
    # Dump every service registered via pyramid_services.
    for intr in introspector.get_category('pyramid_services'):
        print (intr['introspectable'])
I am trying to add some variables (e.g. self.boolean_flag) to HttpUser.
Which represents the user state. This variable is used in scenario load testing.
According to the documentation, I should use on_start to initialise variables.
However, when I use tasks = [TaskSet] like below, The on_start doesn't seem to work.
AttributeError: 'ExampleTask' object has no attribute 'boolean_flag':
class ExampleTask(TaskSet):
    # Fix: the paste mangled the '@task' decorator into '#task'; without it
    # locust would not register example_one as a runnable task.
    @task
    def example_one(self):
        # TaskSet is not the User: the flag set in CustomUser.on_start lives
        # on the user object, reachable here as self.user.boolean_flag.
        print(self.boolean_flag) # AttributeError: 'ExampleTask' object has no attribute 'boolean_flag'
        make_api_request(self, "example_one")
class CustomUser(HttpUser):
    # Wait-time bounds come from the environment, with defaults.
    wait_time = between(
        int(os.getenv("LOCUST_MIN_WAIT", 200)), int(os.getenv("LOCUST_MAX_WAIT", 1000))
    )

    # State is set on the *user*; TaskSet tasks must read it via self.user.
    def on_start(self):
        self.boolean_flag = False

    tasks = {ExampleTask1 : 10, ExampleTask2: 5 ... }
The bottom works though:
class CustomUser(HttpUser):
    wait_time = between(
        int(os.getenv("LOCUST_MIN_WAIT", 200)), int(os.getenv("LOCUST_MAX_WAIT", 1000))
    )

    def on_start(self):
        self.boolean_flag = False

    # Fix: restored the '@task' decorator that the paste mangled into '#task'.
    @task
    def example_one(self):
        # Works here: the task is defined on the User itself, so `self` is
        # the user and has boolean_flag.
        print(self.boolean_flag)
        make_api_request(self, "example_one")
Since I have many different scenarios that reuse many Tasksets, I need to use Tasks = {}..
I also tried subclassing HttpUser and adding those variables in `__init__()`.
But that doesn't work well with tasks={} either.
class CustomUser(HttpUser):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-user state, inherited by subclasses.
        self.boolean_flag = False

class AllOfApisCallForLoadAtOneGo(CustomUser):
    wait_time = between(
        int(os.getenv("LOCUST_MIN_WAIT", 200)), int(os.getenv("LOCUST_MAX_WAIT", 1000))
    )
    tasks = {ExampleTask1 : 10, ExampleTask2: 5 ... }
(loadtest-GvbsrA_X-py3.8) ➜ loadtest git:(abcd) ✗ locust -f locustfile_scenario.py first -H https://www.somehost.com
[2020-09-02 06:24:27,276] MacBook-Pro.local/INFO/locust.main: Starting web interface at http://0.0.0.0:8089 (accepting connections from all network interfaces)
[2020-09-02 06:24:27,286] MacBook-Pro.local/INFO/locust.main: Starting Locust 1.2.3
[2020-09-02 06:24:35,881] MacBook-Pro.local/INFO/locust.runners: Spawning 10 users at the rate 3 users/s (0 users already running)...
[2020-09-02 06:24:35,883] MacBook-Pro.local/ERROR/locust.user.task: You must specify the base host. Either in the host attribute in the User class, or on the command line using the --host option.
Traceback (most recent call last):
File "/Users/poetry/virtualenvs/loadtest-GvbsrA_X-py3.8/lib/python3.8/site-packages/locust/user/task.py", line 284, in run
self.execute_next_task()
File "/Users/poetry/virtualenvs/loadtest-GvbsrA_X-py3.8/lib/python3.8/site-packages/locust/user/task.py", line 309, in execute_next_task
self.execute_task(self._task_queue.pop(0))
File "/Users/poetry/virtualenvs/loadtest-GvbsrA_X-py3.8/lib/python3.8/site-packages/locust/user/task.py", line 422, in execute_task
task(self.user)
File "/Users/poetry/virtualenvs/loadtest-GvbsrA_X-py3.8/lib/python3.8/site-packages/locust/user/users.py", line 224, in __init__
raise LocustError(
locust.exception.LocustError: You must specify the base host. Either in the host attribute in the User class, or on the command line using the --host option.
It appears you're assuming that TaskSet inherits from or somehow otherwise is called directly from HttpUser, which isn't the case. But TaskSet does have the user passed into it when it's instantiated. You just have to use self.user. So in your case instead of print(self.boolean_flag) in your task, you'd do print(self.user.boolean_flag).
I am struggling with command-line parsing and argparse, how to handle global variables, subcommands and optional params to these subcommands
I'm writing a python3 wrapper around python-libvirt to manage my VMs. The wrapper will handle creation, removal, stop/start, snapshots, etc.
A partial list of the options follows, that shows the different ways to pass params to my script:
# Connection option for all commands:
# ---
# vmman.py [-c hypervisor] (defaults to qemu:///system)
# Generic VM commands:
# ---
# vmman.py show : list all vms, with their state
# vmman.py {up|down|reboot|rm} domain : boots, shuts down, reboots
or deletes the domain
# Snapshot management:
# ---
# vmman.py lssnap domain : list snapshots attached to domain
# vmman.py snaprev domain [snapsname] : reverts domain to latest
snapshot or to snapname
# Resource management:
# ---
# vmman.py domain resdel [disk name] [net iface]
And then some code used to test the first subcommand :
def setConnectionString(args):
    # NOTE(review): prints args.cstring, but the parser below stores the
    # option under dest='host' -- this line would raise AttributeError if
    # reached; the question's actual failure happens earlier, in parsing.
    print('Arg = %s' % args.cstring)

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# The subcommand must be given on the command line exactly as 'ConnectionURI'.
parserConnect = subparsers.add_parser('ConnectionURI')
parserConnect.set_defaults(func=setConnectionString)
parserConnect.add_argument('-c', '--connect', dest='host')
args = parser.parse_args()
args.func(args)
print("COMPLETED")
Now, the argparse documentation on docs.python.org is dense and a bit confusing for a Python newbie like me... I would have expected the output to be something like:
`Arg = oslo`
What I get is :
[10:21:40|jfgratton#bergen:kvmman.py]: ./argstest.py -c oslo
usage: argstest.py [-h] {ConnectionURI} ...
argstest.py: error: invalid choice: 'connectionURI' (choose from 'ConnectionURI')
I obviously miss something, and I'm only testing the one I thought would be the easiest of the lot (global param); haven't even figured yet on how to handle optional subparams and all.
Your error output lists 'connectionURI' with lowercase 'c' as invalid choice, while it also says "choose from 'ConnectionURI'" with capital letter 'C'.
Fix: Call your test with:
./argstest.py ConnectionURI oslo
Maybe you should start simple (without subparsers) and build from there:
import argparse

def setConnectionString(hostname):
    # The positional 'hostname' comes straight from the command line.
    print('Arg = {}'.format(hostname))

parser = argparse.ArgumentParser(description='python3 wrapper around python-libvirt to manage VMs')
parser.add_argument('hostname')
args = parser.parse_args()
setConnectionString(args.hostname)
print("COMPLETED")
I want to build files based on variables that are assigned in the SConstruct file. As in this example:
import os

# Pass the external environment through so external tools behave as in the
# user's shell.
env = Environment(ENV = os.environ)
def text_file_maker(target, source, env):
    """Builder action: write the construction variable ``my_text`` into the
    first target file. Returns 0 so SCons treats the action as a success."""
    out_path = str(target[0])
    with open(out_path, "w") as handle:
        handle.write(env['my_text'])
    return 0
# Register the Python-function action as the 'Make_text' builder.
env.Append( BUILDERS = {'Make_text' : Builder(action = text_file_maker)})

env.Make_text(target = 'sour.txt',
              source = None,
              my_text = 'lemon')

env.Make_text(target = 'sweet.txt',
              source = None,
              my_text = 'apple')
Running this script generates two text files with the content 'lemon' and 'apple'. If I'm running the script again, SCons correctly detects that the targets exist:
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
scons: `.' is up to date.
scons: done building targets.
Now if I'm changing one target, e.g:
# Changing only my_text does not trigger a rebuild: the variable is not part
# of the action's signature unless listed in varlist.
env.Make_text(target = 'sweet.txt',
              source = None,
              my_text = 'orange')
and run the script, I'll get scons: '.' is up to date. and the content 'orange' is not written to sweet.txt.
I could specify a dependency:
# Heavy-handed workaround: rebuilds both targets on *any* SConstruct change.
Depends(['sweet.txt', 'sour.txt'], 'SConstruct')
but that forces a rebuild for any change in my SConstruct script.
How can I make SCons recognise changes in the SConstruct script that affects the targets?
This could be a very simple question, sorry if I missed something obvious.
Your example very closely resembles the answer which is in the manpage (search for "Action Objects"):
https://scons.org/doc/production/HTML/scons-man.html
The third and succeeding arguments, if present, may either be a construction variable or a list of construction variables whose values will be included in the signature of the Action when deciding whether a target should be rebuilt because the action changed. The variables may also be specified by a varlist= keyword parameter; if both are present, they are combined. This is necessary whenever you want a target to be rebuilt when a specific construction variable changes. This is not often needed for a string action, as the expanded variables will normally be part of the command line, but may be needed if a Python function action uses the value of a construction variable when generating the command line.
def build_it(target, source, env):
    # Build the target from the 'XXX' construction variable.
    open(target[0], 'w').write(env['XXX'])
    return 0

# Use positional arguments.
a = Action(build_it, '$STRINGIT', ['XXX'])

# Alternatively, use a keyword argument.
a = Action(build_it, varlist=['XXX'])
So in your case change:
# Original registration: bare function action, no varlist.
env.Append( BUILDERS = {'Make_text' : Builder(action = text_file_maker)})
To
# Wrap the function in an Action with varlist so changes to 'my_text'
# invalidate the target and force a rebuild.
tfb_action = Action(text_file_maker, varlist=['my_text'])
env.Append( BUILDERS = {'Make_text' : Builder(action = tfb_action)})
Let's say I want to strip all the debug symbols in the shared libraries that I build, while keeping the original file name.
I tried to add a command in the method:
def mySharedLibrary(self, *args, **kwargs):
    """Pseudo-builder: build a shared library with the project's common
    settings, then strip debug symbols from it in place."""
    # do some common work for every shared library like add a soname or
    # append some lib files to LIBS parameter
    # Fix: 'SharedLibary(...)' was a NameError (typo, and the builder must be
    # called through the environment) -- use self.SharedLibrary(...).
    target = self.SharedLibrary(*args, **kwargs)
    # NOTE(review): using the same node as both source and target is what
    # triggers the "two different methods for the same target" error the
    # question asks about; see the intermediate-target answer below.
    target = self.Command(target, target, "objcopy --strip-debug ${SOURCE}")
    return target
I get this error: two different methods were given for the same target,
I guess it's because the two targets returned by env.Command and SharedLibrary are exactly the same name.
Any ideas to do this?
Thanks in advance!
I had the same problem and got the same error. What I had to do was to create an intermediate target/library. The intermediate and final targets each had their own library name, so SCons doesn't get confused.
You could probably do something like the following:
env.SharedLibrary(target = 'namePreStrip', source = 'yourSource')
# Fix: the command string must be passed via the 'action' keyword -- a
# positional argument after keyword arguments is a SyntaxError.
env.Command(target = 'name', source = 'namePreStrip', action = 'objcopy...')
I used objcopy to build a library out of several libraries. Here's the actual source code I implemented:
#
# Build an object file out of several other source files, objects, and libraries
# Optionally execute objcopy on the resulting library, depending if objcopyFlags
# has been populated
#
# env - SCons Environment used to build, Mandatory arg
# target - resulting library name, without LIBPREFIX and LIBSUFFIX, e.g. 'nsp2p',
# Mandatory arg
# sourceFiles - list of '.cc' files that will be compiled and included in the
# resulting lib, Optional arg
# objects - list of already compiled object files to be included in resulting lib,
# Optional arg
# libraries - list of libraries to be included in resulting lib, Optional arg
# objcopyFlags - list of flags to pass to objcopy command. objcopy will only
# be executed if this list is populated, Optional arg
#
# One of [sourceFiles, objects, or libraries] must be specified, else nothing
# will be performed
#
# Not using a custom builder because I don't like the way SCons prints the
# entire command each time it's called, even if it's not going to actually
# build anything AND I need more method args than provided by custom builders
#
def buildWholeArchive(self, env, target, sourceFiles, objects, libraries, objcopyFlags):
    """Build one relocatable library out of source files, objects and
    whole-archive libraries, optionally running objcopy on the result.

    Returns the final archive target node, or None when no inputs were given
    (in which case no build action is set up). See the comment block above
    for the meaning of each argument.
    """
    if len(sourceFiles) == 0 and len(objects) == 0 and len(libraries) == 0:
        # Fix: the original used a Python 2 print *statement*, which is a
        # SyntaxError on Python 3 (the rest of this file is py3). Also fixed
        # the 'librarires' typo in the message.
        print("Incorrect use of buildWholeArchive, at least one of "
              "[sourceFiles | objects | libraries] must be specified, "
              "no build action will be performed")
        return None
    # Compile each source file
    objNodes = []
    if len(sourceFiles) > 0:
        objNodes = env.Object(source=sourceFiles)
    # Assemble the link command: everything between the --whole-archive
    # flags is included in its entirety.
    cmdList = []
    cmdList.append(env['CXX'])
    cmdList.append('-nostdlib -r -o $TARGET -Wl,--whole-archive')
    for obj in objNodes:
        cmdList.append(env.File(obj).abspath)
    for obj in objects:
        cmdList.append(env.File(obj).abspath)
    for lib in libraries:
        cmdList.append(lib)
    cmdList.append('-Wl,--no-whole-archive')
    cmd = ' '.join(cmdList)
    libTarget = '%s%s%s' % (env['LIBPREFIX'], target, env['LIBSUFFIX'])
    if len(objcopyFlags) > 0:
        # First create the library, then run objcopy on it.
        objTarget = '%s%s_preObjcopy%s' % (env['LIBPREFIX'], target, env['LIBSUFFIX'])
        preObjcopyTarget = env.Command(target=objTarget, source=[], action=cmd)
        env.Depends(preObjcopyTarget, [objNodes, sourceFiles, objects, libraries])
        objCmdList = [env['OBJCOPY']]
        objCmdList.extend(objcopyFlags)
        objCmdList.append('$SOURCE $TARGET')
        objcopyCmd = ' '.join(objCmdList)
        archiveTarget = env.Command(target=libTarget, source=preObjcopyTarget, action=objcopyCmd)
    else:
        # Just create the library.
        archiveTarget = env.Command(target=libTarget, source=[], action=cmd)
        env.Depends(archiveTarget, [objNodes, sourceFiles, objects, libraries])
    return archiveTarget
And here is how I called it:
# Example invocation: the extra libraries and objcopy symbol renames are only
# used for the Octeon build flavour.
sourceFiles = ['file1.cc', 'file2.cc']
libSource = []
if 'OcteonArchitecture' in env:
    libSource.append(lib1)
    libSource.append(lib2)
    libSource.append(lib3)
objcopy = []
if 'OcteonArchitecture' in env:
    # Redirect the allocator symbols to the ns_* implementations.
    objcopy.extend([
        '--redefine-sym calloc=ns_calloc',
        '--redefine-sym free=ns_free',
        '--redefine-sym malloc=ns_malloc',
        '--redefine-sym realloc=ns_realloc'])
archiveTarget = clonedEnv.buildWholeArchive(target = libName,
                                            sourceFiles = sourceFiles,
                                            objects = [],
                                            libraries = libSource,
                                            objcopyFlags = objcopy)
env.Alias('libMyLib', archiveTarget)