How can I get a TeamCity build configuration's VCS root URL in Python? I've looked through the TeamCity REST API documentation and googled whether someone has already had this problem, but there seem to be no similar questions out on the internet.
from dohq_teamcity import TeamCity
from dohq_teamcity.configuration import Configuration
from settings.config import settings

# Token-based (Bearer) authentication
config = Configuration()
config.api_key = {'token': settings.TEAMCITY_TOKEN}
config.api_key_prefix = {'token': 'Bearer'}
config.active_api_key = 'token'

tc = TeamCity(settings.TEAMCITY_URL, auth_settings=["Token"], configuration=config)

# Collect the locator of every VCS root on the server
vcs_roots_locators = [x.locator_id for x in tc.vcs_root.get_roots().data]

def get_vcs_roots_vcs_url(tc: TeamCity, locator: str) -> str:
    # Fetch the VCS root and return its 'url' property
    vcs_root = tc.vcs_root.get_root(vcs_root_locator=locator)
    return [y for y in vcs_root.properties.data if y.name == 'url'][0].value

vcs_roots_urls = [get_vcs_roots_vcs_url(tc, x) for x in vcs_roots_locators]
print(vcs_roots_urls)
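If you would rather not depend on dohq_teamcity, here is a minimal sketch of the same lookup against the plain REST API with requests; the /app/rest/vcs-roots endpoint and the JSON shape follow the TeamCity documentation, but treat the details as assumptions for your server version:

import requests

headers = {
    'Authorization': f'Bearer {settings.TEAMCITY_TOKEN}',
    'Accept': 'application/json',  # ask for JSON instead of the default XML
}
base = settings.TEAMCITY_URL

# List all VCS roots, then fetch each root's properties and pick out 'url'
roots = requests.get(f'{base}/app/rest/vcs-roots', headers=headers).json()
for root in roots['vcs-root']:
    detail = requests.get(f"{base}{root['href']}", headers=headers).json()
    urls = [p['value'] for p in detail['properties']['property'] if p['name'] == 'url']
    print(root['id'], urls)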
I was trying to set up a config.ini with configparser. I got it working with this:
from configparser import ConfigParser

# ------- Message Config Start ------------ #
config = ConfigParser()
config.read('config.ini')

# Device name for the message curls
Geraet = config['DEFAULT']['Geraet_Name']
YellowBellX = config['DEFAULT']['YellowBellXCoord']
YellowBellY = config['DEFAULT']['YellowBellYCoord']
It works for the Telegram curl URLs, and Geraet_Name works as well, but when I use these X/Y values for the pixel match I get this error:
Exception: windll.gdi32.GetPixel failed : return -1
If I change YellowBellX and YellowBellY to hard-coded coordinates, it works:
YellowBellX = 355
YellowBellY = 219
This works and the pixel match is correct, with no error, but I want to use the variables from the config file.
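For what it's worth, a minimal sketch of the likely fix: ConfigParser returns every value as a string, so coordinates read from config.ini need to be converted to integers (for example with getint()) before being passed to a GetPixel-style call. The section and key names below are the ones from the question:

from configparser import ConfigParser

config = ConfigParser()
config.read('config.ini')

Geraet = config['DEFAULT']['Geraet_Name']  # strings are fine for names/URLs
# getint() converts "355"/"219" into the ints the pixel-match call expects
YellowBellX = config.getint('DEFAULT', 'YellowBellXCoord')
YellowBellY = config.getint('DEFAULT', 'YellowBellYCoord')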
I am using the following function to implement a program that changes its behavior depending on the IP of the connected PC.
The problem with this function is that if something tries to log in and fails, it may pick up the IP of the failed attempt.
Now that we've actually hit that case, the program is broken.
What edits do I need to make so this function behaves as expected?
import psutil

def get_ip(port=3389):
    # Return the remote IP of the first established connection on the given port
    ip = ""
    for x in psutil.net_connections():
        if x.status == "ESTABLISHED" and x.laddr.port == port:
            ip = x.raddr.ip
            break
    return ip
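A quick usage check, assuming an established session on the default RDP port (the second call is just an illustration of the port parameter):

print(get_ip())    # e.g. '192.0.2.15' while an RDP session is established
print(get_ip(22))  # same lookup for SSH connections instead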
I changed the function based on Bijay Regmi's comment, thank you. wmi was difficult for me, so I used win32evtlog to read the events out bit by bit. I am still working on improving readability and hunting down bugs.
from datetime import datetime, timedelta, timezone
from typing import Callable, Optional
import xml.etree.ElementTree as ET

import win32evtlog

# Namespace of the Windows event XML schema, in ElementTree's {uri} form
ns = '{http://schemas.microsoft.com/win/2004/08/events/event}'
# Namespace of the EventXML element under UserData; this is provider-specific
# and left as a placeholder here
Event_NS = '{...}'

def systime(xml):
    # Parse SystemTime (e.g. '...T12:34:56.1234567Z') into an aware datetime
    return datetime.fromisoformat(xml.find(f'{ns}System/{ns}TimeCreated').get('SystemTime')[:-2] + "+00:00")

def last_event(handle,
               event_id,
               condition: Callable[['Event'], bool] = None) -> Optional['Event']:
    now = datetime.now(tz=timezone.utc)
    while True:
        events = win32evtlog.EvtNext(handle, 20)
        if not events:
            break
        for event in events:
            xml_content = win32evtlog.EvtRender(event, win32evtlog.EvtRenderEventXml)
            obj = Event(ET.fromstring(xml_content))
            if obj.EventID == event_id:
                # Give up once matching events are older than five minutes
                if obj.SystemTime + timedelta(minutes=5) < now:
                    return None
                if condition and not condition(obj):
                    continue
                return obj

class Event:
    def __init__(self, xml: ET.Element):
        self.EventID = xml and xml.find(f'{ns}System/{ns}EventID').text
        self.SystemTime = xml and systime(xml)
        self.xml = xml
        if self.EventID == '24':  # address lives under UserData/EventXML
            self.IpAddress = xml.find(f'{ns}UserData/{Event_NS}EventXML/{Event_NS}Address').text
        elif self.EventID == '4624':  # successful logon, address under EventData
            self.IpAddress = xml.find(f'{ns}EventData/{ns}Data[@Name="IpAddress"]').text
        else:
            self.IpAddress = None
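For completeness, a minimal usage sketch, assuming the 4624 logon events are read from the Security channel newest-first; the channel name and query flags are assumptions on my part:

handle = win32evtlog.EvtQuery(
    'Security',
    win32evtlog.EvtQueryChannelPath | win32evtlog.EvtQueryReverseDirection,
)
event = last_event(handle, '4624')
print(event.IpAddress if event else "no recent logon event")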
I want to get the list of repositories of a user or an organisation on GitHub, excluding forked, mirrored, or archived repos.
Using PyGithub you can do the following to filter public repos:
from github import Github

g = Github()
user = g.get_user("<username>")  # target user
repos = user.get_repos()

non_forks = []
for repo in repos:
    if repo.fork is False:
        non_forks.append(repo.name)

print(non_forks)
PyGithub (https://github.com/PyGithub/PyGithub) is a Python library for interacting with the GitHub API. From the readme:
from github import Github

# First create a Github instance:

# using username and password
g = Github("user", "password")

# or using an access token
g = Github("access_token")

# Github Enterprise with custom hostname
g = Github(base_url="https://{hostname}/api/v3", login_or_token="access_token")

# Then play with your Github objects:
for repo in g.get_user().get_repos():
    print(repo.name)
The Repository object has properties that allow checking whether the repo is archived, a fork, or a mirror:
repo.archived    # is the repo archived?
repo.fork        # is the repo a fork?
repo.mirror_url  # the URL of the mirrored repo, if applicable
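Putting the three checks together, a short sketch that keeps only repos that are neither forks, nor archived, nor mirrors (the placeholder username is from the question):

from github import Github

g = Github()  # anonymous access; pass a token for higher rate limits
user = g.get_user("<username>")

filtered = [
    repo.name
    for repo in user.get_repos()
    if not repo.fork and not repo.archived and repo.mirror_url is None
]
print(filtered)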
I've written a small script that does the job perfectly:
#!/usr/bin/env python3.9
# -*- coding: utf-8 -*-
import requests
import click
from github import Github

g = Github()

def userexists(username):
    # Probe the GitHub users endpoint; 200 means the user exists, 404 means not
    addr = "https://api.github.com/users/" + username
    response = requests.get(addr)
    return response.status_code == 200

def printrepos(repos):
    # Print the clone URLs of repos that are neither forks nor archived
    for repo in repos:
        if repo.fork is False and repo.archived is False:
            print(repo.clone_url)

@click.command()
@click.argument('username')
def main(username):
    if userexists(username):
        user = g.get_user(username)
        repos = user.get_repos()
        printrepos(repos)
    else:
        print("Username doesn't exist")

if __name__ == "__main__":
    main()
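Since the click decorators pull the username from the command line, a run looks like python3.9 listrepos.py <username> (the file name listrepos.py is hypothetical); click parses argv and passes the argument into main for you.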
I am trying to figure out a way to launch mitmproxy from a Python script (which I have done) and save any traffic to a dump file (which I need help with).
From googling, looking at mitmproxy GitHub issues, and reading example code, this is what I have so far:
from mitmproxy import proxy, options
from mitmproxy.tools.dump import DumpMaster
from mitmproxy.addons import core

class AddHeader:
    def __init__(self):
        self.num = 0

    def response(self, flow):
        self.num = self.num + 1
        print(self.num)
        flow.response.headers["count"] = str(self.num)

addons = [
    AddHeader()
]

opts = options.Options(listen_host='127.0.0.1', listen_port=8080)
pconf = proxy.config.ProxyConfig(opts)

m = DumpMaster(None)
m.server = proxy.server.ProxyServer(pconf)
# print(m.addons)
m.addons.add(addons)
print(m.addons)
# m.addons.add(core.Core())

try:
    m.run()
except KeyboardInterrupt:
    m.shutdown()
The issue is that this raises AttributeError: No such option: body_size_limit, which seems to be mitigated with master.addons.add(core.Core), but that core addon already exists in DumpMaster, so that fires a different error.
Inspecting the addons that are currently loaded by DumpMaster, I do see that the save-to-file addon is loaded, but I am not clear on how to access it so that any traffic going through the proxy, regardless of whether it is a request, response, WebSocket, or TCP, gets written to a dump file.
Thanks!
Here is a redacted list of the addons that are loaded:
<mitmproxy.addons.streambodies.StreamBodies object at 0x111542da0>
<mitmproxy.addons.save.Save object at 0x111542dd8>
<mitmproxy.addons.upstream_auth.UpstreamAuth object at 0x111542e10>
Just add these two lines after opts = options.Options(listen_host='127.0.0.1', listen_port=8080):
opts.add_option("body_size_limit", int, 0, "")
opts.add_option("keep_host_header", bool, True, "")
Your code snippet already runs a working proxy. However, the option to dump the recorded traffic into a file during runtime (save_stream_file) is part of the Save addon, which is loaded by default after the DumpMaster instance is created. Therefore, you need to set the save_stream_file option after creating the DumpMaster instance. It took me a while to figure out as well, but this worked for me, saving the output stream to a file named traffic_stream:
from mitmproxy import proxy, options
from mitmproxy.tools.dump import DumpMaster

opts = options.Options(listen_port=8081)
opts.add_option("body_size_limit", int, 0, "")
pconf = proxy.config.ProxyConfig(opts)

m = DumpMaster(None)
m.server = proxy.server.ProxyServer(pconf)
# The Save addon is registered by now, so this option exists
m.options.set('save_stream_file=traffic_stream')

try:
    m.run()
except KeyboardInterrupt:
    m.shutdown()
Hope it works for you as well!
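To double-check the capture afterwards, a small sketch that reads the saved flows back with mitmproxy's io module (the file name matches the answer above):

from mitmproxy import io

with open("traffic_stream", "rb") as f:
    for flow in io.FlowReader(f).stream():
        print(flow)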
I have numerous Bokeh server files in a directory, say /dir/bokeh/; assume the Bokeh servers are called bokeh1.py, bokeh2.py, bokeh3.py.
The file structure is like so:
|--dir
   |--flask.py
   |--bokeh
      |--bokeh1.py
      |--bokeh2.py
I am deploying them all on Flask like so:
import os

from bokeh.command.util import build_single_handler_applications
from bokeh.server.tornado import BokehTornado
from bokeh.server.util import bind_sockets
from tornado.httpserver import HTTPServer

# Collect every .py file under the bokeh/ subdirectory
files = []
for file in os.listdir("/dir/bokeh/"):
    if file.endswith('.py'):
        file = "bokeh/" + file
        files.append(file)

# Derive the app names (bokeh1, bokeh2, ...) from the file names
argvs = {}
urls = []
for i in files:
    argvs[i] = None
    urls.append(i.split('/')[-1].split('.')[0])

host = 'myhost.com'

apps = build_single_handler_applications(files, argvs)
bokeh_tornado = BokehTornado(apps, extra_websocket_origins=["myhost.com"])
bokeh_http = HTTPServer(bokeh_tornado)
sockets, port = bind_sockets("myhost.com", 0)
bokeh_http.add_sockets(sockets)
Then for each Bokeh server, I have within flask.py:
@app.route("/bokeh1")
def bokeh1():
    bokeh_script = server_document("http://11.111.11.111:%d/bokeh1" % port)
    return render_template("bokserv.html", bokeh_script=bokeh_script)
The number of Bokeh servers I need to deploy could grow quickly. How can I write something that will generate the @app.route for each of the Bokeh apps bokeh1.py, bokeh2.py, bokeh3.py efficiently, based on my current setup? The server is being run on Ubuntu.
You can create all the functions in a loop:
def serve(name):
    def func():
        bokeh_script = server_document("http://11.111.11.111:%d/%s" % (port, name))
        return render_template("bokserv.html", bokeh_script=bokeh_script)
    # Rename before registering: Flask derives the endpoint name from
    # __name__, and duplicate endpoints raise an error
    func.__name__ = name
    app.route("/{}".format(name))(func)
    return func

all_serve_functions = [serve(name) for name in all_names]
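Here all_names is assumed to be the list of app names you already derive in urls (bokeh1, bokeh2, ...), so the last line could equally read [serve(name) for name in urls].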