How to use google-cloud-os-config classes in python code? - python-3.x

In a Google Cloud Function (Python 3.7), I need to fetch the compliance state of all VMs in a given location in a project.
From the available Google documentation here I could see the REST API format:
https://cloud.google.com/compute/docs/os-configuration-management/view-compliance#view_compliance_state
On searching for the client library here, I found this:
class google.cloud.osconfig_v1alpha.types.ListInstanceOSPoliciesCompliancesRequest(mapping=None, *, ignore_unknown_fields=False, **kwargs)
Bases: proto.message.Message
A request message for listing OS policies compliance data for all Compute Engine VMs in the given location.
parent (str)
    Required. The parent resource name.
    Format: projects/{project}/locations/{location}
    For {project}, either the Compute Engine project number or project ID can be provided.
page_size (int)
    The maximum number of results to return.
page_token (str)
    A pagination token returned from a previous call to ListInstanceOSPoliciesCompliances that indicates where this listing should continue from.
filter (str)
    If provided, this field specifies the criteria that must be met by an InstanceOSPoliciesCompliance API resource to be included in the response.
And the response class:
class google.cloud.osconfig_v1alpha.types.ListInstanceOSPoliciesCompliancesResponse(mapping=None, *, ignore_unknown_fields=False, **kwargs)
Bases: proto.message.Message
A response message for listing OS policies compliance data for all Compute Engine VMs in the given location.
instance_os_policies_compliances (Sequence[google.cloud.osconfig_v1alpha.types.InstanceOSPoliciesCompliance])
    List of instance OS policies compliance objects.
next_page_token (str)
    The pagination token to retrieve the next page of instance OS policies compliance objects.
property raw_page
But I am not sure how to use this information in Python code.
I have written this, but I am not sure if it is correct:
from google.cloud.osconfig_v1alpha.services.os_config_zonal_service import client
from google.cloud.osconfig_v1alpha.types import ListInstanceOSPoliciesCompliancesRequest
import logging
import os

logger = logging.getLogger(__name__)

def handler():
    try:
        project_id = os.environ["PROJECT_ID"]
        location = os.environ["ZONE"]
        # list compliance state
        request = ListInstanceOSPoliciesCompliancesRequest(
            parent=f"projects/{project}/locations/{location}")
        response = client.instance_os_policies_compliance(request)
        return response
    except Exception as e:
        logger.error("Unable to get compliance - %s " % str(e))
I could not find any usage example for the client library methods anywhere.
Could someone please help me here?
EDIT:
This is what I am using now:
from googleapiclient.discovery import build

def list_policy_compliance():
    projectId = "my_project"
    zone = "my_zone"
    try:
        service = build('osconfig', 'v1alpha', cache_discovery=False)
        compliance_response = service.projects().locations(
        ).instanceOsPoliciesCompliances().list(
            parent='projects/%s/locations/%s' % (projectId, zone)).execute()
        return compliance_response
    except Exception as e:
        # Chain the original error instead of raising a bare Exception()
        raise Exception("Unable to list OS policy compliance") from e

Something like this should work:
from google.cloud import osconfig_v1alpha as osc

def handler():
    client = osc.OsConfigZonalServiceClient()
    project_id = "my_project"
    location = "my_gcp_zone"
    parent = f"projects/{project_id}/locations/{location}"
    response = client.list_instance_os_policies_compliances(
        parent=parent
    )
    # response is an iterable yielding
    # InstanceOSPoliciesCompliance objects
    for result in response:
        # do something with result
        ...
You can also construct the request like this:
response = client.list_instance_os_policies_compliances(
    request={
        "parent": parent
    }
)
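If there are many VMs, the list call is paginated: the returned pager follows next_page_token for you as you iterate, and you can cap the page size in the request. A minimal sketch along those lines; the page_size value and the instance/state fields read off each InstanceOSPoliciesCompliance are assumptions to verify against your installed library version:

from google.cloud import osconfig_v1alpha as osc

def list_compliance_states(project_id: str, location: str):
    client = osc.OsConfigZonalServiceClient()
    request = osc.ListInstanceOSPoliciesCompliancesRequest(
        parent=f"projects/{project_id}/locations/{location}",
        page_size=50,  # results are fetched 50 at a time
    )
    # Iterating the pager transparently requests further pages
    # using next_page_token until the listing is exhausted.
    for compliance in client.list_instance_os_policies_compliances(request=request):
        print(compliance.instance, compliance.state)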

Answering my own question here, this is what I used:

from googleapiclient.discovery import build

def list_policy_compliance():
    projectId = "my_project"
    zone = "my_zone"
    try:
        service = build('osconfig', 'v1alpha', cache_discovery=False)
        compliance_response = service.projects().locations(
        ).instanceOsPoliciesCompliances().list(
            parent='projects/%s/locations/%s' % (projectId, zone)).execute()
        return compliance_response
    except Exception as e:
        # Chain the original error instead of raising a bare Exception()
        raise Exception("Unable to list OS policy compliance") from e
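Note that the discovery-based call returns a single page as a dict; if the project has more VMs than fit in one page, the nextPageToken from the response has to be fed back in manually. A sketch of that loop, under the assumption that the v1alpha REST response uses the instanceOsPoliciesCompliances and nextPageToken keys shown in the docs above:

from googleapiclient.discovery import build

def list_policy_compliance_all(project_id, zone):
    service = build('osconfig', 'v1alpha', cache_discovery=False)
    collection = service.projects().locations().instanceOsPoliciesCompliances()
    compliances, page_token = [], None
    while True:
        response = collection.list(
            parent='projects/%s/locations/%s' % (project_id, zone),
            pageToken=page_token).execute()
        compliances.extend(response.get('instanceOsPoliciesCompliances', []))
        page_token = response.get('nextPageToken')
        if not page_token:  # no more pages left
            return compliances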

Related

Not able to retrieve tick data - Zerodha Web Socket

I am using the below code to connect to the Web Socket API of Zerodha to pull tick data for a particular instrument that I am interested in. When I run it, I am not able to pull any information. I am not sure whether I am calling the functions in the Streaming_Ticks class properly.
The instrument token, which is the input, is placed in "parameter_file.csv", and this token needs to be passed to the on_connect callback function inside the Streaming_Ticks class.
I would welcome your comments on how to run this code correctly.
from kiteconnect import KiteConnect
from kiteconnect import KiteTicker
import os
import csv

#cwd = os.chdir("E:\\Algorthmic Trading\\Zerodha_Training")

class Streaming_Ticks:
    def __init__(self):
        access_token = open("access_token.txt", 'r').read()
        key_secret = open("key_info.txt", 'r').read().split()
        self.kite = KiteConnect(api_key=key_secret[0])
        self.kite.set_access_token(access_token)
        self.kws = KiteTicker(key_secret[0], self.kite.access_token)

    def on_ticks(ws, ticks):
        # Callback to receive ticks.
        #logging.debug("Ticks: {}".format(ticks))
        print(ticks)

    def on_connect(ws, response):
        # Callback on successful connect.
        # Subscribe to a list of instrument_tokens (RELIANCE and ACC here).
        #logging.debug("on connect: {}".format(response))
        print(token_list)
        ws.subscribe(token_list)
        ws.set_mode(ws.MODE_FULL, token_list)   # Set all token ticks to `full` mode.
        #ws.set_mode(ws.MODE_FULL, [tokens[0]]) # Set one token tick to `full` mode.

if __name__ == "__main__":
    cwd = os.chdir("E:\\Algorthmic Trading\\Zerodha_Training")
    tick_data = Streaming_Ticks()
    token_list = []
    with open('parameter_file.csv') as param_file:
        param_reader = csv.DictReader(param_file)
        for row in param_reader:
            token_list.append(int(row['token']))
    tick_data.on_ticks = tick_data.on_ticks
    tick_data.on_connect = tick_data.on_connect
    tick_data.kws.connect()
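One thing that stands out: the last two assignments reassign the methods to themselves, which is a no-op. In pykiteconnect's documented usage the callbacks are attached to the KiteTicker instance, so the wiring would presumably need to look like this instead (a sketch, not a tested fix):

# Attach the callbacks to the KiteTicker instance (tick_data.kws),
# not back onto the Streaming_Ticks object.
tick_data.kws.on_ticks = tick_data.on_ticks
tick_data.kws.on_connect = tick_data.on_connect
tick_data.kws.connect()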

Google cloud function (python) does not deploy - Function failed on loading user code

I'm deploying a simple Python function to Google Cloud but cannot get it to save. It shows this error:
"Function failed on loading user code. This is likely due to a bug in the user code. Error message: Error: please examine your function logs to see the error cause: https://cloud.google.com/functions/docs/monitoring/logging#viewing_logs. Additional troubleshooting documentation can be found at https://cloud.google.com/functions/docs/troubleshooting#logging. Please visit https://cloud.google.com/functions/docs/troubleshooting for in-depth troubleshooting documentation."
The logs don't seem to show much that would indicate an error in the code. I followed this guide: https://blog.thereportapi.com/automate-a-daily-etl-of-currency-rates-into-bigquery/
The only differences are the environment variables and the endpoint I'm using.
The code is below; it is just a GET request followed by a push of the data into a table.
import requests
import json
import time
import os
from google.cloud import bigquery

# Set any default values for these variables if they are not found from environment variables
PROJECT_ID = os.environ.get("PROJECT_ID", "xxxxxxxxxxxxxx")
EXCHANGERATESAPI_KEY = os.environ.get("EXCHANGERATESAPI_KEY", "xxxxxxxxxxxxxxx")
REGIONAL_ENDPOINT = os.environ.get("REGIONAL_ENDPOINT", "europe-west1")
DATASET_ID = os.environ.get("DATASET_ID", "currency_rates")
TABLE_NAME = os.environ.get("TABLE_NAME", "currency_rates")
BASE_CURRENCY = os.environ.get("BASE_CURRENCY", "SEK")
SYMBOLS = os.environ.get("SYMBOLS", "NOK,EUR,USD,GBP")

def hello_world(request):
    latest_response = get_latest_currency_rates()
    write_to_bq(latest_response)
    return "Success"

def get_latest_currency_rates():
    PARAMS = {'access_key': EXCHANGERATESAPI_KEY, 'symbols': SYMBOLS, 'base': BASE_CURRENCY}
    response = requests.get("https://api.exchangeratesapi.io/v1/latest", params=PARAMS)
    print(response.json())
    return response.json()

def write_to_bq(response):
    # Instantiates a client
    bigquery_client = bigquery.Client(project=PROJECT_ID)
    # Prepares a reference to the dataset and table
    dataset_ref = bigquery_client.dataset(DATASET_ID)
    table_ref = dataset_ref.table(TABLE_NAME)
    table = bigquery_client.get_table(table_ref)
    # get the current timestamp so we know how fresh the data is
    timestamp = time.time()
    jsondump = json.dumps(response)  # Returns a string
    # Ensure the response is a string, not JSON
    rows_to_insert = [{"timestamp": timestamp, "data": jsondump}]
    errors = bigquery_client.insert_rows(table, rows_to_insert)  # API request
    print(errors)
    assert errors == []
I tried just the part that does the GET request in an offline editor and can confirm that a response comes back fine. I suspect it might have something to do with permissions or the way the script tries to access the database.
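For what it's worth, "Function failed on loading user code" usually means main.py could not even be imported when the function was deployed, for example because a dependency such as google-cloud-bigquery or requests is missing from requirements.txt, rather than a failure inside the function body. A minimal local smoke test, assuming the code lives in main.py with hello_world as the entry point:

import importlib

# If this import raises, the Cloud Functions loader will fail the same way,
# since it imports main.py before serving any request.
module = importlib.import_module("main")
assert hasattr(module, "hello_world"), "entry point not found in main.py"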

Caching values on a module level and unit testing

Below is a module for querying and caching AWS STS tokens; the intention is to avoid querying STS when there is still a valid token.
import logging
import os
import uuid
from datetime import datetime, timedelta

import boto3

LOGGER = logging.getLogger(__name__)

class Credentials:
    def __init__(self):
        self.sts_credentials = None
        self.token_expiry_time = None

    def is_token_expired(self):
        current_time_with_buffer = datetime.now() + timedelta(minutes=2)
        return not self.token_expiry_time or self.token_expiry_time < current_time_with_buffer

CREDENTIALS_ = Credentials()

def get_credentials():
    if CREDENTIALS_.is_token_expired():
        sts_client = boto3.client('sts')
        LOGGER.info("The credentials are either empty or expiring, refreshing")
        try:
            sts_token = sts_client.assume_role(
                RoleArn=os.environ["KINESIS_ASSUME_ROLE"],
                RoleSessionName=str(uuid.uuid4()))
        except Exception as e:
            LOGGER.error(f"Error occurred while trying to assume role with {os.environ['KINESIS_ASSUME_ROLE']}", e)
            raise e
        CREDENTIALS_.sts_credentials = {
            "aws_access_key_id": sts_token['Credentials']['AccessKeyId'],
            "aws_secret_access_key": sts_token['Credentials']['SecretAccessKey'],
            "aws_session_token": sts_token['Credentials']['SessionToken']
        }
        CREDENTIALS_.token_expiry_time = sts_token["Credentials"]["Expiration"]
    return CREDENTIALS_.sts_credentials
One of the unit tests is below. It passes in isolation but fails when run alongside other tests, the reason being the CREDENTIALS_ variable, which is modified by the other tests. I can set this value to None, but I want to know what the cleaner way of clearing the cached value is.
from unittest import mock
from unittest.mock import ANY, call

def test_get_credentials_refreshes_token_if_about_to_expire(sts_response, credentials):
    with mock.patch("boto3.client") as mock_boto_client:
        mock_assume_role = mock_boto_client.return_value.assume_role
        mock_assume_role.return_value = sts_response
        get_credentials()
        actual_credentials = get_credentials()
        calls = [call('sts'),
                 call().assume_role(RoleArn='arn:aws:iam::000000000000:role/dummyarn', RoleSessionName=ANY),
                 call('sts'),
                 call().assume_role(RoleArn='arn:aws:iam::000000000000:role/dummyarn', RoleSessionName=ANY)]
        assert credentials == actual_credentials
        mock_boto_client.assert_has_calls(calls)
The cleaner way would be to make sure that your unit tests really are unit tests. This means that for every unit there should be no interaction with other units. Since you are using a global variable CREDENTIALS_, this is going to be nearly impossible.
1) Easy fix
An easy fix would be to pass CREDENTIALS_ as an input argument. Then you can create a fake CREDENTIALS_ object during each of the tests, tailored to your test conditions.
2) Better fix
A better solution would be, besides using the credentials input argument, to break up the logic inside get_credentials. By splitting it into smaller functions, you separate the server logic from the credential updating, making it easier to mock and test. A possible division of the whole function would be:
get_sts_token
update_credentials
get_credentials
Now get_sts_token talks to the server, but update_credentials and get_credentials do not have to interact with it directly.
Code
Example 1)
def update_credentials(credentials):
    if credentials.is_token_expired():
        sts_client = boto3.client('sts')
        LOGGER.info("The credentials are either empty or expiring, refreshing")
        try:
            sts_token = sts_client.assume_role(
                RoleArn=os.environ["KINESIS_ASSUME_ROLE"],
                RoleSessionName=str(uuid.uuid4()))
        except Exception as e:
            LOGGER.error(f"Error occurred while trying to assume role with {os.environ['KINESIS_ASSUME_ROLE']}", e)
            raise e
        credentials.sts_credentials = {
            "aws_access_key_id": sts_token['Credentials']['AccessKeyId'],
            "aws_secret_access_key": sts_token['Credentials']['SecretAccessKey'],
            "aws_session_token": sts_token['Credentials']['SessionToken']
        }
        credentials.token_expiry_time = sts_token["Credentials"]["Expiration"]
    return credentials

# Where you need the credentials
CREDENTIALS_ = update_credentials(CREDENTIALS_)
CREDENTIALS_.sts_credentials
Now you can insert your own CREDENTIALS_ object in the test.
Example 2)
def get_sts_token():
    sts_client = boto3.client('sts')
    LOGGER.info("The credentials are either empty or expiring, refreshing")
    try:
        sts_token = sts_client.assume_role(
            RoleArn=os.environ["KINESIS_ASSUME_ROLE"],
            RoleSessionName=str(uuid.uuid4()))
    except Exception as e:
        LOGGER.error(f"Error occurred while trying to assume role with {os.environ['KINESIS_ASSUME_ROLE']}", e)
        raise e
    return sts_token

def update_credentials(credentials, sts_token):
    credentials.sts_credentials = {
        "aws_access_key_id": sts_token['Credentials']['AccessKeyId'],
        "aws_secret_access_key": sts_token['Credentials']['SecretAccessKey'],
        "aws_session_token": sts_token['Credentials']['SessionToken']
    }
    return credentials

def get_credentials(credentials: Credentials):
    if credentials.is_token_expired():
        sts_token = get_sts_token()
        credentials = update_credentials(credentials, sts_token)
    return credentials.sts_credentials
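With that refactoring, a test can hand get_credentials a fresh Credentials object and never touch module-level state. A hedged sketch of such a test; the sts_response fixture is assumed to mirror the shape of a real assume_role response, and the dummy role ARN matches the one in the original test:

from unittest import mock

def test_get_credentials_uses_injected_credentials(sts_response):
    fresh_credentials = Credentials()  # no state shared with other tests
    with mock.patch.dict("os.environ", {"KINESIS_ASSUME_ROLE": "arn:aws:iam::000000000000:role/dummyarn"}), \
            mock.patch("boto3.client") as mock_boto_client:
        mock_boto_client.return_value.assume_role.return_value = sts_response
        result = get_credentials(fresh_credentials)
    assert result == fresh_credentials.sts_credentials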

400 Caller's project doesn't match parent project

I have this block of code that translates text from one language to another using the Cloud Translation API. The problem is that this code always throws the error: "Caller's project doesn't match parent project". What could be the problem?
translation_separator = "translated_text: "
language_separator = "detected_language_code: "
translate_client = translate.TranslationServiceClient()
# parent = translate_client.location_path(
#     self.translate_project_id, self.translate_location
# )
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = (
    os.getcwd() + "/translator_credentials.json"
)
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
try:
    result = str(
        translate_client.translate_text(
            request={
                "contents": [text],
                "target_language_code": self.target_language_code,
                "parent": f'projects/{self.translate_project_id}/'
                          f'locations/{self.translate_location}',
                "model": self.translate_model
            }
        )
    )
    print(result)
except Exception as e:
    print("error here>>>>>", e)
Your issue seems to be related to the authentication method that you are using in your application; please follow the guide for authentication methods with the Translate API. If you are trying to pass the credentials in code, you can explicitly point to your service account file:
def explicit():
    from google.cloud import storage

    # Explicitly use service account credentials by specifying the private key
    # file.
    storage_client = storage.Client.from_service_account_json(
        'service_account.json')
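That snippet uses the Storage client as the example; the same from_service_account_json constructor exists on the generated Translate client, so the equivalent for this question would presumably be (a sketch, with translator_credentials.json being the key file from the question):

from google.cloud import translate

def explicit_translate():
    # Build the client directly from the service account key file instead
    # of relying on the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    return translate.TranslationServiceClient.from_service_account_json(
        'translator_credentials.json')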
Also, there is a codelab for getting started with the Translation API in Python; it is a great step-by-step guide for running the Translate API with Python.
If the issue persists, you can file a Public Issue Tracker issue for Google Support.

Is it possible to upload a YouTube video description file with the Google Python script?

I am using the Google Python script to upload videos.
#!/usr/bin/python

import http.client  # httplib
import httplib2
import os
import random
import sys
import time

from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow

# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1

# Maximum number of times to retry before giving up.
MAX_RETRIES = 10

# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, http.client.NotConnected,
                        http.client.IncompleteRead, http.client.ImproperConnectionState,
                        http.client.CannotSendRequest, http.client.CannotSendHeader,
                        http.client.ResponseNotReady, http.client.BadStatusLine)

# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]

# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"

# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"

# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0

To make this sample run you will need to populate the client_secrets.json file
found at:

   %s

with information from the Developers Console
https://console.developers.google.com/

For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
                                   CLIENT_SECRETS_FILE))

VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")

def get_authenticated_service(args):
    flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
                                   scope=YOUTUBE_UPLOAD_SCOPE,
                                   message=MISSING_CLIENT_SECRETS_MESSAGE)
    storage = Storage("%s-oauth2.json" % sys.argv[0])
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = run_flow(flow, storage, args)
    return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                 http=credentials.authorize(httplib2.Http()))

def initialize_upload(youtube, options):
    tags = None
    if options.keywords:
        tags = options.keywords.split(",")
    body = dict(
        snippet=dict(
            title=options.title,
            description=options.description,
            tags=tags,
            categoryId=options.category
        ),
        status=dict(
            privacyStatus=options.privacyStatus
        )
    )
    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        # The chunksize parameter specifies the size of each chunk of data, in
        # bytes, that will be uploaded at a time. Set a higher value for
        # reliable connections as fewer chunks lead to faster uploads. Set a lower
        # value for better recovery on less reliable connections.
        #
        # Setting "chunksize" equal to -1 in the code below means that the entire
        # file will be uploaded in a single HTTP request. (If the upload fails,
        # it will still be retried where it left off.) This is usually a best
        # practice, but if you're using Python older than 2.6 or if you're
        # running on App Engine, you should set the chunksize to something like
        # 1024 * 1024 (1 megabyte).
        media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
    )
    resumable_upload(insert_request)

# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print("Uploading file...")
            status, response = insert_request.next_chunk()
            if 'id' in response:
                print("Video id '%s' was successfully uploaded." % response['id'])
            else:
                exit("The upload failed with an unexpected response: %s" % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
                                                                     e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = "A retriable error occurred: %s" % e
        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit("No longer attempting to retry.")
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print("Sleeping %f seconds and then retrying..." % sleep_seconds)
            time.sleep(sleep_seconds)

if __name__ == '__main__':
    argparser.add_argument("--file", required=True, help="Video file to upload")
    argparser.add_argument("--title", help="Video title", default="Test Title")
    argparser.add_argument("--description", help="Video description",
                           default="Test Description")
    argparser.add_argument("--category", default="22",
                           help="Numeric video category. " +
                                "See https://developers.google.com/youtube/v3/docs/videoCategories/list")
    argparser.add_argument("--keywords", help="Video keywords, comma separated",
                           default="")
    argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
                           default=VALID_PRIVACY_STATUSES[0], help="Video privacy status.")
    args = argparser.parse_args()

    if not os.path.exists(args.file):
        exit("Please specify a valid file using the --file= parameter.")

    youtube = get_authenticated_service(args)
    try:
        initialize_upload(youtube, args)
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
The problem is the --description parameter: it only allows a single line of text, and I need to put in several lines with line breaks ('\n'). Is it possible to do this another way?
It would be wonderful if this parameter (or another one) accepted the path of a text file to upload as the description, like the --file parameter does.
Is there something I can do to solve this?
Or maybe there is somewhere I can contact the Google developers to ask whether the initialize_upload(youtube, args) function could be reimplemented to work the way I describe?
Yes, it is possible!
We have to add a --description-file option.
Google, please write a complete manual for your API!!!
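A minimal sketch of that change, assuming the rest of the script stays as above; the --description-file flag and its handling are an addition to the __main__ block, not part of Google's sample:

argparser.add_argument("--description-file", dest="description_file",
                       help="Path to a text file whose contents (including "
                            "newlines) become the video description")
args = argparser.parse_args()

# If a description file was given, read it and overwrite args.description so
# the unchanged initialize_upload() picks up the multi-line text.
if args.description_file and os.path.exists(args.description_file):
    with open(args.description_file, "r") as f:
        args.description = f.read()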
