HTTPSConnectionPool(host='127.0.0.1', port=7545): Max retries exceeded with url: (Caused by NewConnectionError - python-3.x

# compile_standard is going to be the main function that we will use to compile this code.
from solcx import compile_standard, install_solc
import json
from web3 import Web3
import os
from dotenv import load_dotenv
load_dotenv()
with open("./SimpleStorage.sol", "r") as file:
simple_storage_file = file.read()
print("Installing...")
install_solc("0.6.0")
# print(simple_storage_file)
# compile our solidity
compiled_sol = compile_standard(
    {
        "language": "Solidity",
        "sources": {"SimpleStorage.sol": {"content": simple_storage_file}},
        "settings": {
            "outputSelection": {
                "*": {
                    "*": [
                        "abi",
                        "metadata",
                        "evm.bytecode",
                        "evm.bytecode.sourceMap",
                    ]  # ABI = Application Binary Interface; the EVM (Ethereum Virtual Machine) is the core component of the Ethereum network
                }
            }
        },
    },
    solc_version="0.6.0",
)
# print(compiled_sol)
with open("compiled_code.json", "w") as file: # w means it wil wright from it
json.dump(
compiled_sol, file
) # is it's going to take our compiled soul jason variable and just dump it into this (file) here
# but still it is going to keep it in json syntax
# get bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"][
    "bytecode"
]["object"]
# get abi
abi = json.loads(
    compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["metadata"]
)["output"]["abi"]
# for connecting to ganache
w3 = Web3(Web3.HTTPProvider("https://127.0.0.1:7545"))
chain_id = 5777
my_address = "0x630Ee320BcE235224184A31FC687a5D183142BB9"
private_key = "0xd3cf1f678e8a78ace754cf57bd6ebcb28852e9657bb371951d72bbb5a0a3f413"
# private_key = os.getenv("PRIVATE_KEY")
# print(private_key)
# Create the contract in Python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# print(SimpleStorage)
# Get the latest transaction
nonce = w3.eth.getTransactionCount(my_address)  # <-- I'm getting the error on this line (screenshot: [1])
# print(nonce)
# we can see that the transaction count is 0 because we haven't made any transactions yet
# 1. Build a transaction
# 2. Sign a transaction
# 3 . Send a transaction
transaction = SimpleStorage.constructor().buildTransaction(
    {"chainId": chain_id, "from": my_address, "nonce": nonce}
)
# print(transaction)
signed_txn = w3.eth.account.sign_transaction(transaction, private_key=private_key)
print(signed_txn) # this is how we sign a transaction
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# one good practice when sending a transaction is to wait for some block
# confirmations to happen; this makes our code stop and wait for the
# transaction hash to go through
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
# working with contracts
# Contract Address
# Contract ABI
# Working with deployed Contracts
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
# call->Simulate making the call and getting a return value
# Transact->Actually make a state change
# Initial value of a favorite number
print(simple_storage.functions.retrieve().call())
# store some value into this contract
store_transaction = simple_storage.functions.store(15).buildTransaction(
    {
        "chainId": chain_id,
        "gasPrice": w3.eth.gas_price,
        "from": my_address,
        "nonce": nonce + 1,
    }
)
signed_store_txn = w3.eth.account.sign_transaction(
    store_transaction, private_key=private_key
)
send_store_tx = w3.eth.send_raw_transaction(signed_store_txn.rawTransaction)
tx_receipt = w3.eth.wait_for_transaction_receipt(send_store_tx)
#[1]: https://i.stack.imgur.com/sPikF.png
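For what it's worth, a NewConnectionError from HTTPSConnectionPool on 127.0.0.1:7545 usually means web3.py never reached Ganache at all: either Ganache isn't running or listening on that port, or the provider URL uses https:// where the local Ganache RPC endpoint serves plain http://. A minimal connectivity check (a sketch, assuming web3.py v5 and the default Ganache GUI endpoint):

from web3 import Web3

# Ganache's local RPC endpoint is plain HTTP by default
w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:7545"))
print(w3.isConnected())  # should print True before getTransactionCount is attempted

If this prints False, check that Ganache is running and that the host/port match its RPC settings before retrying the deployment script.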


botocore.exceptions.ClientError: An error occurred (InvalidInstanceID.Malformed)

Any suggestions please? Here is the code. It works fine if I call the function as get_instance_name('i-0368cdfdded'); the error I get is below.
I tried ids.txt with the IDs both quoted ('i-xxxx') and unquoted (i-xxxx).
botocore.exceptions.ClientError: An error occurred (InvalidInstanceID.Malformed) when calling the DescribeInstances operation: Invalid id: "i-xxxxxx"
import boto3

AWS_REGION = "us-west-2"
AWS_PROFILE = "profilex"
session = boto3.session.Session(profile_name=AWS_PROFILE)
ec2 = session.resource('ec2', region_name=AWS_REGION)

def get_instance_name(fid):
    i = ec2.Instance(fid)
    instancename = ''
    for tags in i.tags:
        if tags["Key"] == 'Name':
            instancename = tags["Value"]
    return instancename

with open('ids.txt') as f:
    for line in f:
        get_instance_name('line')
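Two things stand out in that last loop (an observation, not a tested fix): it passes the literal string 'line' instead of the variable line, and each line read from the file still carries its trailing newline, which EC2 rejects as a malformed ID. A minimal sketch of the loop, assuming ids.txt has one instance ID per line and the session/function above are already defined:

with open('ids.txt') as f:
    for line in f:
        instance_id = line.strip()  # drop the trailing newline/whitespace
        if not instance_id:
            continue  # skip blank lines
        print(instance_id, get_instance_name(instance_id))  # pass the variable, not the literal 'line'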
So what I found is that I get the expected output when I run the code in an interactive Python terminal, but not when I run it as a program. I am not able to figure out why, but the program itself works on the console. Here is what I ran last, which worked (I was doing a lot of trial and error):
import boto3
import sys
import os
import json

AWS_REGION = "us-west-2"
AWS_PROFILE = "xxxx"
session = boto3.session.Session(profile_name=AWS_PROFILE)
ec2 = session.resource('ec2', region_name=AWS_REGION)

def get_instance_name(fid):
    i = ec2.Instance(fid)
    instancename = ''
    for tags in i.tags:
        if tags["Key"] == 'Name':
            instancename = tags["Value"]
    return instancename

filename = "xxx.txt"
with open(filename) as file:
    while (line := file.readline().rstrip()):
        # print(line)
        get_instance_name(line)
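A likely explanation for the terminal-vs-script difference (hedged, since I can't reproduce your environment): in the interactive interpreter the value of a bare expression like get_instance_name(line) is echoed back to you, but when the same file runs as a script the return value is silently discarded. Printing the result explicitly should behave the same in both:

with open(filename) as file:
    while (line := file.readline().rstrip()):
        print(get_instance_name(line))  # print the returned name so a script shows output too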

Issue while creating VM instance using Python code in GCP

I am trying to write code that will read values from an Excel/CSV file and create VMs in Google Cloud. I am facing a problem at two locations: if I use 'items': [tag] while creating the tags, or when setting the service account scope, it starts giving me an error.
import os, json
import googleapiclient.discovery
from google.oauth2 import service_account
import csv

credentials = service_account.Credentials.from_service_account_file('G:/python/json/mykids-280210.json')
compute = googleapiclient.discovery.build('compute', 'v1', credentials=credentials)

def create_instance(compute, vm_name, image_project, image_family, machinetype, startupscript, zone, network,
                    subnet, project, scope, tag):
    # Get the latest Debian Jessie image.
    image_response = compute.images().getFromFamily(
        project=image_project, family=image_family).execute()
    source_disk_image = image_response['selfLink']

    # Configure the machine
    machine_type = "zones/" + zone + "/machineTypes/" + machinetype
    startup_script = startupscript

    config = {
        'name': vm_name,
        'machineType': machine_type,
        'description': 'This VM was created with python code',
        'tags': {
            'items': ['external', 'home', 'local']  # 'items': [tag] <~~~~~~~~~~~
        },
        'deletionProtection': False,
        'labels': {'env': 'dev', 'server': 'mytower', 'purpose': 'personal'},

        # Specify the boot disk and the image to use as a source.
        'disks': [
            {
                'boot': True,
                'autoDelete': True,
                'initializeParams': {
                    'sourceImage': source_disk_image,
                }
            }
        ],

        # Specify a network interface with NAT to access the public
        # internet.
        'networkInterfaces': [{
            'network': 'global/networks/' + network,
            'subnetwork': 'regions/us-central1/subnetworks/' + subnet,
            'accessConfigs': [
                {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
            ]
        }],

        # Allow the instance to access cloud storage and logging.
        'serviceAccounts': [{
            'email': 'default',
            'scopes': [
                # 'https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'
                # scope  # scope <~~~~~~~~~~~~~~~~~~~~
            ]
        }],

        'scheduling': {
            "preemptible": True
        },

        # Metadata is readable from the instance and allows you to
        # pass configuration from deployment scripts to instances.
        'metadata': {
            'items': [{
                # Startup script is automatically executed by the
                # instance upon startup.
                'key': 'startup-script',
                'value': startup_script
            }]
        }
    }

    return compute.instances().insert(
        project=project,
        zone=zone,
        body=config).execute()
# [END create_instance]

with open('vms.csv', newline='') as csvfile:
    data = csv.DictReader(csvfile)
    for row in data:
        vm_name = row['vm_name']
        image_project = row['image_project']
        image_family = row['image_family']
        machinetype = row['machinetype']
        startupscript = row['startupscript']
        zone = row['zone']
        network = row['network']
        subnet = row['subnet']
        project = row['project']
        scope = row['scopes']
        tag = row['tags']
        print(create_instance(compute, vm_name, image_project, image_family, machinetype, startupscript, zone, network,
                              subnet, project, scope, tag))
csvfile.close()
Error when I use the scope variable:
G:\python\pythonProject\venv\Scripts\python.exe G:/python/pythonProject/read-excel-gcp/vm/create_vm.py
Traceback (most recent call last):
File "G:\python\pythonProject\read-excel-gcp\vm\create_vm.py", line 100, in <module>
print(create_instance(compute, vm_name, image_project, image_family, machinetype, startupscript, zone, network,
File "G:\python\pythonProject\read-excel-gcp\vm\create_vm.py", line 79, in create_instance
return compute.instances().insert(
File "G:\python\pythonProject\venv\lib\site-packages\googleapiclient\_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "G:\python\pythonProject\venv\lib\site-packages\googleapiclient\http.py", line 915, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 400 when requesting https://compute.googleapis.com/compute/v1/projects/mykids-280210/zones/us-central1-a/instances?alt=json returned "One or more of the service account scopes are invalid: 'https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write''". Details: "One or more of the service account scopes are invalid: 'https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write''">
Process finished with exit code 1
I get a similar error when I use the tag variable.
I have commented out (#) the values the way I am passing them in the code above.
Below are my CSV file details:
vm_name,image_project,image_family,machinetype,startupscript,zone,network,subnet,project,scopes,tags
python-vm1,debian-cloud,debian-9,e2-micro,G:/python/json/startup-script.sh,us-central1-a,myvpc,subnet-a,mykids-280210,"https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'","external', 'home', 'local'"
python-vm2,debian-cloud,debian-9,e2-micro,G:/python/json/startup-script.sh,us-central1-a,myvpc,subnet-a,mykids-280210,"https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'","external', 'home', 'local'"
When the values are passed in directly it works, but when I pass them through the variables it fails.
I have marked the problem areas with <~~~~~~~~~~~~
Please suggest if anyone understands the issue.
@d.s can you try changing your scope format to something like this:
'serviceAccounts': [
    {
        'email': 'default',
        'scopes': [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/servicecontrol',
            'https://www.googleapis.com/auth/service.management.readonly',
            'https://www.googleapis.com/auth/logging.write',
            'https://www.googleapis.com/auth/monitoring.write',
            'https://www.googleapis.com/auth/trace.append',
            'https://www.googleapis.com/auth/devstorage.read_write'
        ]
    }
]
The listed scopes are the default scopes that you will need for an instance. I think the problem you are facing is that you were trying to list only two scopes, which are not enough to allow you to deploy your instance.
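Another thing worth checking (an assumption based on the CSV and the 400 error above, not something I can verify against your project): csv.DictReader returns row['scopes'] and row['tags'] as single strings, so 'scopes': [scope] sends one long malformed string instead of a list of scope URLs. A small helper that splits those columns into proper lists might look like this:

def split_csv_field(value):
    # Turn a cell like "external', 'home', 'local'" into ['external', 'home', 'local'].
    return [item.strip().strip("'\"") for item in value.split(',') if item.strip().strip("'\"")]

# With the example rows from vms.csv above:
print(split_csv_field("external', 'home', 'local'"))
print(split_csv_field("https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'"))
# In the loop, pass the resulting lists into the config,
# i.e. 'items': split_csv_field(row['tags']) and 'scopes': split_csv_field(row['scopes'])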

Azure-ML Deployment does NOT see AzureML Environment (wrong version number)

I've followed the documentation pretty well as outlined here.
I've setup my azure machine learning environment the following way:
from azureml.core import Workspace
# Connect to the workspace
ws = Workspace.from_config()
from azureml.core import Environment
from azureml.core import ContainerRegistry
myenv = Environment(name = "myenv")
myenv.inferencing_stack_version = "latest" # This will install the inference specific apt packages.
# Docker
myenv.docker.enabled = True
myenv.docker.base_image_registry.address = "myazureregistry.azurecr.io"
myenv.docker.base_image_registry.username = "myusername"
myenv.docker.base_image_registry.password = "mypassword"
myenv.docker.base_image = "4fb3..."
myenv.docker.arguments = None
# Environment variables (I need Python to look at these folders)
myenv.environment_variables = {"PYTHONPATH":"/root"}
# python
myenv.python.user_managed_dependencies = True
myenv.python.interpreter_path = "/opt/miniconda/envs/myenv/bin/python"
from azureml.core.conda_dependencies import CondaDependencies
conda_dep = CondaDependencies()
conda_dep.add_pip_package("azureml-defaults")
myenv.python.conda_dependencies=conda_dep
myenv.register(workspace=ws) # works!
I have a score.py file configured for inference (not relevant to the problem I'm having)...
I then set up the inference configuration:
from azureml.core.model import InferenceConfig
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
I set up my compute cluster:
from azureml.core.compute import ComputeTarget, AksCompute
from azureml.exceptions import ComputeTargetException

# Choose a name for your cluster
aks_name = "theclustername"

# Check to see if the cluster already exists
try:
    aks_target = ComputeTarget(workspace=ws, name=aks_name)
    print('Found existing compute target')
except ComputeTargetException:
    print('Creating a new compute target...')
    prov_config = AksCompute.provisioning_configuration(vm_size="Standard_NC6_Promo")
    aks_target = ComputeTarget.create(workspace=ws, name=aks_name, provisioning_configuration=prov_config)
    aks_target.wait_for_completion(show_output=True)

from azureml.core.webservice import AksWebservice

# Example
gpu_aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,
                                                    num_replicas=3,
                                                    cpu_cores=4,
                                                    memory_gb=10)
Everything succeeds; then I try and deploy the model for inference:
from azureml.core.model import Model
model = Model(ws, name="thenameofmymodel")
# Name of the web service that is deployed
aks_service_name = 'tryingtodeply'
# Deploy the model
aks_service = Model.deploy(ws,
                           aks_service_name,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=gpu_aks_config,
                           deployment_target=aks_target,
                           overwrite=True)
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
And it fails saying that it can't find the environment. More specifically, my environment version is version 11, but it keeps trying to find an environment with a version number that is 1 higher (i.e., version 12) than the current environment:
FailedERROR - Service deployment polling reached non-successful terminal state, current service state: Failed
Operation ID: 0f03a025-3407-4dc1-9922-a53cc27267d4
More information can be found here:
Error:
{
  "code": "BadRequest",
  "statusCode": 400,
  "message": "The request is invalid",
  "details": [
    {
      "code": "EnvironmentDetailsFetchFailedUserError",
      "message": "Failed to fetch details for Environment with Name: myenv Version: 12."
    }
  ]
}
I have tried to manually edit the environment JSON to match the version that azureml is trying to fetch, but nothing works. Can anyone see anything wrong with this code?
Update
Changing the name of the environment (e.g., my_inference_env) and passing it to InferenceConfig seems to be on the right track. However, the error now changes to the following:
Running..........
Failed
ERROR - Service deployment polling reached non-successful terminal state, current service state: Failed
Operation ID: f0dfc13b-6fb6-494b-91a7-de42b9384692
More information can be found here: https://some_long_http_address_that_leads_to_nothing
Error:
{
  "code": "DeploymentFailed",
  "statusCode": 404,
  "message": "Deployment not found"
}
Solution
The answer from Anders below is indeed correct regarding the use of Azure ML environments. However, the last error I was getting was because I was setting the container image using the digest value (a SHA) and NOT the image name and tag (e.g., imagename:tag). Note this line of code in the first block:
myenv.docker.base_image = "4fb3..."
I reference the digest value, but it should be changed to
myenv.docker.base_image = "imagename:tag"
Once I made that change, the deployment succeeded! :)
One concept that took me a while to get was the bifurcation of registering and using an Azure ML Environment. If you have already registered your env, myenv, and none of the details of your environment have changed, there is no need to re-register it with myenv.register(). You can simply get the already-registered env using Environment.get(), like so:
myenv = Environment.get(ws, name='myenv', version=11)
My recommendation would be to name your environment something new: like "model_scoring_env". Register it once, then pass it to the InferenceConfig.
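A minimal sketch of that flow (the environment settings are abridged from the first block; the name "model_scoring_env" and the image name:tag are placeholders):

from azureml.core import Environment, Workspace
from azureml.core.model import InferenceConfig

ws = Workspace.from_config()

# Register once, only when the environment definition actually changes
scoring_env = Environment(name="model_scoring_env")
scoring_env.docker.enabled = True
scoring_env.docker.base_image = "imagename:tag"  # name:tag, not the sha digest
scoring_env.python.user_managed_dependencies = True
scoring_env.register(workspace=ws)

# Later, reuse the registered environment instead of re-registering it
scoring_env = Environment.get(ws, name="model_scoring_env")  # optionally pin version=...
inference_config = InferenceConfig(entry_script="score.py", environment=scoring_env)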

Create Stack Instances Parameter Issue

I'm creating a stack instance using the Python boto3 SDK. According to the documentation I should be able to use ParameterOverrides, but I'm getting the following error:
botocore.exceptions.ParamValidationError: Parameter validation failed:
Unknown parameter in input: "ParameterOverrides", must be one of: StackSetName, Accounts, Regions, OperationPreferences, OperationId
Environment :
aws-cli/1.11.172 Python/2.7.14 botocore/1.7.30
imports used
import boto3
import botocore
Following is the code
try:
    stackset_instance_response = stackset_client.create_stack_instances(
        StackSetName=cloudtrail_stackset_name,
        Accounts=[
            account_id
        ],
        Regions=[
            stack_region
        ],
        OperationPreferences={
            'RegionOrder': [
                stack_region
            ],
            'FailureToleranceCount': 0,
            'MaxConcurrentCount': 1
        },
        ParameterOverrides=[
            {
                'ParameterKey': 'CloudtrailBucket',
                'ParameterValue': 'test-bucket'
            },
            {
                'ParameterKey': 'Environment',
                'ParameterValue': 'SANDBOX'
            },
            {
                'ParameterKey': 'IsCloudTrailEnabled',
                'ParameterValue': 'NO'
            }
        ]
    )
    print("Stackset create Response : " + str(stackset_instance_response))
    operation_id = stackset_instance_response['OperationId']
    print(operation_id)
except botocore.exceptions.ClientError as e:
    print("Stackset creation error : " + str(e))
I'm not sure what I'm doing wrong; any help would be greatly appreciated.
Thank you.
1.8.0 is the first version of botocore that has ParameterOverrides defined:
https://github.com/boto/botocore/blob/1.8.0/botocore/data/cloudformation/2010-05-15/service-2.json#L1087-L1090
1.7.30 doesn't have that defined. https://github.com/boto/botocore/blob/1.7.30/botocore/data/cloudformation/2010-05-15/service-2.json
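A quick way to confirm which botocore your script is actually importing (a minimal check; upgrading assumes pip manages this environment):

import boto3
import botocore

# create_stack_instances only accepts ParameterOverrides from botocore 1.8.0 onwards
print("boto3:", boto3.__version__)
print("botocore:", botocore.__version__)

If the printed botocore version is older than 1.8.0, upgrading with pip install --upgrade boto3 botocore should let the parameter validation accept ParameterOverrides.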

Error while submitting a spark job using spark-jobserver

I occasionally face the following error while submitting a job. The error goes away if I remove the rootdir of filedao, datadao and sqldao, which means I have to restart the job server and re-upload my jar.
{
  "status": "ERROR",
  "result": {
    "message": "Ask timed out on [Actor[akka://JobServer/user/context-supervisor/1995aeba-com.spmsoftware.distributed.job.TestJob#-1370794810]] after [10000 ms]. Sender[null] sent message of type \"spark.jobserver.JobManagerActor$StartJob\".",
    "errorClass": "akka.pattern.AskTimeoutException",
    "stack": ["akka.pattern.PromiseActorRef$$anonfun$1.apply$mcV$sp(AskSupport.scala:604)", "akka.actor.Scheduler$$anon$4.run(Scheduler.scala:126)", "scala.concurrent.Future$InternalCallbackExecutor$.unbatchedExecute(Future.scala:601)", "scala.concurrent.BatchingExecutor$class.execute(BatchingExecutor.scala:109)", "scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:599)", "akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(LightArrayRevolverScheduler.scala:331)", "akka.actor.LightArrayRevolverScheduler$$anon$4.executeBucket$1(LightArrayRevolverScheduler.scala:282)", "akka.actor.LightArrayRevolverScheduler$$anon$4.nextTick(LightArrayRevolverScheduler.scala:286)", "akka.actor.LightArrayRevolverScheduler$$anon$4.run(LightArrayRevolverScheduler.scala:238)", "java.lang.Thread.run(Thread.java:745)"]
  }
}
My config file is as follows:
# Template for a Spark Job Server configuration file
# When deployed these settings are loaded when job server starts
#
# Spark Cluster / Job Server configuration
spark {
  # spark.master will be passed to each job's JobContext
  master = <spark_master>

  # Default # of CPUs for jobs to use for Spark standalone cluster
  job-number-cpus = 4

  jobserver {
    port = 8090
    context-per-jvm = false
    context-creation-timeout = 100 s

    # Note: JobFileDAO is deprecated from v0.7.0 because of issues in
    # production and will be removed in future, now defaults to H2 file.
    jobdao = spark.jobserver.io.JobSqlDAO

    filedao {
      rootdir = /tmp/spark-jobserver/filedao/data
    }
    datadao {
      rootdir = /tmp/spark-jobserver/upload
    }
    sqldao {
      slick-driver = slick.driver.H2Driver
      jdbc-driver = org.h2.Driver
      rootdir = /tmp/spark-jobserver/sqldao/data
      jdbc {
        url = "jdbc:h2:file:/tmp/spark-jobserver/sqldao/data/h2-db"
        user = ""
        password = ""
      }
      dbcp {
        enabled = false
        maxactive = 20
        maxidle = 10
        initialsize = 10
      }
    }
    result-chunk-size = 1m
    short-timeout = 60 s
  }

  context-settings {
    num-cpu-cores = 2        # Number of cores to allocate. Required.
    memory-per-node = 512m   # Executor memory per node, -Xmx style eg 512m, #1G, etc.
  }
}

akka {
  remote.netty.tcp {
    # This controls the maximum message size, including job results, that can be sent
    # maximum-frame-size = 200 MiB
  }
}

# check the reference.conf in spray-can/src/main/resources for all defined settings
spray.can.server.parsing.max-content-length = 250m
I am using spark-2.0-preview version.
I have faced the same error before and it was related to the timeout. For a synchronous request (sync=true) you must also provide the timeout (in seconds), a value that reflects how long it takes to process your request.
This is an example of how the request should look:
curl -k --basic -d '' 'http://localhost:5050/jobs?appName=app&classPath=Main&context=test-context&sync=true&timeout=40'
If your request needs more than 40 seconds, you may also need to modify the application.conf located at
spark-jobserver-master/job-server/src/main/resources/application.conf
and in the spray.can.server section modify:
idle-timeout = 210 s
request-timeout = 200 s
