Issue while creating a VM instance using Python code in GCP - python-3.x

I am trying to write code that reads values from a CSV file and creates VMs in Google Cloud. I am facing a problem in two places: creating tags with 'items': [tag], and setting the service account scopes with the scope variable. Both raise errors.
import csv
import googleapiclient.discovery
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file('G:/python/json/mykids-280210.json')
compute = googleapiclient.discovery.build('compute', 'v1', credentials=credentials)


def create_instance(compute, vm_name, image_project, image_family, machinetype, startupscript, zone, network,
                    subnet, project, scope, tag):
    # Get the latest image from the given image family.
    image_response = compute.images().getFromFamily(
        project=image_project, family=image_family).execute()
    source_disk_image = image_response['selfLink']

    # Configure the machine
    machine_type = "zones/" + zone + "/machineTypes/" + machinetype
    startup_script = startupscript

    config = {
        'name': vm_name,
        'machineType': machine_type,
        'description': 'This VM was created with python code',
        'tags': {
            'items': ['external', 'home', 'local']  # 'items': [tag] <~~~~~~~~~~~
        },
        'deletionProtection': False,
        'labels': {'env': 'dev', 'server': 'mytower', 'purpose': 'personal'},

        # Specify the boot disk and the image to use as a source.
        'disks': [
            {
                'boot': True,
                'autoDelete': True,
                'initializeParams': {
                    'sourceImage': source_disk_image,
                }
            }
        ],

        # Specify a network interface with NAT to access the public
        # internet.
        'networkInterfaces': [{
            'network': 'global/networks/' + network,
            'subnetwork': 'regions/us-central1/subnetworks/' + subnet,
            'accessConfigs': [
                {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
            ]
        }],

        # Allow the instance to access cloud storage and logging.
        'serviceAccounts': [{
            'email': 'default',
            'scopes': [
                # 'https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'
                # scope <~~~~~~~~~~~~~~~~~~~~
            ]
        }],

        'scheduling': {
            "preemptible": True
        },

        # Metadata is readable from the instance and allows you to
        # pass configuration from deployment scripts to instances.
        'metadata': {
            'items': [{
                # Startup script is automatically executed by the
                # instance upon startup.
                'key': 'startup-script',
                'value': startup_script
            }]
        }
    }

    return compute.instances().insert(
        project=project,
        zone=zone,
        body=config).execute()
# [END create_instance]


with open('vms.csv', newline='') as csvfile:
    data = csv.DictReader(csvfile)
    for row in data:
        vm_name = row['vm_name']
        image_project = row['image_project']
        image_family = row['image_family']
        machinetype = row['machinetype']
        startupscript = row['startupscript']
        zone = row['zone']
        network = row['network']
        subnet = row['subnet']
        project = row['project']
        scope = row['scopes']
        tag = row['tags']
        print(create_instance(compute, vm_name, image_project, image_family, machinetype, startupscript, zone, network,
                              subnet, project, scope, tag))
Error when using the scope variable:
G:\python\pythonProject\venv\Scripts\python.exe G:/python/pythonProject/read-excel-gcp/vm/create_vm.py
Traceback (most recent call last):
File "G:\python\pythonProject\read-excel-gcp\vm\create_vm.py", line 100, in <module>
print(create_instance(compute, vm_name, image_project, image_family, machinetype, startupscript, zone, network,
File "G:\python\pythonProject\read-excel-gcp\vm\create_vm.py", line 79, in create_instance
return compute.instances().insert(
File "G:\python\pythonProject\venv\lib\site-packages\googleapiclient\_helpers.py", line 134, in positional_wrapper
return wrapped(*args, **kwargs)
File "G:\python\pythonProject\venv\lib\site-packages\googleapiclient\http.py", line 915, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 400 when requesting https://compute.googleapis.com/compute/v1/projects/mykids-280210/zones/us-central1-a/instances?alt=json returned "One or more of the service account scopes are invalid: 'https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write''". Details: "One or more of the service account scopes are invalid: 'https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write''">
Process finished with exit code 1
I get a similar error when I use the tag variable. I have commented out (#) the values in the code above to show the way I am passing them.
Below are my CSV file contents:
vm_name,image_project,image_family,machinetype,startupscript,zone,network,subnet,project,scopes,tags
python-vm1,debian-cloud,debian-9,e2-micro,G:/python/json/startup-script.sh,us-central1-a,myvpc,subnet-a,mykids-280210,"https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'","external', 'home', 'local'"
python-vm2,debian-cloud,debian-9,e2-micro,G:/python/json/startup-script.sh,us-central1-a,myvpc,subnet-a,mykids-280210,"https://www.googleapis.com/auth/devstorage.read_write', 'https://www.googleapis.com/auth/logging.write'","external', 'home', 'local'"
What I don't understand is that when the values are hard-coded directly it works, but when they are passed through a variable it fails.
I have marked the problem areas with <~~~~~~~~~~~~
Please suggest a fix if anyone understands the issue.
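Looking at the CSV rows above, each scopes/tags cell is read by DictReader as one single string (embedded quotes included), so [scope] produces a one-element list whose only entry is that whole string, which is exactly the invalid scope quoted in the 400 error. A minimal sketch of a fix, assuming the cell format shown above (split_csv_cell is a hypothetical helper, not part of the original code):

def split_csv_cell(cell):
    # Split a cell like "https://...read_write', 'https://...logging.write'"
    # into clean values, stripping stray quotes and whitespace.
    return [part.strip().strip("'\"") for part in cell.split(',') if part.strip("'\" ")]

scope = split_csv_cell(row['scopes'])  # a real list of scope URLs
tag = split_csv_cell(row['tags'])      # a real list of tag strings

# ...and then in create_instance:
# 'tags': {'items': tag},
# 'serviceAccounts': [{'email': 'default', 'scopes': scope}],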

@d.s, can you try changing your scope format to something like this:
'serviceAccounts': [
    {
        'email': 'default',
        'scopes': [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/servicecontrol',
            'https://www.googleapis.com/auth/service.management.readonly',
            'https://www.googleapis.com/auth/logging.write',
            'https://www.googleapis.com/auth/monitoring.write',
            'https://www.googleapis.com/auth/trace.append',
            'https://www.googleapis.com/auth/devstorage.read_write'
        ]
    }
]
The listed scopes are the default scopes that an instance needs. I think the problem you are facing is that you were trying to list only two scopes, which are not enough to allow you to deploy your instance.

Related

Elasticsearch "register repo" command returns 500 error code but syntax matches the doc (I think)

I have this Python code to register a Google Cloud Storage (GCS) repository:
import json
import requests
from grabconfig import grabconfig

(HOSTS, ign) = grabconfig()

reqHeaders = {'content-type': 'application/json'}

for h in HOSTS:
    url = f'http://{h}:9200'
    # Repository settings as one well-formed JSON object.
    body = {
        'type': 'gcs',
        'settings': {
            'client': 'secondary',
            'bucket': 'prod_backup2',
            'compress': True
        }
    }
    r = requests.put(f'{url}/_snapshot/prod_backup2',
                     data=json.dumps(body),
                     headers=reqHeaders)
    print(r)
    print(r.json())

    r2 = requests.get(f'{url}/_cat/snapshots')
    print(r2)
    print(r2.json())
The configuration file I am using is the prod.py one:
HOSTS = ['10.x.x.x']
BACKUP_REPO = ['prod_backup2']
But when I run the code I get this error, always:
<Response [500]>
{'error': {'root_cause': [{'type': 'repository_exception', 'reason': '[prod_backup2] repository type [gcs] does not exist'}], 'type': 'repository_exception', 'reason': '[prod_backup2] repository type [gcs] does not exist'}, 'status': 500}
I think I found it: the gcs plugin was not installed on the server I was targeting.
That's supposed to be fixed by Monday, so I'm on to the next task.
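For anyone hitting the same 500, a quick way to confirm whether the repository-gcs plugin is installed is the standard _cat/plugins endpoint (a sketch reusing HOSTS from the code above):

import requests

# Print the installed plugins on each node; 'repository-gcs' must be listed
# for repository type 'gcs' to exist.
for h in HOSTS:
    print(requests.get(f'http://{h}:9200/_cat/plugins?v').text)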

Creating Azure VirtualMachineExtension failure

I have a Windows machine to which I want to add a VM extension using the Azure Python SDK. I send the following request:
{'location': 'westus',
 'tags': None,
 'publisher': 'Microsoft.Compute',
 'virtual_machine_extension_type': 'CustomScriptExtension',
 'type_handler_version': '1.4',
 'settings': '{"file_uris": ["https://mysite.azurescripts.net/ps_enable_winrm_http.ps1"], '
             '"command_to_execute": "powershell -ExecutionPolicy Unrestricted -file ps_enable_winrm_http.ps1"}'
}
but it fails with the following exception:
configure virtual_machine '946b4246-a604-4b01-9e6a-09ed64a93bdb' failed with this error :
VM has reported a failure when processing extension '13da0dc5-09c0-4e56-a35d-fdbc42432e11'.
Error message: "Invalid handler configuration. Exiting.
Error Message: Expecting state 'Element'.. Encountered 'Text' with name '', namespace ''. "
More information on troubleshooting is available at https://aka.ms/VMExtensionCSEWindowsTroubleshoot
Here is the simple code snippet that I use:
vm_extension_name = "{0}".format(uuid4())
vm_extension_params = {
    'location': location_val,
    'tags': tags_val,
    'publisher': 'Microsoft.Compute',
    'virtual_machine_extension_type': 'CustomScriptExtension',
    'type_handler_version': type_handler_version,
    'auto_upgrade_minor_version': True,
    'settings': json.dumps({
        'fileUris': file_uris,
        'commandToExecute': command_to_execute
    })
}
logger.info("sending {0}".format(vm_extension_params))
Any ideas? Should I send something differently, or am I missing something in the request above that causes the issue?
Thanks for the help in advance.
When we use the Python SDK to install a custom script extension, we create a VirtualMachineExtension object. Its settings parameter should be an object (a dict), but you define it as a str. Please update it by removing the string quoting (the json.dumps call). For more details, please refer to the documentation.
For example
from azure.mgmt.compute import ComputeManagementClient
from azure.common.credentials import ServicePrincipalCredentials

AZURE_TENANT_ID = ''
AZURE_CLIENT_ID = ''
AZURE_CLIENT_SECRET = ''
AZURE_SUBSCRIPTION_ID = ''

credentials = ServicePrincipalCredentials(client_id=AZURE_CLIENT_ID, secret=AZURE_CLIENT_SECRET, tenant=AZURE_TENANT_ID)
compute_client = ComputeManagementClient(credentials, AZURE_SUBSCRIPTION_ID)

resource_group_name = 'stan'
vm_name = 'win2016'
params_create = {
    'location': 'CentralUS',
    'tags': None,
    'publisher': 'Microsoft.Compute',
    'virtual_machine_extension_type': 'CustomScriptExtension',
    'type_handler_version': '1.4',
    'settings': {
        'fileUris': ['https://***/test/test.ps1'],
        'commandToExecute': 'powershell -ExecutionPolicy Unrestricted -File test.ps1'
    }
}

ext_poller = compute_client.virtual_machine_extensions.create_or_update(
    resource_group_name,
    vm_name,
    'test',
    params_create,
)
ext = ext_poller.result()
print(ext)
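Applied to the snippet from the question, the change amounts to passing the dict directly instead of a JSON string (a sketch reusing the question's variable names, which are assumed to be defined elsewhere):

vm_extension_params = {
    'location': location_val,
    'tags': tags_val,
    'publisher': 'Microsoft.Compute',
    'virtual_machine_extension_type': 'CustomScriptExtension',
    'type_handler_version': type_handler_version,
    'auto_upgrade_minor_version': True,
    # Pass a dict, not json.dumps(...): the SDK serializes settings itself.
    'settings': {
        'fileUris': file_uris,
        'commandToExecute': command_to_execute
    }
}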

Getting HttpError 401 while generating a Google AdMob network report using a Python script

Below is the code that I am using to generate the AdMob network report:
import os
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials

base_path = os.path.dirname(os.path.realpath(__file__))
scopes = ['https://www.googleapis.com/auth/admob.report']
key_file_location = base_path + '/config/service_account.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(key_file_location, scopes)
account_id = 'accounts/pub-XXXXXXXXXXXXXXXX'

network_report_filter = {
    'dateRange': {
        'startDate': {'year': 2020, 'month': 1, 'day': 1},
        'endDate': {'year': 2020, 'month': 2, 'day': 10}
    },
    'dimensions': ['DATE', 'APP', 'COUNTRY'],
    'metrics': ['CLICKS', 'ESTIMATED_EARNINGS'],
    'dimensionFilters': [
        {
            'dimension': 'COUNTRY',
            'matchesAny': {'values': [{'value': 'US'}, {'value': 'CN'}]}
        }
    ],
    'sortConditions': [
        {'dimension': 'APP', 'order': 'ASCENDING'},
        {'metric': 'CLICKS', 'order': 'DESCENDING'}
    ],
    'localizationSettings': {
        'currencyCode': 'USD',
        'languageCode': 'en-US'
    }
}

# Build the service object and request the report.
admob = build('admob', 'v1', credentials=credentials)
accounts = admob.accounts()
network_report = accounts.networkReport().generate(
    parent=account_id, body={'reportSpec': network_report_filter})
data = network_report.execute()
It throws the following error:
*** HttpError: https://admob.googleapis.com/v1/accounts/pub-XXXXXXXXXXXXXXXX/networkReport:generate?alt=json returned "Request is missing required authentication credential. Expected OAuth 2 access token, login cookie or other valid authentication credential. See https://developers.google.com/identity/sign-in/web/devconsole-project.">
I have generated the service account credentials and the AdMob API is enabled, but I am not able to figure out why there is an authentication error.
The main issue is that the code above tries to use a service account to query the API, but that's not supported. It can only be queried with an OAuth 2.0 client ID.
The steps to generate an OAuth 2.0 client ID:
Open the credentials page of your project (https://console.cloud.google.com/apis/credentials?project=REPLACE_WITH_YOUR_PROJECT_ID);
Generate an OAuth 2.0 client ID;
Download the generated JSON file;
Use it with the code below.
The following works well for me:
Libs:
pip3 install --upgrade google-api-python-client --user
pip3 install --upgrade oauth2client --user
Code example:
import csv
import sys

from googleapiclient import discovery
from googleapiclient.http import build_http
from oauth2client import tools
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow


class AdMobAPI:
    def __init__(self):
        scope = 'https://www.googleapis.com/auth/admob.report'
        name = 'admob'
        version = 'v1'
        flow = OAuth2WebServerFlow(client_id='<todo: replace with a client_id from the secret json>',
                                   client_secret='<todo: replace with a secret from the secret json>',
                                   scope=scope)
        storage = Storage(name + '.dat')
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            credentials = tools.run_flow(flow, storage)
        http = credentials.authorize(http=build_http())
        self.admob = discovery.build(name, version, http=http)

    # Convert to a list of dictionaries
    def report_to_list_of_dictionaries(self, response):
        result = []
        for report_line in response:
            if report_line.get('row'):
                print(report_line)
                row = report_line.get('row')
                dm = {}
                if row.get('dimensionValues'):
                    for key, value in row.get('dimensionValues').items():
                        if value.get('value') and value.get('displayLabel'):
                            dm.update({key: value.get('value')})
                            dm.update({key + '_NAME': value.get('displayLabel')})
                        else:
                            dm.update({key: next(filter(None, [value.get('value'), value.get('displayLabel')]))})
                if row.get('metricValues'):
                    for key, value in row.get('metricValues').items():
                        dm.update({key: next(filter(None, [value.get('value'), value.get('microsValue'), value.get('integerValue')]))})
                result.append(dm)
        return result

    def generate_report(self, publisher_id):
        date_range = {'startDate': {'year': 2020, 'month': 4, 'day': 1},
                      'endDate': {'year': 2020, 'month': 4, 'day': 1}}
        dimensions = ['DATE', 'APP', 'PLATFORM', 'COUNTRY']
        metrics = ['ESTIMATED_EARNINGS', 'IMPRESSIONS', 'CLICKS',
                   'AD_REQUESTS', 'MATCHED_REQUESTS']
        sort_conditions = {'dimension': 'DATE', 'order': 'DESCENDING'}
        report_spec = {'dateRange': date_range,
                       'dimensions': dimensions,
                       'metrics': metrics,
                       'sortConditions': [sort_conditions]}
        request = {'reportSpec': report_spec}
        return self.admob.accounts().networkReport().generate(
            parent='accounts/{}'.format(publisher_id),
            body=request).execute()


api = AdMobAPI()
raw_report = api.generate_report('<todo: replace with publisher id, smth like pub-[0-9]+>')
report_as_list_of_dictionaries = api.report_to_list_of_dictionaries(raw_report)

# Convert to CSV
dict_writer = csv.DictWriter(sys.stdout, report_as_list_of_dictionaries[0].keys())
dict_writer.writeheader()
dict_writer.writerows(report_as_list_of_dictionaries)
Currently, the Google AdMob API does not support service accounts.

CloudFormation stack deletion failing to remove VPC

I have created AWS infrastructure (EC2, Redshift, VPC, etc.) via CloudFormation. Now I want to delete it in a particular reverse order, e.g. all resources depend on the VPC, so the VPC should be deleted last. Every other stack deletes, but the VPC stack fails to delete via Python boto3 with a subnet or network interface dependency error. When I delete it via the console, however, it deletes successfully.
Has anyone faced this issue?
I have tried deleting everything attached to it, like the load balancer, but the VPC still won't delete.
AWS CloudFormation creates a dependency graph between resources based upon DependsOn references in the template and references between resources.
It then tries to deploy resources in parallel, but takes dependencies into account.
For example, a Subnet might be defined as:
Subnet1:
  Type: AWS::EC2::Subnet
  Properties:
    CidrBlock: 10.0.0.0/24
    VpcId: !Ref ProdVPC
In this situation, there is an explicit reference to ProdVPC, so CloudFormation will only create Subnet1 after ProdVPC has been created.
When a CloudFormation stack is deleted, the reverse logic is applied. In this case, Subnet1 will be deleted before ProdVPC is deleted.
However, CloudFormation is not aware of resources created outside of the stack. This means that if a resource (eg an Amazon EC2 instance) is created inside the Subnet, then stack deletion will fail because the Subnet cannot be deleted while there is an EC2 instance using it (or, more accurately, an ENI is attached to it).
In such situations, you will need to manually delete the resources that are causing the "delete failure" and then try the delete command again.
A good way to find such resources is to look in the Network Interfaces section of the EC2 management console. Make sure that there are no interfaces connected to the VPC.
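If you prefer to script that check rather than use the console, something like this boto3 sketch lists the ENIs still attached to a VPC (the VPC ID below is a placeholder):

import boto3

ec2 = boto3.client('ec2')

# List every ENI still in the VPC; each one will block deletion.
# 'vpc-0123456789abcdef0' is a placeholder; substitute your VPC ID.
response = ec2.describe_network_interfaces(
    Filters=[{'Name': 'vpc-id', 'Values': ['vpc-0123456789abcdef0']}]
)
for eni in response['NetworkInterfaces']:
    print(eni['NetworkInterfaceId'], eni['Status'], eni.get('Description', ''))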
As you specify that you are having issues deleting VPCs within stacks containing lambdas that are themselves in the VPC, this is most probably because of the network interfaces generated by the lambdas to connect to other resources in the VPC.
Technically these network interfaces should be auto-deleted when the lambdas are undeployed from the stack, but in my experience I have observed orphaned ENIs that don't let the VPC be undeployed.
For this reason, I created a custom-resource-backed lambda that cleans up the ENIs after all lambdas within the VPC have been undeployed.
This is the CloudFormation part where you set up the custom resource and pass the VPC ID:
##############################################
#                                            #
#  Custom resource deleting net interfaces   #
#                                            #
##############################################
NetInterfacesCleanupFunction:
  Type: AWS::Serverless::Function
  Properties:
    CodeUri: src
    Handler: cleanup/network_interfaces.handler
    Role: !GetAtt BasicLambdaRole.Arn
    DeploymentPreference:
      Type: AllAtOnce
    Timeout: 900

PermissionForNewInterfacesCleanupLambda:
  Type: AWS::Lambda::Permission
  Properties:
    Action: lambda:invokeFunction
    FunctionName:
      Fn::GetAtt: [ NetInterfacesCleanupFunction, Arn ]
    Principal: lambda.amazonaws.com

InvokeLambdaFunctionToCleanupNetInterfaces:
  DependsOn: [PermissionForNewInterfacesCleanupLambda]
  Type: Custom::CleanupNetInterfacesLambda
  Properties:
    ServiceToken: !GetAtt NetInterfacesCleanupFunction.Arn
    StackName: !Ref AWS::StackName
    VPCID:
      Fn::ImportValue: !Sub '${MasterStack}-Articles-VPC-Ref'
    Tags:
      'owner': !Ref StackOwner
      'task': !Ref Task
And this is the corresponding lambda. It tries 3 times to detach and delete orphaned network interfaces, and fails if it can't, which means a lambda is still generating new network interfaces and you need to debug that.
import sys
from time import sleep

import boto3
from botocore.exceptions import ClientError

# Fix this wherever your custom resource handler code is
from common import cfn_custom_resources as csr

MAX_RETRIES = 3

client = boto3.client('ec2')


def handler(event, context):
    vpc_id = event['ResourceProperties']['VPCID']
    if not csr.__is_valid_event(event, context):
        result = {'result': 'Invalid custom resource event'}
        csr.send(event, context, csr.FAILED, csr.validate_response_data(result))
        return
    elif event['RequestType'] == 'Create' or event['RequestType'] == 'Update':
        result = {'result': 'Don\'t trigger the rest of the code'}
        csr.send(event, context, csr.SUCCESS, csr.validate_response_data(result))
        return
    try:
        # Get all network interfaces for the given VPC which are attached to a lambda function
        interfaces = client.describe_network_interfaces(
            Filters=[
                {
                    'Name': 'description',
                    'Values': ['AWS Lambda VPC ENI*']
                },
                {
                    'Name': 'vpc-id',
                    'Values': [vpc_id]
                },
            ],
        )

        failed_detach = list()
        failed_delete = list()

        # Detach the network interfaces found above
        for interface in interfaces['NetworkInterfaces']:
            detach_interface(failed_detach, interface)

        # Try detaching a second time and delete each simultaneously
        for interface in interfaces['NetworkInterfaces']:
            detach_and_delete_interface(failed_detach, failed_delete, interface)

        if not failed_detach and not failed_delete:
            result = {'result': 'Network interfaces detached and deleted successfully'}
            csr.send(event, context, csr.SUCCESS, csr.validate_response_data(result))
        else:
            result = {'result': 'Network interfaces couldn\'t be deleted completely'}
            csr.send(event, context, csr.FAILED, csr.validate_response_data(result))
    except Exception:
        print("Unexpected error:", sys.exc_info())
        result = {'result': 'Some error with the process of detaching and deleting the network interfaces'}
        csr.send(event, context, csr.FAILED, csr.validate_response_data(result))


def detach_interface(failed_detach, interface):
    try:
        if interface['Status'] == 'in-use':
            detach_response = client.detach_network_interface(
                AttachmentId=interface['Attachment']['AttachmentId'],
                Force=True
            )
            # Sleep for 1 sec after every detachment
            sleep(1)
            print(f"Detach response for {interface['NetworkInterfaceId']}- {detach_response}")
            if 'HTTPStatusCode' not in detach_response['ResponseMetadata'] or \
                    detach_response['ResponseMetadata']['HTTPStatusCode'] != 200:
                failed_detach.append(detach_response)
    except ClientError:
        print(f"Exception details - {sys.exc_info()}")


def detach_and_delete_interface(failed_detach, failed_delete, interface, retries=0):
    detach_interface(failed_detach, interface)
    sleep(retries + 1)
    try:
        delete_response = client.delete_network_interface(
            NetworkInterfaceId=interface['NetworkInterfaceId'])
        print(f"Delete response for {interface['NetworkInterfaceId']}- {delete_response}")
        if 'HTTPStatusCode' not in delete_response['ResponseMetadata'] or \
                delete_response['ResponseMetadata']['HTTPStatusCode'] != 200:
            failed_delete.append(delete_response)
    except ClientError as e:
        print(f"Exception while deleting - {str(e)}")
        if retries <= MAX_RETRIES:
            if e.response['Error']['Code'] in ('InvalidNetworkInterface.InUse', 'InvalidParameterValue'):
                retries = retries + 1
                print(f"Retry {retries}: Interface in use, deletion failed, retrying to detach and delete")
                detach_and_delete_interface(failed_detach, failed_delete, interface, retries)
            else:
                raise RuntimeError("Code not found in error")
        else:
            raise RuntimeError("Max number of retries exhausted to remove the interface")
The link to the lambda is https://gist.github.com/revolutionisme/8ec785f8202f47da5517c295a28c7cb5
More information about configuring lambdas in a VPC - https://docs.aws.amazon.com/lambda/latest/dg/vpc.html

Abnormal behavior of python package eve

I have installed the eve package on my Windows machine, but every time I shut down the machine and try to load the eve package I get a module-not-found error.
On a re-installation attempt (btw, I used the latest pip version to install), I get an error from:
from eve import Eve
app = Eve()
app.run()
The error points to the second line.
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-79-46d1b24866c8> in <module>()
30 # host = '127.0.0.1'
31
---> 32 app = Eve()
33 # app.run()
34
~\AppData\Local\Continuum\anaconda3\lib\site-packages\eve\flaskapp.py in __init__(self, import_name, settings, validator, data, auth, redis, url_converters, json_encoder, media, **kwargs)
158 self.settings = settings
159
--> 160 self.load_config()
161 self.validate_domain_struct()
162
~\AppData\Local\Continuum\anaconda3\lib\site-packages\eve\flaskapp.py in load_config(self)
275
276 try:
--> 277 self.config.from_pyfile(pyfile)
278 except:
279 raise
~\AppData\Local\Continuum\anaconda3\lib\site-packages\flask\config.py in from_pyfile(self, filename, silent)
128 try:
129 with open(filename, mode='rb') as config_file:
--> 130 exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
131 except IOError as e:
132 if silent and e.errno in (
~\AppData\Local\Continuum\anaconda3\lib\site-packages\bokeh\settings.py in <module>()
9 from os.path import join, abspath, isdir
10
---> 11 from .util.paths import ROOT_DIR, bokehjsdir
12
13
ModuleNotFoundError: No module named 'config'
Moreover, I find that there is no folder "lib" but "Lib". If this is the problem, how do I rectify it?
However, the code below "works", but only runs for microseconds, not like running a back-end server with APIs:
from eve import Eve
app=Eve
app.run
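(Editor's note, not in the original question: the snippet above never actually starts the server, because Eve and app.run without parentheses merely reference the class and the method, so nothing is instantiated or called and the process exits immediately. Compare:)

from eve import Eve

app = Eve    # binds the class object itself; no app is created
app.run      # references the method; it is never called

app = Eve()  # instantiates the app (this is the line that triggers the failing config load)
app.run()    # actually starts the development server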
The settings.py file:
# Let's just use the local mongod instance. Edit as needed.
# Please note that MONGO_HOST and MONGO_PORT could very well be left
# out as they already default to a bare bones local 'mongod' instance.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DBNAME = 'apitest'
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
# Enable reads (GET), edits (PATCH), replacements (PUT) and deletes of
# individual items (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']
people = {
# 'title' tag used in item links.
'item_title': 'person',
# by default the standard item entry point is defined as
# '/people/<ObjectId>/'. We leave it untouched, and we also enable an
# additional read-only entry point. This way consumers can also perform GET
# requests at '/people/<lastname>/'.
'additional_lookup': {
'url': 'regex("[\w]+")',
'field': 'lastname'
},
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
'resource_methods': ['GET', 'POST'],
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/pyeve/cerberus) for details.
'schema': {
'firstname': {
'type': 'string',
'minlength': 1,
'maxlength': 10,
},
'lastname': {
'type': 'string',
'minlength': 1,
'maxlength': 15,
'required': True,
# talk about hard constraints! For the purpose of the demo
# 'lastname' is an API entry-point, so we need it to be unique.
'unique': True,
},
# 'role' is a list, and can only contain values from 'allowed'.
'role': {
'type': 'list',
'allowed': ["author", "contributor", "copy"],
},
# An embedded 'strongly-typed' dictionary.
'location': {
'type': 'dict',
'schema': {
'address': {'type': 'string'},
'city': {'type': 'string'}
},
},
'born': {
'type': 'datetime',
},
}
}
DOMAIN = {
'people': people,
}
So, what could be the solution to this problem?
Any help is appreciated.
I don't have this issue after a quick test. Let me share all my steps with you; let me know if anything is different.
1) Enter Anaconda Prompt
2) conda create -n eswar python=3.6
3) conda activate eswar
4) pip install eve
5) python
5.1) import eve
5.2) exit()
6) shutdown windows machine
7) restart windows machine
8) enter anaconda prompt
9) conda activate eswar
10) python
11) from eve import Eve
12) everything looks fine.
Did you forget to activate your env after the restart?
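If the environment is active and the error persists, a quick check of which interpreter and which eve installation are actually in use can help (standard Python introspection, nothing environment-specific assumed):

import sys
import eve

# If these paths point outside your activated env, the wrong
# environment (or the wrong Python) is being used.
print(sys.executable)
print(eve.__file__)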