botocore.exceptions.ClientError: An error occurred (InvalidInstanceID.Malformed) - python-3.x

Any suggestions, please? Here is the code. It works fine if I call the function as get_instance_name('i-0368cdfdded'), but when I read the IDs from ids.txt (tried entries both as 'i-xxxx' and i-xxxx) the error I get is:
botocore.exceptions.ClientError: An error occurred (InvalidInstanceID.Malformed) when calling the DescribeInstances operation: Invalid id: "i-xxxxxx"
import boto3

AWS_REGION = "us-west-2"
AWS_PROFILE = "profilex"
session = boto3.session.Session(profile_name=AWS_PROFILE)
ec2 = session.resource('ec2', region_name=AWS_REGION)

def get_instance_name(fid):
    i = ec2.Instance(fid)
    instancename = ''
    for tags in i.tags:
        if tags["Key"] == 'Name':
            instancename = tags["Value"]
    return instancename

with open('ids.txt') as f:
    for line in f:
        get_instance_name('line')

What I found is that I get the expected output when I run the code in an interactive Python terminal, but not when I run it as a program. I am not able to figure out why, but the program itself works on the console. Here is what I ran last, which worked (after much trial and error):
import boto3
import sys
import os
import json

AWS_REGION = "us-west-2"
AWS_PROFILE = "xxxx"
session = boto3.session.Session(profile_name=AWS_PROFILE)
ec2 = session.resource('ec2', region_name=AWS_REGION)

def get_instance_name(fid):
    i = ec2.Instance(fid)
    instancename = ''
    for tags in i.tags:
        if tags["Key"] == 'Name':
            instancename = tags["Value"]
    return instancename

filename = "xxx.txt"
with open(filename) as file:
    while (line := file.readline().rstrip()):
        # print(line)
        get_instance_name(line)
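For reference, the first version fails for two reasons: it passes the literal string 'line' to the function instead of the variable line, and each line read from a file still carries its trailing newline, which makes the instance ID malformed. A minimal fix for the original loop (a sketch, reusing the get_instance_name defined above):

with open('ids.txt') as f:
    for line in f:
        instance_id = line.strip()  # drop the trailing newline before calling the API
        if instance_id:             # skip blank lines
            print(get_instance_name(instance_id))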


Error 500 when trying to send file from remote server (python code) to an SFTP server

I have a Flask API in a Docker container where I do the following:
from flask import Flask, request
import os
import json
import paramiko
import subprocess

app = Flask(__name__)

@app.route("/")
def hello():
    return "Service up in K8S!"

@app.route("/get", methods=['GET'])
def get_ano():
    print("Test liveness")
    return "Pod is alive !"

@app.route("/run", methods=['POST'])
def run_dump_generation():
    rules_str = request.headers.get('database')
    print(rules_str)
    postgres_bin = r"/usr/bin/"
    dump_file = "database_dump.sql"
    os.environ['PGPASSWORD'] = 'XXXXX'
    print('Before dump generation')
    with open(dump_file, "w") as f:
        result = subprocess.call([
            os.path.join(postgres_bin, "pg_dump"),
            "-Fp",
            "-d",
            "XX",
            "-U",
            "XX",
            "-h",
            "XX",
            "-p",
            "XX"
        ],
            stdout=f
        )
    print('After dump generation')
    transport = paramiko.Transport(("X", X))
    transport.connect(username="X", password="X")
    sftp = transport.open_sftp_client()
    remote_file = '/data/database_dump.sql'
    sftp.put('database_dump.sql', remote_file)
    print("SFTP object", sftp)

if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
When I run the app in Kubernetes and send a POST request, I get the error: "POST /run HTTP/1.1" 500.
Here are the requirements.txt:
Flask==2.0.1
paramiko==3.0.0
The error comes from transport = paramiko.Transport(("X", X)). The same code works locally, and I don't understand why I get this error on Kubernetes. No prints appear in the logs, which I assume is because of the 500 error. I suspect it is not possible with this code to send a file from this container to the SFTP server (which runs OpenSSH).
What can I do?
---- UPDATE ----
I think I have found the problem. From the Flask pod I am trying to send a file to the SFTP server, so I have to modify the following code to allow this type of transfer. It is an SFTP server with OpenSSH.
Here is the code to modify:
transport = paramiko.Transport(("X", X))
transport.connect(username="X", password="X")
sftp = transport.open_sftp_client()
remote_file = '/data/database_dump.sql'
sftp.put('database_dump.sql', remote_file)
print("SFTP object", sftp)
The SFTP server (with OpenSSH) runs on Alpine, and the Flask code is in an Alpine container too.
UPDATE: I tried the following:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
session = ssh.connect(hostname="X", port=X, username='X', password="X")
print(ssh)
But I have the following error:
Traceback (most recent call last):
  File "c:/Users/X/dump_generator_api/t.py", line 32, in <module>
    session = ssh.connect(hostname="X", port=X, username='X', password="X")
  File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\client.py", line 449, in connect
    self._auth(
  File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\client.py", line 780, in _auth
    raise saved_exception
  File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\client.py", line 767, in _auth
    self._transport.auth_password(username, password)
  File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\transport.py", line 1567, in auth_password
    return self.auth_handler.wait_for_response(my_event)
  File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\auth_handler.py", line 259, in wait_for_response
    raise e
paramiko.ssh_exception.AuthenticationException: Authentication failed.
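Since the 500 hides the real failure, one way to get more information is to wrap the transfer and log the underlying exception instead of letting Flask swallow it. A minimal sketch using paramiko's SSHClient, with placeholder host and credentials:

import logging
import paramiko

def upload_dump(host, port, username, password, local_path, remote_path):
    """Upload a file over SFTP, logging any failure instead of returning a bare 500."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(hostname=host, port=port, username=username, password=password)
        sftp = ssh.open_sftp()
        sftp.put(local_path, remote_path)
    except paramiko.AuthenticationException:
        # matches the AuthenticationException in the traceback above:
        # check the username/password and the server's allowed auth methods
        logging.exception("SFTP authentication failed")
        raise
    except Exception:
        logging.exception("SFTP upload failed")
        raise
    finally:
        ssh.close()

With logging configured, the full stack trace then shows up in the pod logs rather than only a 500 at the client.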

HTTPSConnectionPool(host='127.0.0.1', port=7545): Max retries exceeded with url: (Caused by NewConnectionError

# compile_standard is going to be the main function that we will use to compile this code.
from solcx import compile_standard, install_solc
import json
from web3 import Web3
import os
from dotenv import load_dotenv
load_dotenv()
with open("./SimpleStorage.sol", "r") as file:
simple_storage_file = file.read()
print("Installing...")
install_solc("0.6.0")
# print(simple_storage_file)
# compile our solidity
compiled_sol = compile_standard(
    {
        "language": "Solidity",
        "sources": {"SimpleStorage.sol": {"content": simple_storage_file}},
        "settings": {
            "outputSelection": {
                "*": {
                    "*": [
                        "abi",
                        "metadata",
                        "evm.bytecode",
                        "evm.bytecode.sourceMap",
                    ]  # ABI = Application Binary Interface; the EVM (Ethereum Virtual Machine) is the core component of the Ethereum network
                }
            }
        },
    },
    solc_version="0.6.0",
)
# print(compiled_sol)
with open("compiled_code.json", "w") as file: # w means it wil wright from it
json.dump(
compiled_sol, file
) # is it's going to take our compiled soul jason variable and just dump it into this (file) here
# but still it is going to keep it in json syntax
# get bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"][
"bytecode"
]["object"]
# get abi
abi = json.loads(
compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["metadata"]
)["output"]["abi"]
# for connecting to ganache
w3 = Web3(Web3.HTTPProvider("https://127.0.0.1:7545"))
chain_id = 5777
my_address = "0x630Ee320BcE235224184A31FC687a5D183142BB9"
private_key = "0xd3cf1f678e8a78ace754cf57bd6ebcb28852e9657bb371951d72bbb5a0a3f413"
# private_key = os.getenv(" PRIVATE_KEY ")
# print(private_key)
# Create the contract in Python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# print(SimpleStorage)
# Get the latest transaction
nonce = w3.eth.getTransactionCount(my_address)  # I'm having the error on this line (screenshot: https://i.stack.imgur.com/sPikF.png)
# print(nonce)
# we could see that the number of transaction=0 because we haven't made any
# 1. Build a transaction
# 2. Sign a transaction
# 3 . Send a transaction
transaction = SimpleStorage.constructor().buildTransaction(
    {"chainId": chain_id, "from": my_address, "nonce": nonce}
)
# print(transaction)
signed_txn = w3.eth.account.sign_transaction(transaction, private_key=private_key)
print(signed_txn) # this is how we sign a transaction
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# One good practice when sending a transaction is to wait for some
# block confirmations to happen; this will have our code stop and
# wait for the transaction hash to go through
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
# working with contracts
# Contract Address
# Contract ABI
# Working with deployed Contracts
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
# call->Simulate making the call and getting a return value
# Transact->Actually make a state change
# Initial value of a favorite number
print(simple_storage.functions.retrieve().call())
# store some value into this contract
store_transaction = simple_storage.functions.store(15).buildTransaction(
    {
        "chainId": chain_id,
        "gasPrice": w3.eth.gas_price,
        "from": my_address,
        "nonce": nonce + 1,
    }
)
signed_store_txn = w3.eth.account.sign_transaction(
    store_transaction, private_key=private_key
)
send_store_tx = w3.eth.send_raw_transaction(signed_store_txn.rawTransaction)
tx_receipt = w3.eth.wait_for_transaction_receipt(send_store_tx)
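No answer is recorded for this one, but given the HTTPSConnectionPool error in the title, a likely culprit is the provider URL scheme: Ganache serves plain HTTP locally, not HTTPS. A sketch of the connection lines under that assumption:

from web3 import Web3

# Ganache listens on plain HTTP; an https:// scheme against 127.0.0.1:7545
# produces "HTTPSConnectionPool ... Max retries exceeded ... NewConnectionError"
w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:7545"))
chain_id = 1337  # note: Ganache's chain id is usually 1337, while 5777 is its network id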

boto3 s3 bucket tagging

I'm getting an access error while tagging a bucket. Please note that the role I'm using has S3 full access.
The code works fine up to this point:
for bucket in s3.buckets.all():
    s3_bucket = bucket
    s3_bucket_name = s3_bucket.name
    try:
        response = s3_client.get_bucket_tagging(Bucket=s3_bucket_name)
        print(response)
    except ClientError:
        print(s3_bucket_name, "does not have tags")
but after adding the PutBucketTagging code, it gives an error even for the GetBucketTagging operation.
This is my final code:
for bucket in s3.buckets.all():
    s3_bucket = bucket
    s3_bucket_name = s3_bucket.name
    try:
        response = s3_client.get_bucket_tagging(Bucket=s3_bucket_name)
        print(response)
    except ClientError:
        print(s3_bucket_name, "does not have tags")
    bucket_tagging = s3.BucketTagging(s3_bucket_name)
    response = bucket_tagging.put(
        Tagging={
            'TagSet': [
                {
                    'Key': 'pcs:name',
                    'Value': s3_bucket_name
                },
            ]
        },
    )
The error I'm getting is:
botocore.exceptions.ClientError: An error occurred (AccessDenied) when calling the GetBucketTagging operation: Access Denied
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "tagging.py", line 91, in <module>
    tagging()
  File "tagging.py", line 71, in tagging
    'Value': s3_bucket_name
  File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/boto3/resources/factory.py", line 520, in do_action
    response = action(self, *args, **kwargs)
  File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/boto3/resources/action.py", line 83, in __call__
    response = getattr(parent.meta.client, operation_name)(*args, **params)
  File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/botocore/client.py", line 395, in _api_call
    return self._make_api_call(operation_name, kwargs)
  File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/botocore/client.py", line 725, in _make_api_call
    raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (AccessDenied) when calling the PutBucketTagging operation: Access Denied
Am I passing the tag parameters wrong? I took this from the Boto3 documentation itself.
I couldn't find a way to catch the exception, however, this worked for me:
tagging_client = boto3.client('resourcegroupstaggingapi')
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
for bucket in s3.buckets.all():
    s3_bucket = bucket
    s3_bucket_name = s3_bucket.name
    bucket_tagging = s3.BucketTagging(s3_bucket_name)
    try:
        response = s3_client.get_bucket_tagging(Bucket=s3_bucket_name)
        a = response
    except ClientError:
        response = tagging_client.tag_resources(
            ResourceARNList=[
                "arn:aws:s3:::" + bucket.name
            ],
            Tags={
                'pcs:name': bucket.name
            }
        )
Please note that you'll need the additional "resource tagging" permissions attached to your role.
Hope this helps. Cheers.
I took out the try sections and ran this version of your code:
import boto3

s3_resource = boto3.resource('s3')
bucket_tagging = s3_resource.BucketTagging('my-bucket-name')
response = bucket_tagging.put(
    Tagging={
        'TagSet': [
            {
                'Key': 'pcs:name',
                'Value': 'stackoverflow'
            },
        ]
    },
)
It worked fine.
Therefore, there must be something else that is causing your request to fail. You might want to check AWS CloudTrail to see if there is a hint as to why the request was denied.
"get_bucket_tagging" throws NoSuchTagSet when there are no tags. for testing create a tag first before run test or Catch the exception and create tags.

Airflow DataprocSubmitJobOperator - ValueError: Protocol message Job has no "python_file_uris" field

I'm using the DataprocSubmitJobOperator in Airflow to schedule PySpark jobs, but I'm unable to pass pyfiles to the PySpark job.
Here is the code I'm using:
DAG:
# working - passing jars
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": ["gs://dataproc-spark-jars/mongo-spark-connector_2.12-3.0.2.jar",
            'gs://dataproc-spark-jars/bson-4.0.5.jar', 'gs://dataproc-spark-jars/mongo-spark-connector_2.12-3.0.2.jar', 'gs://dataproc-spark-jars/mongodb-driver-core-4.0.5.jar',
            'gs://dataproc-spark-jars/mongodb-driver-sync-4.0.5.jar', 'gs://dataproc-spark-jars/spark-avro_2.12-3.1.2.jar', 'gs://dataproc-spark-jars/spark-bigquery-with-dependencies_2.12-0.23.2.jar',
            'gs://dataproc-spark-jars/spark-token-provider-kafka-0-10_2.12-3.1.3.jar', 'gs://dataproc-spark-jars/htrace-core4-4.1.0-incubating.jar', 'gs://dataproc-spark-jars/hadoop-client-3.3.1.jar', 'gs://dataproc-spark-jars/spark-sql-kafka-0-10_2.12-3.1.3.jar', 'gs://dataproc-spark-jars/hadoop-client-runtime-3.3.1.jar', 'gs://dataproc-spark-jars/hadoop-client-3.3.1.jar', 'gs://dataproc-spark-jars/kafka-clients-3.2.0.jar', 'gs://dataproc-spark-jars/commons-pool2-2.11.1.jar'],
        "file_uris": ['gs://kafka-certs/versa-kafka-gke-ca.p12', 'gs://kafka-certs/syslog-vani.p12',
            'gs://kafka-certs/alarm-compression-user.p12', 'gs://kafka-certs/appstats-user.p12',
            'gs://kafka-certs/insights-user.p12', 'gs://kafka-certs/intfutil-user.p12',
            'gs://kafka-certs/reloadpred-chkpoint-user.p12', 'gs://kafka-certs/reloadpred-user.p12',
            'gs://dataproc-spark-configs/topic-customer-map.cfg', 'gs://dataproc-spark-configs/params.cfg', 'gs://kafka-certs/issues-user.p12', 'gs://kafka-certs/anomaly-user.p12', 'gs://kafka-certs/appstat-anomaly-user.p12', 'gs://kafka-certs/appstat-agg-user.p12', 'gs://kafka-certs/alarmblock-user.p12']
    },
    "python_file_uris": ['gs://dagger-mongo/move2mongo_api.zip']
}
path = "gs://dataproc-spark-configs/pip_install.sh"

CLUSTER_GENERATOR_CONFIG = ClusterGenerator(
    project_id=PROJECT_ID,
    zone="us-east1-b",
    master_machine_type="n1-highmem-8",
    worker_machine_type="n1-highmem-8",
    num_workers=2,
    storage_bucket="dataproc-spark-logs",
    init_actions_uris=[path],
    metadata={'PIP_PACKAGES': 'pyyaml requests pandas openpyxl kafka-python google-cloud-storage pyspark'},
).make()
with models.DAG(
    'Versa-kafka2mongo_api',
    # Continue to run DAG twice per day
    default_args=default_dag_args,
    # schedule_interval='*/10 * * * *',
    schedule_interval=None,
    catchup=False,
) as dag:
    # create_dataproc_cluster
    create_dataproc_cluster = DataprocCreateClusterOperator(
        task_id="create_dataproc_cluster",
        cluster_name=CLUSTER_NAME,
        region=REGION,
        cluster_config=CLUSTER_GENERATOR_CONFIG
    )
    run_dataproc_spark = DataprocSubmitJobOperator(
        task_id="run_dataproc_spark",
        job=PYSPARK_JOB,
        location=REGION,
        project_id=PROJECT_ID,
    )
    delete_dataproc_cluster = DataprocDeleteClusterOperator(
        task_id="delete_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_name=CLUSTER_NAME,
        region=REGION,
        trigger_rule=trigger_rule.TriggerRule.ALL_SUCCESS
    )
    # Define DAG dependencies.
    create_dataproc_cluster >> run_dataproc_spark >> delete_dataproc_cluster
Here is the Python file (call_kafka2mongo_api.py) code:
from move2mongo_api import alarmBlock_api
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('kafka2mongo_api').getOrCreate()
print(f" spark : {spark}")
spark.sparkContext.addPyFile("move2mongo_api.zip")

cust = ['versa']
for c in cust:
    ap = alarmBlock_api(spark, c)
    ap.readFromKafka()
Please note: I upload move2mongo_api.zip to the storage bucket (gs://dagger-mongo/move2mongo_api.zip) before running the Airflow job. move2mongo_api.zip contains the Python file alarmBlock_api.py, which is referenced in the job file call_kafka2mongo_api.py.
When I run this workflow, the error I get is shown below:
Traceback (most recent call last):
  File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/cloud/operators/dataproc.py", line 1849, in execute
    job_object = self.hook.submit_job(
  File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/common/hooks/base_google.py", line 439, in inner_wrapper
    return func(self, *args, **kwargs)
  File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/cloud/hooks/dataproc.py", line 870, in submit_job
    return client.submit_job(
  File "/opt/python3.8/lib/python3.8/site-packages/google/cloud/dataproc_v1/services/job_controller/client.py", line 493, in submit_job
    request = jobs.SubmitJobRequest(request)
  File "/opt/python3.8/lib/python3.8/site-packages/proto/message.py", line 516, in __init__
    pb_value = marshal.to_proto(pb_type, value)
  File "/opt/python3.8/lib/python3.8/site-packages/proto/marshal/marshal.py", line 211, in to_proto
    pb_value = rule.to_proto(value)
  File "/opt/python3.8/lib/python3.8/site-packages/proto/marshal/rules/message.py", line 36, in to_proto
    return self._descriptor(**value)
ValueError: Protocol message Job has no "python_file_uris" field.
[2022-07-17, 18:07:00 UTC] {taskinstance.py:1279} INFO - Marking task as UP_FOR_RETRY. dag_id=Versa-kafka2mongo_api, task_id=run_dataproc_spark, execution_date=20220717T180647, start_date=20220717T180659, end_date=20220717T180700
What am I doing wrong here? Any ideas how to debug or fix this? Thanks in advance!
You appear to have a layout issue in PYSPARK_JOB.
You have:
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": []
    },
    "python_file_uris": []
}
You want:
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [],
        "python_file_uris": []
    },
}
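In other words, python_file_uris is a field of the PySparkJob message, not of the top-level Job, which is exactly what the ValueError says. Applied to the job in the question, that means moving the existing entry inside pyspark_job; a trimmed sketch (the jar and file lists are elided here, but stay as they were):

PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [...],   # same jar list as before
        "file_uris": [...],       # same certs/config list as before
        "python_file_uris": ["gs://dagger-mongo/move2mongo_api.zip"],
    },
}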

Error on deploying Python app to AWS Lambda

I have built a Python Tornado app and am trying to deploy it to AWS Lambda using Zappa, but I am getting an error: Error: Warning! Status check on the deployed lambda failed. A GET request to '/' yielded a 502 response code.
My folder structure inside the root folder is:
├── amortization.py
├── config.py
├── dmi-amort-dev-1557138776.zip
├── main.py
├── requirements.txt
├── venv
│   ├── bin
└── zappa_settings.json
zappa deploy dev gives me:
Calling deploy for stage dev..
Downloading and installing dependencies..
- pandas==0.24.2: Using locally cached manylinux wheel
- numpy==1.16.3: Using locally cached manylinux wheel
- sqlite==python3: Using precompiled lambda package
Packaging project as zip.
Uploading dmi-amort-dev-1557143681.zip (30.8MiB)..
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 32.3M/32.3M [00:19<00:00, 1.94MB/s]
Scheduling..
Scheduled dmi-amort-dev-zappa-keep-warm-handler.keep_warm_callback with expression rate(4 minutes)!
Uploading dmi-amort-dev-template-1557143718.json (1.5KiB)..
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1.56K/1.56K [00:00<00:00, 10.6KB/s]
Waiting for stack dmi-amort-dev to create (this can take a bit)..
75%|█████████████████████████████████████████████████████████████████████████████████████████████████████▎ | 3/4 [00:09<00:04, 5.00s/res]
Deploying API Gateway..
Error: Warning! Status check on the deployed lambda failed. A GET request to '/' yielded a 502 response code.
zappa tail gives me:
Traceback (most recent call last):
  File "/var/task/handler.py", line 602, in lambda_handler
    return LambdaHandler.lambda_handler(event, context)
  File "/var/task/handler.py", line 245, in lambda_handler
    handler = cls()
  File "/var/task/handler.py", line 142, in __init__
    wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
AttributeError: module 'main' has no attribute 'app'
zappa_settings.json:
{
    "dev": {
        "app_function": "main.app",
        "aws_region": "ap-south-1",
        "profile_name": "default",
        "project_name": "dmi-amort",
        "runtime": "python3.6",
        "s3_bucket": "zappa-mekp457ye",
        "manage_roles": false,
        "role_name": "lambda-role"
    }
}
main.py:
import tornado.web
from tornado.ioloop import IOLoop
from tornado.web import MissingArgumentError

from config import get_arguments
from amortization import get_amort_schedule


class MainHandler(tornado.web.RequestHandler):
    def prepare(self):
        """Checking if all the required parameters are present."""
        if self.request.method != 'POST':
            self.write_error(status_code=405, message="Method not allowed")
            return
        self.parameters = dict()
        for key in get_arguments():
            try:
                self.parameters[key] = self.get_argument(key)
            except MissingArgumentError:
                self.write_error(status_code=400,
                                 message="Missing Argument(s)")
                return
        # checking if 'label' is provided
        if 'label' in self.request.arguments.keys():
            self.parameters['label'] = self.get_argument('label')
        # Set up response dictionary.
        self.response = dict()

    def get(self, *args, **kwargs):
        self.write_error(status_code=405, message="Method not allowed")

    def post(self, *args, **kwargs):
        """Executes the main logic part."""
        self.response = get_amort_schedule(self.parameters)
        self.write_json()

    def set_default_headers(self):
        """Sets content-type as 'application/json' for response as JSON."""
        self.set_header('Content-Type', 'application/json')

    def write_error(self, status_code, **kwargs):
        """Invoked when an error occurs while processing the request."""
        if 'message' not in kwargs:
            if status_code == 405:
                kwargs['message'] = 'Invalid HTTP method.'
            else:
                kwargs['message'] = 'Unknown error.'
        kwargs["error"] = True
        self.set_status(status_code=status_code)
        self.response = dict(kwargs)
        self.write_json()

    def write_json(self):
        """Responsible for writing the response."""
        if "status" in self.response:
            self.set_status(self.response.get("status"))
        self.set_default_headers()
        self.write(self.response)
        self.finish()


def main():
    app = tornado.web.Application([
        (r'/', MainHandler),
    ], debug=True)
    # server = HTTPServer(app)
    # server.bind(8888)
    # server.start(0)
    app.listen(8888)  # listen() needs a port
    # app.run(host='0.0.0.0')
    IOLoop.current().start()


if __name__ == '__main__':
    main()
What is the mistake here and how can I fix it?
It looks like the deployment is succeeding, but when Zappa checks to see if the code is working, the return code is 502, which suggests that the lambda function is failing to run in the lambda environment.
Taking a look at the logs, the critical line is:
AttributeError: module 'main' has no attribute 'app'
And this is true: if we look at your code, at no point do you expose an attribute called app in main.py.
I'm not experienced with Tornado, but I suspect that if you move the declaration of app out of the main() function and into the root scope, then the handler should succeed.
For example:
# rest of the file...
        self.finish()

app = tornado.web.Application([
    (r'/', MainHandler),
], debug=True)
app.listen(8888)

def main():
    IOLoop.current().start()

if __name__ == '__main__':
    main()
