Error 500 when trying to send file from remote server (python code) to an SFTP server - python-3.x

I have a Flask API in a Docker container where I do the following:
from flask import Flask, request
import os
import json
import paramiko
import subprocess

app = Flask(__name__)

@app.route("/")
def hello():
    return "Service up in K8S!"

@app.route("/get", methods=['GET'])
def get_ano():
    print("Test liveness")
    return "Pod is alive !"

@app.route("/run", methods=['POST'])
def run_dump_generation():
    rules_str = request.headers.get('database')
    print(rules_str)
    postgres_bin = r"/usr/bin/"
    dump_file = "database_dump.sql"
    os.environ['PGPASSWORD'] = 'XXXXX'
    print('Before dump generation')
    with open(dump_file, "w") as f:
        result = subprocess.call([
            os.path.join(postgres_bin, "pg_dump"),
            "-Fp",
            "-d",
            "XX",
            "-U",
            "XX",
            "-h",
            "XX",
            "-p",
            "XX"
        ],
        stdout=f
        )
    print('After dump generation')
    transport = paramiko.Transport(("X", X))
    transport.connect(username="X", password="X")
    sftp = transport.open_sftp_client()
    remote_file = '/data/database_dump.sql'
    sftp.put('database_dump.sql', remote_file)
    print("SFTP object", sftp)

if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
When I run the app in Kubernetes and send a POST request, I get the error: "POST /run HTTP/1.1" 500
Here is the requirements.txt:
Flask==2.0.1
paramiko==3.0.0
The error comes from transport = paramiko.Transport(("X", X)). The same code works locally, so I don't understand why I get this error on Kubernetes. No print output shows up in the logs, which I assume is because of the 500 error. I guess it is not possible with this code to send a file from this container to the SFTP server (which runs OpenSSH).
What can I do ?
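One way to make the underlying failure visible instead of a bare 500 is to wrap the SFTP step in a try/except and log or return the exception. A minimal sketch, keeping the same placeholder host, port and credentials; the upload_dump helper and its return convention are illustrative additions, not part of the original code:

import traceback
import paramiko

def upload_dump(host, port, username, password,
                local_path="database_dump.sql",
                remote_path="/data/database_dump.sql"):
    """Upload the dump and report the real error instead of a bare 500."""
    try:
        transport = paramiko.Transport((host, port))
        transport.connect(username=username, password=password)
        sftp = transport.open_sftp_client()
        sftp.put(local_path, remote_path)
        sftp.close()
        transport.close()
        return True, "uploaded"
    except Exception as exc:
        traceback.print_exc()  # the real cause then shows up in `kubectl logs`
        return False, str(exc)

Called from the /run view, the second element of the tuple can be sent back in the response body, so both the pod logs and the HTTP client show whether the failure is DNS resolution, a connection timeout, or authentication.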
---- UPDATE ----
I think I have found the problem. From the Flask pod I am trying to send a file to the SFTP server, so I have to modify the following code to allow this type of transfer. It is an SFTP server with OpenSSH.
Here is the code to modify:
transport = paramiko.Transport(("X", X))
transport.connect(username="X", password="X")
sftp = transport.open_sftp_client()
remote_file = '/data/database_dump.sql'
sftp.put('database_dump.sql', remote_file)
print("SFTP object", sftp)
The SFTP server (with OpenSSH) runs on Alpine, and the Flask code runs in an Alpine container too.
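Before blaming paramiko, it can help to confirm that the pod can reach the SFTP host and port at all (Service name, DNS, NetworkPolicy). A small check that can be run from the Flask pod; the host name sftp-service and port 22 below are placeholders, not values from the question:

import socket

def can_reach(host, port, timeout=5):
    """Plain TCP check: separates network problems from SSH/auth problems."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError as exc:
        print(f"Cannot reach {host}:{port} -> {exc}")
        return False

# Placeholder values; use the real SFTP Service name and port
print(can_reach("sftp-service", 22))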
UPDATE: I tried the following:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
session = ssh.connect(hostname="X", port=X, username='X', password="X")
print(ssh)
But I get the following error:
File "c:/Users/X/dump_generator_api/t.py", line 32, in <module>
session = ssh.connect(hostname="X", port=X, username='X', password="X")
File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\client.py", line 449, in connect
self._auth(
File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\client.py", line 780, in _auth
raise saved_exception
File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\client.py", line 767, in _auth
self._transport.auth_password(username, password)
File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\transport.py", line 1567, in auth_password
return self.auth_handler.wait_for_response(my_event)
File "C:\Users\X\AppData\Local\Programs\Python\Python38-32\lib\site-packages\paramiko\auth_handler.py", line 259, in wait_for_response
raise e
paramiko.ssh_exception.AuthenticationException: Authentication failed.
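To see why the server rejects the password, paramiko's own debug log is usually enough. A minimal sketch with the same placeholders; disabling key and agent authentication with look_for_keys/allow_agent is an addition here, so that only the password is attempted and the rejection is unambiguous:

import logging
import paramiko

# Write the SSH negotiation and each authentication attempt to a file
paramiko.util.log_to_file("paramiko.log", level=logging.DEBUG)

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname="X", port=22, username="X", password="X",
            look_for_keys=False, allow_agent=False)

The log typically shows which authentication methods the OpenSSH server actually offers (for example publickey only), which is a common cause of "Authentication failed" with a correct password.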

Related

Why does my script run in local PyCharm but give errors when I upload it to my VPS (host)?

Why does my script run in local PyCharm, but when I upload it to my VPS (host) I get errors?
My Python code:
`
import json
import socketio

TOKEN = "my-super-token"  # your DonationAlerts token

sio = socketio.Client()

@sio.on('connect')
def on_connect():
    sio.emit('add-user', {"token": TOKEN, "type": "alert_widget"})

@sio.on('donation')
def on_message(data):
    y = json.loads(data)
    print(y['username'])
    print(y['message'])
    print(y['amount'])
    print(y['currency'])

sio.connect('wss://socket.donationalerts.ru:443', transports='websocket')
`
From localhost in PyCharm everything works, but from the host I get this error:
Traceback (most recent call last):
  File "test.py", line 22, in <module>
    sio.connect('wss://socket.donationalerts.ru:443', transports='websocket')
  File "/usr/local/lib/python3.8/site-packages/socketio/client.py", line 338, in connect
    raise exceptions.ConnectionError(exc.args[0]) from None
socketio.exceptions.ConnectionError: Connection error
I tried installing and reinstalling Python, the packages, etc.
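On the VPS the generic "Connection error" usually hides the real handshake failure (DNS, TLS, an HTTP 403 from the server, a blocked outbound port). python-socketio can print the whole Engine.IO handshake if the client is created with its loggers enabled; a minimal sketch, keeping the connection URL from the question:

import socketio

# Verbose client: the handshake and the server's responses are printed,
# so the cause of the failure is visible instead of a bare "Connection error".
sio = socketio.Client(logger=True, engineio_logger=True)

try:
    sio.connect('wss://socket.donationalerts.ru:443', transports='websocket')
    sio.wait()
except socketio.exceptions.ConnectionError as exc:
    print("Handshake failed:", exc)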

boto3 s3 bucket tagging

I'm getting an access error while tagging a bucket. Please note that the role I'm using has S3 full access.
The code works fine up to this point:
for bucket in s3.buckets.all():
    s3_bucket = bucket
    s3_bucket_name = s3_bucket.name
    try:
        response = s3_client.get_bucket_tagging(Bucket=s3_bucket_name)
        print(response)
    except ClientError:
        print(s3_bucket_name, "does not have tags")
But after adding the PutBucketTagging code, it gives an error even for the GetBucketTagging operation.
This is my final code:
for bucket in s3.buckets.all():
    s3_bucket = bucket
    s3_bucket_name = s3_bucket.name
    try:
        response = s3_client.get_bucket_tagging(Bucket=s3_bucket_name)
        print(response)
    except ClientError:
        print(s3_bucket_name, "does not have tags")
    bucket_tagging = s3.BucketTagging(s3_bucket_name)
    response = bucket_tagging.put(
        Tagging={
            'TagSet': [
                {
                    'Key': 'pcs:name',
                    'Value': s3_bucket_name
                },
            ]
        },
    )
The error I'm getting is:
botocore.exceptions.ClientError: An error occurred (AccessDenied) when calling the GetBucketTagging operation: Access Denied
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "tagging.py", line 91, in <module>
tagging()
File "tagging.py", line 71, in tagging
'Value': s3_bucket_name
File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(*args, **params)
File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/botocore/client.py", line 395, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ec2-user/compass_backend/compass_backend/lib64/python3.7/site-packages/botocore/client.py", line 725, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (AccessDenied) when calling the PutBucketTagging operation: Access Denied
Am I passing the tag parameters wrong? I got this from the Boto3 documentation itself.
I couldn't find a way to catch the exception; however, this worked for me:
tagging_client = boto3.client('resourcegroupstaggingapi')
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')

for bucket in s3.buckets.all():
    s3_bucket = bucket
    s3_bucket_name = s3_bucket.name
    bucket_tagging = s3.BucketTagging(s3_bucket_name)
    try:
        response = s3_client.get_bucket_tagging(Bucket=s3_bucket_name)
        a = response
    except ClientError:
        response = tagging_client.tag_resources(
            ResourceARNList=[
                "arn:aws:s3:::" + bucket.name
            ],
            Tags={
                'pcs:name': bucket.name
            }
        )
Please note that you'll need the additional "resource tagging" policy attached to your role.
Hope this helps. Cheers.
I took out the try sections and ran this version of your code:
import boto3

s3_resource = boto3.resource('s3')
bucket_tagging = s3_resource.BucketTagging('my-bucket-name')
response = bucket_tagging.put(
    Tagging={
        'TagSet': [
            {
                'Key': 'pcs:name',
                'Value': 'stackoverflow'
            },
        ]
    },
)
It worked fine.
Therefore, there must be something else that is causing your request to fail. You might want to check AWS CloudTrail to see if there is a hint as to why the request was denied.
"get_bucket_tagging" throws NoSuchTagSet when there are no tags. for testing create a tag first before run test or Catch the exception and create tags.

Docker-py client gives "invalid client port specification"

I am using the docker-py client to create containers on an as-needed basis. For this, I use a generator to come up with a port number and try to run the httpd image on that host port. But the client returns ("invalid port specification: "port number here"") for any port number that I try to use.
Below is sample code that I am trying:
import docker

client = docker.from_env()
container = client.containers.run(image="httpd", ports={'80/tcp': 43545}, detach=True)
To note: The number 43545 does not have any significance here.
Docker details:
Client - 19.03.6
API Version - 1.40
Engine: 19.03.6
Error:
File "/project/api/.venv/lib/python3.7/site-packages/docker/models/containers.py", line 803, in run
detach=detach, **kwargs)
File "/project/api/.venv/lib/python3.7/site-packages/docker/models/containers.py", line 861, in create
resp = self.client.api.create_container(**create_kwargs)
File "/project/api/.venv/lib/python3.7/site-packages/docker/api/container.py", line 429, in create_container
return self.create_container_from_config(config, name)
File "/projectapi/.venv/lib/python3.7/site-packages/docker/api/container.py", line 440, in create_container_from_config
return self._result(res, True)
File "/projectpi/.venv/lib/python3.7/site-packages/docker/api/client.py", line 267, in _result
self._raise_for_status(response)
File "/projectapi/.venv/lib/python3.7/site-packages/docker/api/client.py", line 263, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/projectyter/api/.venv/lib/python3.7/site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 400 Client Error: Bad Request ("invalid port specification: "43545"")
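Since the question mentions that the host port comes out of a generator, one thing worth ruling out is the value's type: the daemon rejects host ports it cannot parse, so a string with stray characters (for example a trailing newline) produces exactly this 400. A minimal sketch that normalises the generated value before passing it to docker-py; the run_httpd helper is an illustrative addition:

import docker

client = docker.from_env()

def run_httpd(host_port):
    # Coerce whatever the generator yields (e.g. the string "43545\n")
    # to a plain int before it reaches the Docker API.
    host_port = int(str(host_port).strip())
    return client.containers.run(
        image="httpd",
        ports={'80/tcp': host_port},  # container port 80 -> host port
        detach=True,
    )

container = run_httpd(43545)
print(container.short_id)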

Flask Deployment to AWS getting permission errno 13

I have a working app on localhost, but when I deployed it to AWS it shows several errors. It can receive the image I send, but it can't save it. I tried changing permissions with chmod, but it still isn't working. This is the first time I am deploying a Flask app, so I don't know much about it. My code is as follows:
***modules***
ImageFile.LOAD_TRUNCATED_IMAGES = True
app = flask.Flask(__name__)

@app.route('/', methods=['GET', 'POST'])
def hello():
    return 'hey there!!!'

@app.route('/imageupload', methods=['GET', 'POST'])
def handle_request():
    try:
        imagefile = flask.request.files['image']
        filename = werkzeug.utils.secure_filename(imagefile.filename)
        print("\nReceived image file name:" + imagefile.filename)
        imagefile.save(filename)
        print("hey")
        for image in glob.glob('./frames/*.*'):
            img_filter(imagefile, image)
        json = flask.request.values['Id']
        return {"status": "true", "message": "Uploaded Successfully", "Id": json}
    except:
        return {"status": "false"}

if __name__ == '__main__':
    app.run()
And these are the errors:
Received image file name:1.jpg
ERROR:image_flask:Exception on /imageupload [POST]
Traceback (most recent call last):
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/flask/app.py", line 2446, in ws$
response = self.full_dispatch_request()
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/flask/app.py", line 1951, in fu$
rv = self.handle_user_exception(e)
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/flask/app.py", line 1820, in ha$
reraise(exc_type, exc_value, tb)
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/flask/_compat.py", line 39, in $
raise value
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/flask/app.py", line 1949, in fu$
rv = self.dispatch_request()
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/flask/app.py", line 1935, in di$
return self.view_functions[rule.endpoint](**req.view_args)
File "/var/www/html/pic-edit/image_flask.py", line 26, in handle_request
imagefile.save(f'/home/ubuntu/pic-edit/{filename}')
File "/home/ubuntu/pic-edit/env/lib/python3.6/site-packages/werkzeug/datastructures.py", li$
dst = open(dst, "wb")
PermissionError: [Errno 13] Permission denied: '/home/ubuntu/pic-edit/1.jpg'
You don't have the right permissions. Try opening up the folder first with chmod -R 700 /home/ubuntu/pic-edit/
Please note that opening up the permissions like this can be dangerous; I would rather assign the permissions to the user the Flask app runs as.
For further investigation: which user are you running the Flask app as, and how do you start the app?
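To answer the "which user" question empirically, the app itself can log the account it runs under and save into a directory that account can write to. A short sketch; the uploads path is hypothetical:

import getpass
import os

UPLOAD_DIR = "/home/ubuntu/pic-edit/uploads"  # hypothetical dedicated folder

# Under mod_wsgi/nginx setups this is often www-data or apache, not ubuntu;
# that account is the one that needs write access to the target folder.
print("Flask runs as:", getpass.getuser(), "uid:", os.getuid())

os.makedirs(UPLOAD_DIR, exist_ok=True)
# then, inside the view:
# imagefile.save(os.path.join(UPLOAD_DIR, filename))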
Also, you might change the handler function to:
def handle_request():
    try:
        f = request.files['file']
        f.save(secure_filename(f.filename))
        json = flask.request.values['Id']
        return {"status": "true", "message": "Uploaded Successfully", "Id": json}
    except:
        return {"status": "false"}
Note that you need to import request: from flask import request
I found where I went wrong: it was the permissions on the folder. I had already tried chmod 755 on it, but that did not work. After that I tried
chmod a+rwx
and that worked.
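chmod a+rwx makes the directory world-writable. A narrower alternative, once the WSGI user is known, is to hand the folder to that user and keep group/other access tight. A sketch, assuming the server user is www-data (an assumption; use whatever user the earlier check reports) and the same hypothetical uploads path:

import os
import shutil
import stat

UPLOAD_DIR = "/home/ubuntu/pic-edit/uploads"  # hypothetical, as above

# Owner gets rwx, group gets r-x, others get nothing (0o750)
shutil.chown(UPLOAD_DIR, user="www-data", group="www-data")
os.chmod(UPLOAD_DIR, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)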

Error on deploying Python app to AWS Lambda

I have built a Python Tornado app and am trying to deploy it to AWS Lambda using Zappa, but I am getting the error Error: Warning! Status check on the deployed lambda failed. A GET request to '/' yielded a 502 response code.
My folder structure inside the root folder is:
├── amortization.py
├── config.py
├── dmi-amort-dev-1557138776.zip
├── main.py
├── requirements.txt
├── venv
│   ├── bin
│  
└── zappa_settings.json
zappa deploy dev gives me:
Calling deploy for stage dev..
Downloading and installing dependencies..
- pandas==0.24.2: Using locally cached manylinux wheel
- numpy==1.16.3: Using locally cached manylinux wheel
- sqlite==python3: Using precompiled lambda package
Packaging project as zip.
Uploading dmi-amort-dev-1557143681.zip (30.8MiB)..
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 32.3M/32.3M [00:19<00:00, 1.94MB/s]
Scheduling..
Scheduled dmi-amort-dev-zappa-keep-warm-handler.keep_warm_callback with expression rate(4 minutes)!
Uploading dmi-amort-dev-template-1557143718.json (1.5KiB)..
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1.56K/1.56K [00:00<00:00, 10.6KB/s]
Waiting for stack dmi-amort-dev to create (this can take a bit)..
75%|█████████████████████████████████████████████████████████████████████████████████████████████████████▎ | 3/4 [00:09<00:04, 5.00s/res]
Deploying API Gateway..
Error: Warning! Status check on the deployed lambda failed. A GET request to '/' yielded a 502 response code.
zappa tail gives me:
Traceback (most recent call last):
File "/var/task/handler.py", line 602, in lambda_handler
return LambdaHandler.lambda_handler(event, context)
File "/var/task/handler.py", line 245, in lambda_handler
handler = cls()
File "/var/task/handler.py", line 142, in __init__
wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
AttributeError: module 'main' has no attribute 'app'
zappa_settings.json:
{
"dev": {
"app_function": "main.app",
"aws_region": "ap-south-1",
"profile_name": "default",
"project_name": "dmi-amort",
"runtime": "python3.6",
"s3_bucket": "zappa-mekp457ye",
"manage_roles": false,
"role_name": "lambda-role",
}
}
main.py:
import tornado.web
from tornado.ioloop import IOLoop
from tornado.web import MissingArgumentError

from config import get_arguments
from amortization import get_amort_schedule


class MainHandler(tornado.web.RequestHandler):
    def prepare(self):
        """Checking if all the required parameters are present."""
        if self.request.method != 'POST':
            self.write_error(status_code=405, message="Method not allowed")
            return
        self.parameters = dict()
        for key in get_arguments():
            try:
                self.parameters[key] = self.get_argument(key)
            except MissingArgumentError:
                self.write_error(status_code=400,
                                 message="Missing Argument(s)")
                return
        # checking if 'label' is provided
        if 'label' in self.request.arguments.keys():
            self.parameters['label'] = self.get_argument('label')
        # Set up response dictionary.
        self.response = dict()

    def get(self, *args, **kwargs):
        self.write_error(status_code=405, message="Method not allowed")

    def post(self, *args, **kwargs):
        """Executes the main logic part."""
        self.response = get_amort_schedule(self.parameters)
        self.write_json()

    def set_default_headers(self):
        """Sets content-type as 'application/json' for response as JSON."""
        self.set_header('Content-Type', 'application/json')

    def write_error(self, status_code, **kwargs):
        """Invokes when error occurs in processing the request."""
        if 'message' not in kwargs:
            if status_code == 405:
                kwargs['message'] = 'Invalid HTTP method.'
            else:
                kwargs['message'] = 'Unknown error.'
        kwargs["error"] = True
        self.set_status(status_code=status_code)
        self.response = dict(kwargs)
        self.write_json()

    def write_json(self):
        """Responsible for writing the response."""
        if "status" in self.response:
            self.set_status(self.response.get("status"))
        self.set_default_headers()
        self.write(self.response)
        self.finish()


def main():
    app = tornado.web.Application([
        (r'/', MainHandler),
    ], debug=True)
    # server = HTTPServer(app)
    # server.bind(8888)
    # server.start(0)
    app.listen()
    # app.run(host='0.0.0.0')
    IOLoop.current().start()


if __name__ == '__main__':
    main()
What is the mistake here and how can I fix it?
It looks like the deployment is succeeding, but when Zappa checks to see if the code is working, the return code is 502, which suggests that the lambda function is failing to run in the lambda environment.
Taking a look at the logs, the critical line is:
AttributeError: module 'main' has no attribute 'app'
And this is true: if we look at your code, at no point do you expose an attribute called app in main.py.
I'm not experienced with Tornado, but I suspect that if you move the declaration of app out of the main() function and into the root scope, then the handler should succeed.
For example:
# rest of the file...
        self.finish()

app = tornado.web.Application([
    (r'/', MainHandler),
], debug=True)
app.listen()

def main():
    IOLoop.current().start()

if __name__ == '__main__':
    main()
