Unable to run Python code in an Azure Function - python-3.x

I have __init__.py and blobquickstartv12.py within the same Azure Function "Test-v3". While __init__.py is a blob trigger, blobquickstartv12.py has the Python code that I want to run. The only way I am able to run the code in blobquickstartv12.py is to paste the entire thing into the main() function of __init__.py.
I tried using from blobquickstartv12 import load, where load is a function in my blobquickstartv12.py code, but that gave me Exception: ModuleNotFoundError: No module named 'blobquickstartv12'.
Can anyone tell me how I can call my custom code from within __init__.py?
This is how the structure of my Azure Function looks (folder screenshot not shown):
Here is my code in __init__.py:
import logging
import azure.functions as func
import pandas as pd
import numpy as np
from pandas import ExcelFile
from pandas import ExcelWriter
from datetime import datetime, timedelta
from azure.storage.blob import BlockBlobService
import pyodbc
import sys
import os
from io import StringIO
import pkgutil

from . import blobquickstartv12

def main(myblob: func.InputStream):
    logging.info(f"Python blob trigger function processed blob \n"
                 f"Name: {myblob.name}\n"
                 f"Blob Size: {myblob.length} bytes")
    load = blobquickstartv12.load()
Here is my code for blobquickstartv12.py:
import pyodbc
from datetime import datetime

class load:
    # CODE FOR CONNECTING TO THE SQL DATABASE
    SERVER = 'xxxxxx.database.windows.net'
    DATABASE = 'XYZ'
    username = 'USERNAME'
    pwd = 'PASSWORD'
    driver = '{ODBC Driver 17 for SQL Server}'
    cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' + SERVER + ';PORT=1433;DATABASE=' + DATABASE + ';UID=' + username + ';PWD=' + pwd)
    cursor = cnxn.cursor()
    print("Connected to Azure SQL")
    #sqlcommand = ("INSERT INTO Stage.File(File_ID,File_type) VALUES (1235,'D')")
    Curr_dt = datetime.now()
    BLOB_STORAGEACCOUNTNAME = "blobstorage"
    BLOB_STORAGEACCOUNTKEY = "AccountKey"
    BLOBNAME = "BlobName"
    CONTAINERNAME = "ContainerName"
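Note that everything in the body of the load class above runs once, when the module is first imported; calling blobquickstartv12.load() afterwards only creates an empty instance. A minimal sketch of the same logic wrapped in a plain function instead (connection values are the placeholders from the question):

# blobquickstartv12.py -- sketch with the logic in a callable function
import pyodbc

def load():
    server = 'xxxxxx.database.windows.net'
    database = 'XYZ'
    username = 'USERNAME'
    pwd = 'PASSWORD'
    driver = '{ODBC Driver 17 for SQL Server}'
    # Connection runs when load() is called, not at import time
    cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' + server +
                          ';PORT=1433;DATABASE=' + database +
                          ';UID=' + username + ';PWD=' + pwd)
    print("Connected to Azure SQL")
    return cnxn, cnxn.cursor()

With this shape, from . import blobquickstartv12 followed by cnxn, cursor = blobquickstartv12.load() runs the connection code on every invocation.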

Update:
Please check the structure. On my side there is no problem: the code can import blobquickstartv12 fine.
This is the doc on the folder structure of an Azure Function:
https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python#folder-structure
This is the doc of how to import:
https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python#import-behavior
Original Answer:
Importing a module from another module should be done like this:
For example, I have a dog.py and I want to use it.
This is the dog.py:
class Dog:
    def __init__(self, name):
        super().__init__()
        self.name = name

    def showdog(self):
        print("This is a dog!")
In the __init__.py, you should use this:
from . import dog
mydog = dog.Dog("Woodie")
It works fine on my side.
This is the structure:
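The structure screenshot is not shown here; based on the folder-structure doc linked above, the layout is roughly this (app and function names are assumed):

FunctionApp/
    MyFunction/
        __init__.py
        dog.py
        function.json
    host.json
    requirements.txt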

Related

How to pytest a PyQt application that uses argparse for input parameters

I have a PyQt application that uses argparse to pass some arguments.
I managed to write a simple test to see if the app starts,
but I cannot set/mock the argparse arguments.
I know this because inside the code I have some try/except blocks like this:
try:
    if args.erase_data:
        pass
except NameError:
    logger.error("Error in parsing erase_data input argument \n")
which fail during the tests but not when I run the app.
I tried this to mock args:
import os
import pathlib
# import pdb
import sys
from unittest import mock

import pytest
from PyQt5 import QtTest
from PyQt5.QtWidgets import *
from pytestqt.plugin import QtBot
from pytestqt.exceptions import capture_exceptions  # needed for capture_exceptions below

sys.path.append(os.getcwd())
src_dir = pathlib.Path(__file__).parents[1].absolute()
print(src_dir)
sys.path.append(src_dir.as_posix())
GUI = __import__("GUI")

@pytest.fixture(scope="module")
def qtbot_session(qapp, request):
    result = QtBot(qapp)
    with capture_exceptions() as e:
        print(getattr(e, "message", repr(e)))
        yield result
    print(" TEARDOWN qtbot")

@pytest.fixture(scope="module")
def Viewer(request):
    with mock.patch.object(sys, "argv", ["", '-d', '2']):
        print("mocking sys argv")
        print(sys.argv)
        # pdb.set_trace()
        app, app_window = GUI.main()
        qtbotbis = QtBot(app)
        QtTest.QTest.qWait(0.5 * 1000)
        assert app_window.isVisible()
        return app, app_window, qtbotbis
but args is still not set.
Any idea how to solve it?
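Since the question doesn't show where GUI.main() calls parse_args, one possible workaround is to patch parse_args itself so the test controls the resulting Namespace directly. This is an untested sketch; the argument names erase_data and d are assumed from the snippets above:

import argparse
from unittest import mock

def test_viewer_with_mocked_args(qtbot):
    # Every ArgumentParser in GUI.main() will now return this Namespace,
    # regardless of sys.argv (attribute names assumed from the question)
    fake_args = argparse.Namespace(erase_data=False, d=2)
    with mock.patch("argparse.ArgumentParser.parse_args", return_value=fake_args):
        app, app_window = GUI.main()
        qtbot.addWidget(app_window)
        assert app_window.isVisible()

Patching parse_args sidesteps sys.argv entirely, which also avoids pytest's own command-line arguments leaking into the application's parser.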

How to get the playlist_id from a playlist using spotipy?

So I'm trying to figure out how to get the playlist_id of the playlist I created, but I have had no luck finding which method to use, even after reading the spotipy documentation.
import spotipy
import json
import sys
from spotipy.oauth2 import SpotifyOAuth
import requests
import os
import dotenv
from json.decoder import JSONDecodeError

SPOTIPY_CLIENT_ID = "secret"
SPOTIPY_CLIENT_SECRET = "secret"

sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope="playlist-modify-private",
                                               client_id=SPOTIPY_CLIENT_ID,
                                               client_secret=SPOTIPY_CLIENT_SECRET,
                                               redirect_uri="https://google.com",
                                               cache_path="token.txt"))

user_id = sp.current_user()["id"]  # grabs the user id
sp.user_playlist_create(user_id, "Funky Music", public=False, collaborative=False, description="Test")
This is how far I've gotten into the code.
playlist= sp.user_playlist_create(user_id,"Funky Music",public=False,collaborative=False, description="Test")
playlist_id = playlist["id"]
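user_playlist_create returns the full playlist object, so the id is available immediately, as above. If you instead need the id of a playlist created earlier, a sketch using current_user_playlists() (note that listing private playlists also requires the playlist-read-private scope):

# Look up an existing playlist's id by name
playlists = sp.current_user_playlists()
for item in playlists["items"]:
    if item["name"] == "Funky Music":
        playlist_id = item["id"]
        break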

How to set extra_vars while running ansible-playbook programmatically in python3.5? (Ansible version - 2.8)

import os
import json
from collections import namedtuple
from ansible import context
from ansible.module_utils.common.collections import ImmutableDict
from ansible.utils.vars import load_extra_vars
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.playbook_executor import PlaybookExecutor

def execute_ansible_playbook(CLOUD_TO_USE=None, PLAYBOOK=None):
    playbook_path = PLAYBOOK
    #inventory_path = "hosts"
    #Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check', 'diff', 'listhosts', 'listtasks', 'listtags', 'syntax'])
    loader = DataLoader()
    passwords = dict(vault_pass='secret')
    inventory = InventoryManager(loader=loader, sources='inventory/' + CLOUD_TO_USE)
    #inventory = InventoryManager(loader=loader, sources='localhost')
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    executor = PlaybookExecutor(
        playbooks=[playbook_path],
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords
    )
    results = executor.run()
    print(results)
I got this code from "Run Ansible playbook programmatically?".
It runs properly for other Ansible playbooks, but now I want to pass extra_vars to a playbook, and I couldn't find a proper answer.
How can I do that?
FWIW, use ansible-runner. The documentation is not complete; all parameters are described in the source.
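Another option that often comes up for plain Ansible is to set the extra vars on the VariableManager before building the executor. A sketch, with the caveat that _extra_vars is a private attribute and may change between Ansible releases:

variable_manager = VariableManager(loader=loader, inventory=inventory)
# Mirrors what --extra-vars would populate on the CLI; private API, so
# treat this as an assumption rather than a stable interface
variable_manager._extra_vars = {'my_var': 'my_value'}

executor = PlaybookExecutor(
    playbooks=[playbook_path],
    inventory=inventory,
    variable_manager=variable_manager,
    loader=loader,
    passwords=passwords
)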

Why is multiprocessing not working with python dash framework - Python3.6

I'm trying to use the multiprocessing library to split a dataframe into parts, process them on multiple CPU cores, and then concatenate the results back into a final dataframe in a Python Dash application. The code works fine when I try it outside the Dash application (when I run it standalone, without enclosing it in a Dash application), but when I enclose the same code in a Dash application, I get an error. I have shown the code below.
I have tried the multiprocessing code outside the Dash framework and it works absolutely fine.
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import flask
import dash_table_experiments as dt
import dash_table
import dash.dependencies
import base64
import time
import os
import pandas as pd
from docx import *
from docx.text.paragraph import Paragraph
from docx.text.paragraph import Run
import xml.etree.ElementTree as ET
import multiprocessing as mp
from multiprocessing import Pool
from docx.document import Document as doctwo
from docx.oxml.table import CT_Tbl
from docx.oxml.text.paragraph import CT_P
from docx.table import _Cell, Table
import io
import csv
import codecs
import numpy as np

app = dash.Dash(__name__)
application = app.server
app.config.supress_callback_exceptions = True

app.layout = html.Div(children=[
    html.Div([
        html.Div([
            html.H4(children='Reader'),
            html.Br(),
        ], style={'text-align': 'center'}),
        html.Br(),
        html.Br(),
        html.Div([
            dcc.Upload(html.Button('Upload File'), id='upload-data', style=dict(display='inline-block')),
            html.Br(),
        ]),
        html.Div(id='output-data-upload'),
    ])
])

@app.callback(Output('output-data-upload', 'children'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename')])
def update_output(contents, filename):
    if contents is not None:
        content_type, content_string = contents.split(',')
        decoded = base64.b64decode(content_string)
        document = Document(io.BytesIO(decoded))
        combined_df = pd.read_csv('combined_df.csv')

        def calc_tfidf(input1):
            input1 = input1.reset_index(drop=True)
            input1['samplecol'] = 'sample'
            return input1

        num_cores = mp.cpu_count() - 1  # number of cores on your machine
        num_partitions = mp.cpu_count() - 1  # number of partitions to split dataframe
        df_split = np.array_split(combined_df, num_partitions)
        pool = Pool(num_cores)
        df = pd.concat(pool.map(calc_tfidf, df_split))
        pool.close()
        pool.join()
        return len(combined_df)
    else:
        return 'No File uploaded'

app.css.append_css({'external_url': 'https://codepen.io/plotly/pen/EQZeaW.css'})

if __name__ == '__main__':
    app.run_server(debug=True)
The above dash application takes as input any file. Upon uploading the file in the front end, a local CSV file (any file, in my case it is combined_df.csv) is loaded into a dataframe. Now I want to split the dataframe into parts using multiprocessing, process it and combine it back. But the above code results in the following error:
AttributeError: Can't pickle local object 'update_output.<locals>.calc_tfidf'
What's wrong with this piece of code?
Okay, I've figured it out now! The problem is that the function calc_tfidf was not defined as a global function. I changed it to be a global function and it worked perfectly.
Simple checks, when left unaddressed, can at times lead to days of redundant effort! :(
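For reference, a minimal sketch of that fix, with calc_tfidf hoisted to module level so multiprocessing can pickle it by name (the callback body is trimmed to the relevant lines):

# Module level, outside any callback, so Pool workers can import it
def calc_tfidf(input1):
    input1 = input1.reset_index(drop=True)
    input1['samplecol'] = 'sample'
    return input1

@app.callback(Output('output-data-upload', 'children'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename')])
def update_output(contents, filename):
    if contents is None:
        return 'No File uploaded'
    combined_df = pd.read_csv('combined_df.csv')
    df_split = np.array_split(combined_df, mp.cpu_count() - 1)
    with Pool(mp.cpu_count() - 1) as pool:
        df = pd.concat(pool.map(calc_tfidf, df_split))
    return len(combined_df)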

Cassandra ExecutionResult on importlib._bootstrap failed

I am trying to use multiprocessing to pull data from Cassandra, but I'm facing an issue. I want to pull the data for a single key or multiple keys using the multiprocessing support provided by Cassandra.
My cassandra_db class:
from cassandra.cluster import Cluster
import cassandra
import pandas as pd
import numpy as np
from datetime import datetime
import sys
import os
from threading import Event
import itertools
from multiprocessing import Pool
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import tuple_factory

ip_address = '127.0.0.1'

def pandas_factory(colnames, rows):
    # Assumed helper: referenced below but not shown in the question
    return pd.DataFrame(rows, columns=colnames)

class cassandra_db(object):
    concurrency = 2  # chosen to match the default in execute_concurrent_with_args

    def __init__(self, process_count=None):
        self.pool = Pool(processes=process_count, initializer=self._setup)

    @classmethod
    def _setup(cls):
        cls.session = Cluster([ip_address]).connect(keyspace='test')
        cls.session.row_factory = pandas_factory
        cls.prepared = cls.session.prepare('SELECT * FROM tr_test WHERE key=?')

    def close_pool(self):
        self.pool.close()
        self.pool.join()

    def get_results(self, params):
        try:
            xrange
        except NameError:
            xrange = range
        params = list(params)
        print("-----> ", params)
        print("-----+>", self.concurrency)
        self.pool.map(_multiprocess_get, (params[n:n + self.concurrency] for n in xrange(0, len(params), self.concurrency)))

    @classmethod
    def _results_from_concurrent(cls, params):
        return execute_concurrent_with_args(cls.session, cls.prepared, params)

def _multiprocess_get(params):
    return cassandra_db._results_from_concurrent(params)
My calling code:
import os
import pandas as pd
import sys
relative_path='/home/anji'
sys.path.append(os.path.join(relative_path ,'commons','Database Operations'))
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra_db import cassandra_db
from cassandra.policies import ConstantReconnectionPolicy
processes =2
con_db = cassandra_db(processes)
keys=[(1,),(2,)]
df = con_db.get_results(keys)
print("Result",df.head())
Error:
multiprocessing.pool.MaybeEncodingError: Error sending result: '[[ExecutionResult(success=True, result_or_exc=<cassandra.cluster.ResultSet object at 0x7fa93658bbe0>), ExecutionResult(success=True, result_or_exc=<cassandra.cluster.ResultSet object at 0x7fa936a2e0f0>)]]'. Reason: 'PicklingError("Can't pickle <class 'importlib._bootstrap.ExecutionResult'>: attribute lookup ExecutionResult on importlib._bootstrap failed",)'
I am trying to execute this for 2 keys but am facing this issue. Can anyone help me solve it?
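One way around the pickling error is to unwrap each ExecutionResult inside the worker and return only plain, picklable objects to the parent process. A sketch, assuming the pandas row factory from the class above (note that _current_rows is a private attribute of ResultSet):

def _multiprocess_get(params):
    results = cassandra_db._results_from_concurrent(params)
    # ExecutionResult and ResultSet objects cannot cross the process
    # boundary, so extract the rows (DataFrames under the assumed pandas
    # row factory) before returning
    return [res.result_or_exc._current_rows if res.success else None
            for res in results]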
