Display image from url using holoviews - python-3.x

How can I display an image from a URL? The following results in an error:
import urllib.request
import holoviews as hv

img = urllib.request.urlopen('https://www.edgystl.com/wp-content/uploads/2018/01/leather-bicker-jacket-model-street-style-men-1-e1530998880646.jpg')
hv.RGB(img)  # fails: RGB expects array-like image data, not an HTTP response object

HoloViews RGB elements generally expect a NumPy array. The easiest way of getting an array from an image is to download it with requests and open it with the PIL (Pillow) library. Here's what that looks like:
from io import BytesIO

import holoviews as hv
import numpy as np
import requests
from PIL import Image

url = 'https://www.edgystl.com/wp-content/uploads/2018/01/leather-bicker-jacket-model-street-style-men-1-e1530998880646.jpg'
response = requests.get(url)
img = Image.open(BytesIO(response.content))
hv.RGB(np.array(img))
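As a side note, in a notebook the element only renders once a plotting backend has been loaded; a minimal sketch, assuming the Bokeh backend:
import holoviews as hv
hv.extension('bokeh')  # load a plotting backend so elements render inline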

Turns out it can be done with skimage:
import holoviews as hv
from skimage import io

img = io.imread('https://www.edgystl.com/wp-content/uploads/2018/01/leather-bicker-jacket-model-street-style-men-1-e1530998880646.jpg')
hv.RGB(img)  # io.imread already returns a NumPy array, so no conversion is needed

Related

How to make inferences against a Keras model hosted on AWS SageMaker via an AWS Lambda function?

I have a pre-trained Keras model hosted on AWS SageMaker. I have an endpoint, and I can make successful predictions from an Amazon SageMaker notebook instance.
There, I feed the model a .png image like the following and it returns the correct prediction.
import numpy as np
import sagemaker
from sagemaker.tensorflow.model import TensorFlowPredictor
from skimage.io import imread
from skimage.transform import resize

# s3, bucketname, filename_1 and sagemaker_session are defined earlier in the notebook
s3.Bucket(bucketname).download_file(filename_1, 'normal.png')
file_name = 'normal.png'
endpoint = 'tensorflow-inference-0000-11-22-33-44-55-666'
predictor = TensorFlowPredictor(endpoint, sagemaker_session)
data = np.array([resize(imread(file_name), (137, 310, 3))])
predictor.predict(data)
Now I want to make predictions from a mobile application. For that I wrote a Lambda function in Python and attached an API Gateway to it. My Lambda function is the following:
import os
import sys
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CWD, "lib"))
import json
import base64
import boto3
import numpy as np
from scipy import signal
from scipy.signal import butter, lfilter
from scipy.io import wavfile
import scipy.signal as sps
import io
from io import BytesIO
import matplotlib.pylab as plt
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from datetime import datetime
from skimage.io import imread
from skimage.transform import resize
from PIL import Image
ENDPOINT_NAME = 'tensorflow-inference-0000-11-22-33-44-55-666'
runtime= boto3.client('runtime.sagemaker')
def lambda_handler(event, context):
    s3 = boto3.client("s3")
    # retrieving data from event.
    get_file_content_from_postman = event["content"]
    # decoding data.
    decoded_file_name = base64.b64decode(get_file_content_from_postman)
    image = Image.open(io.BytesIO(decoded_file_name))
    data = np.array([resize(imread(image), (137, 310, 3))])
    response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME, ContentType='text/csv', Body=data)
    result = json.loads(response['Body'].read().decode())
    return result
The third-to-last line gives me the error 'PngImageFile' object has no attribute 'read'.
Any idea what I am missing here?
If io.BytesIO(decoded_file_name) correctly represents your image data (though the name decoded_file_name suggests it's only a file name, not actual image data), then you don't need PIL at all. Just pass it to imread directly:
data = np.array([resize(imread(io.BytesIO(decoded_file_name)), (137, 310, 3))])
I was missing one thing that was causing this error. After building the image array, I converted it to a plain Python list and then json.dumps'd that list (of lists) as the request payload. Below is the code for reference.
import os
import sys
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CWD, "lib"))
import json
import base64
import boto3
import numpy as np
import io
from io import BytesIO
from skimage.io import imread
from skimage.transform import resize
# grab environment variable of Lambda Function
ENDPOINT_NAME = os.environ['ENDPOINT_NAME']
runtime= boto3.client('runtime.sagemaker')
def lambda_handler(event, context):
    s3 = boto3.client("s3")
    # retrieving data from event.
    get_file_content_from_postman = event["content"]
    # decoding data.
    decoded_file_name = base64.b64decode(get_file_content_from_postman)
    data = np.array([resize(imread(io.BytesIO(decoded_file_name)), (137, 310, 3))])
    payload = json.dumps(data.tolist())
    response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME, ContentType='application/json', Body=payload)
    result = json.loads(response['Body'].read().decode())
    return result
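For reference, the client (Postman here) just base64-encodes the image into the "content" field before posting it to the API Gateway endpoint. A minimal sketch, where API_GATEWAY_URL is a hypothetical placeholder:
import base64
import json
import requests

API_GATEWAY_URL = 'https://example.execute-api.us-east-1.amazonaws.com/prod'  # hypothetical placeholder
with open('normal.png', 'rb') as f:
    payload = json.dumps({'content': base64.b64encode(f.read()).decode('utf-8')})
result = requests.post(API_GATEWAY_URL, data=payload).json()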

How can I resize an image with good quality without saving it to disk in Python?

I use this code, but it needs to save the image to disk:
from PIL import Image
import requests
from io import BytesIO
response = requests.get(url)
img = Image.open(BytesIO(response.content))
image = img.resize((W, H), Image.ANTIALIAS)
image.save('De7k.jpeg', optimize=True, quality=Quality)
If you would like to "save" the file while keeping it in memory instead of writing a file to disk, you can write it to another BytesIO object.
from PIL import Image
import requests
from io import BytesIO
response = requests.get(url)
img = Image.open(BytesIO(response.content))
image = img.resize((W, H), Image.ANTIALIAS)
output = BytesIO()
image.save(output, format="JPEG", optimize=True, quality=Quality)
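To get the compressed bytes back out of the buffer afterwards, read them directly or rewind and reuse it as a file; a small usage sketch:
jpeg_bytes = output.getvalue()  # the compressed JPEG bytes, ready to send onwards
output.seek(0)                  # or rewind and treat the buffer as a file again
round_tripped = Image.open(output)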

Sending a matplotlib image to pymsteams

I am using matplotlib to plot my image.
import pandas as pd
from matplotlib import pyplot as plt
x = ['09:30', '09:33', '09:40', '09:43', '09:50', '09:53', '10:00', '10:03', '10:10', '10:13']
y = ['3010.910000', '3011.650000', '3009.130000', '3011.500000', '3010.460000', '3010.950000', '3012.830000', '3013.120000', '3011.730000', '3010.130000']
matrix = pd.DataFrame({'Time': x, 'Quote': y})
matrix['Quote'] = matrix['Quote'].astype(float)
plt.plot('Time', 'Quote', data=matrix, color='mediumvioletred')
Here is the challenge now:
import pymsteams
web_hook = 'My Microsoft Teams URL https://outlook.office.com/webhook/blahblah'
teams_message = pymsteams.connectorcard(web_hook)
msg_section = pymsteams.cardsection()
msg_section.title('Title')
msg_section.addImage(image) #I want to add that plt image here
teams_message.addSection(msg_section)
teams_message.text("Some Message")
teams_message.send()
I have tried this (and I want this approach, keeping the image in an in-memory buffer):
import io
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
msg_section.addImage(buf.read())
I did try saving the image to the local drive 'c:/temp/'. The code did not give any error message, but the image on Teams was blank, even though the image in c:/temp is correct.
In summary: the PNG image has to be converted to a base64 data URI string. See the example below. Note that I'm using Python 3.6, and that image width seems to be limited in a Connector Card.
import numpy as np
import matplotlib.pyplot as plt
import base64
from io import BytesIO
import pymsteams
# generate fig
fig, ax = plt.subplots(1,1,figsize=(20,6))
ax.hist(np.random.normal(0, 1, 1000), bins=51, edgecolor='k', alpha=0.5);
buf = BytesIO()
fig.savefig(buf, format="png")
# get base64 string
data = base64.b64encode(buf.getbuffer()).decode("ascii")
encoded_fig = f"data:image/png;base64,{data}"
# send encoded_fig via webhook
web_hook = 'YOUR_WEBHOOK'
teams_message = pymsteams.connectorcard(web_hook)
msg_section = pymsteams.cardsection()
msg_section.title('Title')
msg_section.addImage(encoded_fig) #I want to add that plt image here
teams_message.addSection(msg_section)
teams_message.text("Some Message")
teams_message.send()
The same approach works for the card's activity image:
from base64 import b64encode
image_file = open('img/icon.png', 'rb').read()
ICON = "data:image/png;base64,{data}".format(data=b64encode(image_file).decode("ascii"))
# And in your Teams alert creation, you call:
section.activityImage(ICON)

Read a .jpg from RAM

from io import StringIO
from PIL import Image
import requests
response = requests.get(image.url)
# Works fine, but requires a disk write.
f = open('tmp.jpg', 'bw')
f.write(response.content)
img = Image.open('tmp.jpg')
# Fails with `OSError: cannot identify image file <_io.StringIO object at 0x7fb666238a68>`
#file = StringIO(str(response.content))
#img = Image.open(file)
I am trying to run the code from this tutorial, but in Python 3. The commented-out version is the closest I have come to the original idea of "get an image from the network into RAM and work with that". I don't mind using cv2 if it's easier. How do I write this code pythonically and efficiently?
As Mark Setchell said, you likely want BytesIO, not StringIO:
from io import BytesIO
from PIL import Image
import requests
response = requests.get(image.url)
file = BytesIO(response.content)
img = Image.open(file)
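As a side note, with streaming enabled requests exposes the raw response as a file-like object, which recent versions of Pillow can open directly (wrapping it in a buffer internally if it is not seekable); a sketch, reusing image.url from the question:
import requests
from PIL import Image

response = requests.get(image.url, stream=True)  # stream=True keeps the body as a file-like object
img = Image.open(response.raw)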

How to convert bokeh plots to BytesIO object to encode the plot with base64 module

I have a bokeh plot object in which I plot a sin(x) curve:
from math import *
from io import BytesIO
from bokeh.plotting import (figure, output_file, show)
from bokeh.io import (export_png, export_svgs)
import base64
import numpy as np
plot = figure(plot_width=1000, plot_height=500)
x = np.linspace(-2*np.pi, 2*np.pi, 1000)
y = np.array([sin(i) for i in x])
plot.line(x, y, line_width=1)
Now, instead of saving it to some HTML file with output_file('sine.html'), I want to create a BytesIO() object so that I can base64-encode it. The reason I want this is that with matplotlib I can export an image as a BytesIO() object and render it back smoothly in a Flask or Dash app, like this:
figfile = BytesIO()
plt.savefig(figfile, format='png')
plt.clf()
figfile.seek(0)
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png.decode('UTF-8')
and I want the same capability with bokeh.
Bokeh provides this functionality in bokeh.io.export.get_screenshot_as_png:
from bokeh.io.export import get_screenshot_as_png
img = get_screenshot_as_png(plot)
img is a PIL Image instance containing the rendered plot.
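Since the question explicitly asks for a BytesIO object and base64 encoding, the PIL image can then be pushed through a buffer exactly as in the matplotlib snippet above. A minimal sketch (note that get_screenshot_as_png relies on selenium plus a browser webdriver being available):
import base64
from io import BytesIO
from bokeh.io.export import get_screenshot_as_png

img = get_screenshot_as_png(plot)  # renders the plot headlessly via selenium
buf = BytesIO()
img.save(buf, format="PNG")        # write the PIL image into the in-memory buffer
figdata_png = base64.b64encode(buf.getvalue()).decode("UTF-8")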
Off-topic: This can also be used to have the plot displayed as a PNG in JupyterLab. Just call get_screenshot_as_png(plot) and you are done.
