Bokeh Hover Showing ??? instead of Date and Time - python-3.x

I have read lots of questions and answers here and cannot seem to fix my issue. I cannot get the hover tool in Bokeh to display the date and time correctly; I get ???, which I understand to mean that the hover tool cannot read the column in my DataFrame.
I have tried a number of fixes and I think what I have here should work.
Here is my code:
This is the code for the data collection:
import cv2 as cv
from datetime import datetime
import pandas as pd

background = None
status_list = [None, None]
times = []
df = pd.DataFrame(columns=['Start', 'End'])
video = cv.VideoCapture(0, cv.CAP_DSHOW)

while True:
    check, frame = video.read()
    status = 0
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (21, 21), 0)
    if background is None:
        background = gray
        continue
    delta_frame = cv.absdiff(background, gray)
    thresh_delta = cv.threshold(delta_frame, 40, 255, cv.THRESH_BINARY)[1]
    thresh_delta = cv.dilate(thresh_delta, None, iterations=2)
    (cnts, _) = cv.findContours(thresh_delta.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv.contourArea(contour) < 2000:
            continue
        status = 1
        (x, y, w, h) = cv.boundingRect(contour)
        cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    status_list.append(status)
    status_list = status_list[-2:]
    if status_list[-1] == 1 and status_list[-2] == 0:
        times.append(datetime.now())
    if status_list[-1] == 0 and status_list[-2] == 1:
        times.append(datetime.now())
    cv.imshow('Delta', delta_frame)
    cv.imshow('Threshold Frame', thresh_delta)
    cv.imshow('Colour Frame', frame)
    key = cv.waitKey(1)
    if key == ord('q'):
        if status == 1:
            times.append(datetime.now())
        break

for i in range(0, len(times), 2):
    df = df.append({'Start': times[i], 'End': times[i + 1]}, ignore_index=True)

video.release()
cv.destroyAllWindows()
And here is the code for the Bokeh graph:
%run Motion_Detection_Plotting.ipynb
from bokeh.plotting import figure, show, output_file
from bokeh.models import HoverTool, ColumnDataSource, formatters, TickFormatter
df['Start'] = pd.to_datetime(df.Start, format='%Y-%m-%D %H:%M:%S')
df['End'] = pd.to_datetime(df.End, format='%Y-%m-%D %H:%M:%S')
Source=ColumnDataSource(df.Start, df.End)
p = figure(x_axis_type = 'datetime', height = 200, width = 1000, title = 'Motion Graph', sizing_mode = 'stretch_width')
p.yaxis.minor_tick_line_color = None
p.yaxis.major_tick_in = 0
p.xaxis.major_tick_in = 0
p.yaxis.ticker = [0, 1]
p.ygrid.grid_line_color = None
hover = HoverTool(tooltips=[('Start', '@Start{%D/%m %H:%M:%S}'), ('End', '@End{%D/%m %H:%M:%S}')],
                  formatters={'@Start': 'datetime', '@End': 'datetime'})
p.add_tools(hover)
q = p.quad(left = df['Start'], right = df['End'], bottom = 0, top = 1, color = 'green')
output_file('graph2.html')
show(p)
And here is my output graph:
Graph with Hover showing ??? instead of date and time

You are very close to a working solution. You are defining a ColumnDataSource (CDS) but you are not using it, and the HoverTool tries to get the information it needs from the CDS.
Please see the minimal example below with a working HoverTool.
import pandas as pd
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import HoverTool, ColumnDataSource, formatters, TickFormatter
output_notebook()
# dummy data
df = pd.DataFrame({'Start':['2020-01-01 00:00:00'], 'End':['2020-01-01 00:15:00']})
df['Start'] = pd.to_datetime(df.Start, format='%Y-%m-%d %H:%M:%S')
df['End'] = pd.to_datetime(df.End, format='%Y-%m-%d %H:%M:%S')
# bokeh
source=ColumnDataSource(df)
p = figure(x_axis_type = 'datetime')
hover = HoverTool(tooltips=[('Start', '@Start{%D/%m %H:%M:%S}'), ('End', '@End{%D/%m %H:%M:%S}')],
                  formatters={'@Start': 'datetime', '@End': 'datetime'})
p.add_tools(hover)
# main change is here, data comes from CDS not from DataFrame
q = p.quad(left='Start', right='End', bottom=0, top=1, color='green', source=source)
show(p)
This is the output:

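For reference, the same change applied to the question's plotting code would look roughly like this (a sketch, untested against the motion-capture DataFrame; note that %d, lowercase, is the strftime day-of-month directive):

source = ColumnDataSource(df)  # df with datetime 'Start' and 'End' columns
p = figure(x_axis_type='datetime', height=200, width=1000, title='Motion Graph',
           sizing_mode='stretch_width')
hover = HoverTool(tooltips=[('Start', '@Start{%d/%m %H:%M:%S}'), ('End', '@End{%d/%m %H:%M:%S}')],
                  formatters={'@Start': 'datetime', '@End': 'datetime'})
p.add_tools(hover)
p.quad(left='Start', right='End', bottom=0, top=1, color='green', source=source)
show(p)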
Related

How to add labels on top of horizontal bar charts in matplotlib?

Can anyone help me add the city name on top of each horizontal bar?
I have done everything else; I only need to figure that out.
import pandas as pd
import matplotlib.pyplot as plt

df1 = pd.read_csv("city_populations.csv")

# selecting particular columns
df = df1[['name', 'group', 'year', 'value']]
year = df1['year']
df = df.sort_values(by=['value'], ascending=False)

# selecting rows with year 2020
curr_year = 2020
# create a boolean mask that is True where year == curr_year
curr_population = df['year'] == curr_year
curr_population = df[curr_population]
print(curr_population)

# drawing the graph
fig, ax = plt.subplots(figsize=(10, 8))

# reverse the row order to flip the barh plot
values = curr_population[::-1]['group']
labels = []
clrs = []
for x in values:
    if x == "India":
        clrs.append("#adb0ff")
    elif x == "Europe":
        clrs.append("#ffb3ff")
    elif x == "Asia":
        clrs.append('#90d595')
    elif x == "Latin America":
        clrs.append("#e48381")
    elif x == "Middle East":
        clrs.append("#aafbff")
    elif x == "North America":
        clrs.append("#f7bb5f")
    else:
        clrs.append("#eafb50")

bar_plot = ax.barh(curr_population[::-1]['name'], curr_population[::-1]['value'], color=clrs)
plt.show()
This is the code I have written to get the bar graph.
I need guidance on adding the labels on top of each bar.
You have to add the label option to the barh method:
bar_plot = ax.barh(curr_population[::-1]['name'], curr_population[::-1]['value'], color=clrs, label="test")
If you want to position a label more freely you can use something like this (taken from https://matplotlib.org/3.2.1/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py):
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')

autolabel(bar_plot)
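Note that the gallery snippet annotates vertical bars by their height. For barh the value of interest is the bar width, so a sketch adapted to horizontal bars (autolabel_h is just an illustrative name; bar_plot and ax come from the question's code) could look like this:

def autolabel_h(rects):
    """Annotate each horizontal bar with its width (the plotted value)."""
    for rect in rects:
        width = rect.get_width()
        ax.annotate('{}'.format(width),
                    xy=(width, rect.get_y() + rect.get_height() / 2),
                    xytext=(3, 0),  # 3 points horizontal offset
                    textcoords="offset points",
                    ha='left', va='center')

autolabel_h(bar_plot)

Call it after creating bar_plot and before plt.show().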

Plot not showing up using matplotlib

I am trying to plot true positive rate against false positive rate for two different cases. When plotting, the second curve sort of cancels out the display of the first.
import math
import numpy as np
import random
import os
import sys
from scipy.spatial import distance
from matplotlib import pyplot as plt
import pandas as pd

def fextract(fname1, fname2):
    file1 = open(fname1, 'r')
    file2 = open(fname2, 'r')
    cnt = 122
    data1 = file1.readlines()
    data2 = file2.readlines()
    print(len(data1))
    tpr = np.zeros((1, cnt))
    fpr = np.zeros((1, cnt))
    cnt2 = 0
    for x in data1:
        if cnt2 == 120:
            break
        tpr[0, cnt2] = float(x)
        cnt2 = cnt2 + 1
    cnt2 = 0
    for x in data2:
        if cnt2 == 120:
            break
        fpr[0, cnt2] = float(x)
        cnt2 = cnt2 + 1
    return tpr, fpr

def plotfig(x1, y1, x2, y2):
    f1 = plt.figure()
    lg = plt.plot(x2[0, :], y2[0, :], label="Skin Color (With Chrominance)")
    lg = plt.plot(x1[0, :], y1[0, :], label="Skin Color")
    plt.legend(loc='best')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.show()

tpr, fpr = fextract('tp.txt', 'fp.txt')
tpr2, fpr2 = fextract('tp2.txt', 'fp2.txt')
tpr3, fpr3 = fextract('tp3.txt', 'fp3.txt')
tpr4, fpr4 = fextract('tp7.txt', 'fp7.txt')
tpr5, fpr5 = fextract('tp8.txt', 'fp8.txt')
tpr6, fpr6 = fextract('tp9.txt', 'fp9.txt')

plotfig(fpr, tpr, fpr4, tpr4)
plotfig(fpr2, tpr2, fpr5, tpr5)
plotfig(fpr3, tpr3, fpr6, tpr6)
As seen above, the orange line stops midway while the blue line is plotted completely.

How to change the opacity of chosen scatter plot points

I want to create an interactive scatter plot where the user can select points with the cursor, so the chosen points are highlighted and the rest are faded.
Right now it only works if the color is changed. How can I change the opacity and keep the original colors?
import numpy as np
from numpy.random import rand
from matplotlib.widgets import LassoSelector
from matplotlib.path import Path
import matplotlib.pyplot as plt

class SelectFromCollection(object):
    def __init__(self, ax, collection, c, alpha_other=0.3):
        self.canvas = ax.figure.canvas
        self.collection = collection
        self.alpha_other = alpha_other
        self.xys = collection.get_offsets()
        self.Npts = len(self.xys)
        self.c = c
        # Ensure that we have separate colors for each object
        self.fc = collection.get_facecolors()
        if len(self.fc) == 0:
            raise ValueError('Collection must have a facecolor')
        elif len(self.fc) == 1:
            self.fc = np.tile(self.fc, (self.Npts, 1))
        self.lasso = LassoSelector(ax, onselect=self.onselect)
        self.ind = []

    def onselect(self, verts):
        path = Path(verts)
        self.ind = np.nonzero(path.contains_points(self.xys))[0]
        self.fc[:, -1] = self.alpha_other
        self.fc[self.ind, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()

    def disconnect(self):
        self.lasso.disconnect_events()
        self.fc[:, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()

np.random.seed(1)
x, y, c = rand(3, 100)
subplot_kw = dict(xlim=(0, 1), ylim=(0, 1), autoscale_on=False)
fig, ax = plt.subplots(subplot_kw=subplot_kw)
pts = ax.scatter(x, y, c=c, s=100)
selector = SelectFromCollection(ax, pts, c)
plt.show()
Solved. I used self.collection.get_facecolors() to get the RGBA values, then changed the alpha channel (column index 3) for the chosen indices like this:
fc = self.collection.get_facecolors()
fc[self.ind, 3] = 1
fc[others, 3] = self.alpha_other
self.collection.set_facecolors(fc)
cheers
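As a rough sketch of how that fix could slot into the class above (assuming `others` means the indices that were not selected), the updated onselect method might look like this:

    def onselect(self, verts):
        path = Path(verts)
        self.ind = np.nonzero(path.contains_points(self.xys))[0]
        fc = self.collection.get_facecolors()
        # indices outside the lasso selection
        others = np.setdiff1d(np.arange(self.Npts), self.ind)
        fc[self.ind, 3] = 1               # fully opaque for selected points
        fc[others, 3] = self.alpha_other  # fade everything else, keep original RGB
        self.collection.set_facecolors(fc)
        self.canvas.draw_idle()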

how to get the length of eyes and mouth using dlib

I am working on a yawn-detection project. I am using dlib and OpenCV to detect the face and landmarks in a video.
I want to get the length of the eyes and mouth.
This is what I have done so far:
import sys
import os
import dlib
import glob
from skimage import io
import cv2
import time

if len(sys.argv) != 3:
    print("")
    exit()

predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]
vidcap = cv2.VideoCapture('video.avi')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()

while vidcap.isOpened():
    success, image = vidcap.read()
    if success:
        win.clear_overlay()
        win.set_image(image)
        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(image, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(image, d)
            print(shape)
            print("Part 0: {}, Part 1: {}, Part 2: {} ...".format(shape.part(0), shape.part(1), shape.part(2)))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)
        win.add_overlay(dets)
        time.sleep(0.01)

cv2.destroyAllWindows()
vidcap.release()
Please help me get the length of the open eyes and mouth.
From this figure (the standard dlib 68-point facial landmark layout):
import Paths
import globals
from globals import ClassifierFiles
import numpy as np
import cv2
import time
import dlib
import math
import eyeCoordinates
import mouthCoordinates
from globals import Threshold
from globals import yawnFolder
import os
import openface

VIDEO_PATHS = []
readVideo('v.avi')  # test video of faces

def readVideo(video):
    global no, yes
    video_capture = cv2.VideoCapture(video)
    detector = dlib.get_frontal_face_detector()  # Face detector
    predictor = dlib.shape_predictor(ClassifierFiles.shapePredicter)  # Landmark identifier
    face_aligner = openface.AlignDlib(ClassifierFiles.shapePredicter)
    u = 0
    while True:
        ret, frame = video_capture.read()
        if frame is not None:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            # clahe_image = clahe.apply(gray)
            detections = detector(frame, 1)  # Detect the faces in the image
            for k, d in enumerate(detections):  # For each detected face
                shape = predictor(frame, d)  # Get coordinates
                vec = np.empty([68, 2], dtype=int)
                coor = []
                for i in range(1, 68):  # There are 68 landmark points on each face
                    # cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0,0,255), thickness=1)
                    coor.append([shape.part(i).x, shape.part(i).y])
                    vec[i][0] = shape.part(i).x
                    vec[i][1] = shape.part(i).y
                # Right-eye and left-eye coordinates
                rightEye = eyeCoordinates.distanceRightEye(coor)
                leftEye = eyeCoordinates.distanceLeftEye(coor)
                eyes = (rightEye + leftEye) / 2
                # Mouth coordinates
                mouth = mouthCoordinates.distanceBetweenMouth(coor)
                print(eyes, mouth)
                # prints both eyes' average distance
                # prints mouth distance
                break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == '__main__':
    VIDEO_PATHS = Paths.videosPaths()
    init()
eyeCoordinates File
import distanceFormulaCalculator

def distanceRightEye(c):
    eR_36, eR_37, eR_38, eR_39, eR_40, eR_41 = 0, 0, 0, 0, 0, 0
    eR_36 = c[35]
    eR_37 = c[36]
    eR_38 = c[37]
    eR_39 = c[38]
    eR_40 = c[39]
    eR_41 = c[40]
    x1 = distanceFormulaCalculator.distanceFormula(eR_37, eR_41)
    x2 = distanceFormulaCalculator.distanceFormula(eR_38, eR_40)
    return (x1 + x2) / 2

def distanceLeftEye(c):
    eL_42, eL_43, eL_44, eL_45, eL_46, eL_47 = 0, 0, 0, 0, 0, 0
    eL_42 = c[41]
    eL_43 = c[42]
    eL_44 = c[43]
    eL_45 = c[44]
    eL_46 = c[45]
    eL_47 = c[46]
    x1 = distanceFormulaCalculator.distanceFormula(eL_43, eL_47)
    x2 = distanceFormulaCalculator.distanceFormula(eL_44, eL_46)
    return (x1 + x2) / 2

def eyePoints():
    return [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]
Mouth Coordinates File
import distanceFormulaCalculator

def distanceBetweenMouth(c):
    m_60, m_61, m_62, m_63, m_64, m_65, m_66, m_67 = 0, 0, 0, 0, 0, 0, 0, 0
    m_60 = c[59]
    m_61 = c[60]
    m_62 = c[61]
    m_63 = c[62]
    m_64 = c[63]
    m_65 = c[64]
    m_66 = c[65]
    m_67 = c[66]
    x1 = distanceFormulaCalculator.distanceFormula(m_61, m_67)
    x2 = distanceFormulaCalculator.distanceFormula(m_62, m_66)
    x3 = distanceFormulaCalculator.distanceFormula(m_63, m_65)
    return (x1 + x2 + x3) / 3

def mouthPoints():
    return [60, 61, 62, 63, 64, 65, 66, 67]
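If the project-specific helper modules above are not available, the same eye and mouth openings can be computed directly from the dlib shape object with plain Euclidean distances. A minimal sketch (eye_opening and mouth_opening are illustrative names; it assumes the standard 68-point predictor and a shape returned by predictor(frame, d), using the same index pairs as the helper files above):

import math

def dist(shape, i, j):
    # Euclidean distance between landmark points i and j
    dx = shape.part(i).x - shape.part(j).x
    dy = shape.part(i).y - shape.part(j).y
    return math.hypot(dx, dy)

def eye_opening(shape):
    # Average vertical opening of both eyes (right eye: 37-41 and 38-40, left eye: 43-47 and 44-46)
    right = (dist(shape, 37, 41) + dist(shape, 38, 40)) / 2
    left = (dist(shape, 43, 47) + dist(shape, 44, 46)) / 2
    return (right + left) / 2

def mouth_opening(shape):
    # Vertical opening of the inner mouth (point pairs 61-67, 62-66, 63-65)
    return (dist(shape, 61, 67) + dist(shape, 62, 66) + dist(shape, 63, 65)) / 3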

Adding GUI into the python scripts

I've tried to add a GUI (tkinter) to my script, but to no avail. If anyone can help me, I would be so grateful. I'm using Python 3.6 and, I think, the latest OpenCV.
I only started programming 2 weeks ago, so I'm kind of new to all this. Basically, I want to create a window that picks an image from my folder and then processes it through the script, so that whenever I want to use another image I don't have to change the script. I hope that makes sense.
This is the script that I took from Chris Dahms on YouTube and managed to change to what I want.
import cv2
import numpy as np
import os

import DetectChars
import DetectPlates
import PossiblePlate

SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)   # needed by drawRedRectangleAroundPlate below
SCALAR_CYAN = (255.0, 255.0, 0.0)

showSteps = False

def main():
    blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN()
    if blnKNNTrainingSuccessful == False:
        print("\nerror: KNN training was not successful\n")
        return

    imgOriginalScene = cv2.imread("CAR/Malaysia/22.jpg")
    if imgOriginalScene is None:
        print("\nerror: image not read from file \n\n")
        os.system("pause")
        return

    listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene)
    listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates)

    cv2.imshow("imgOriginalScene", imgOriginalScene)

    if len(listOfPossiblePlates) == 0:
        print("\nno license plates were detected\n")
    else:
        listOfPossiblePlates.sort(key=lambda possiblePlate: len(possiblePlate.strChars), reverse=True)
        licPlate = listOfPossiblePlates[0]
        cv2.imshow("Image Plate", licPlate.imgPlate)
        cv2.imshow("Image Threshold", licPlate.imgThresh)

        if len(licPlate.strChars) == 0:
            print("\nno characters were detected\n\n")
            return

        drawRedRectangleAroundPlate(imgOriginalScene, licPlate)
        print("\nlicense plate read from image = " + licPlate.strChars + "\n")
        print("----------------------------------------")
        writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)
        cv2.imshow("imgOriginalScene", imgOriginalScene)
        cv2.imwrite("imgOriginalScene.png", imgOriginalScene)

    cv2.waitKey(0)
    return

def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
    p2fRectPoints = cv2.boxPoints(licPlate.rrLocationOfPlateInScene)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]), SCALAR_RED, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]), SCALAR_RED, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]), SCALAR_RED, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]), SCALAR_RED, 2)

def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
    ptCenterOfTextAreaX = 0
    ptCenterOfTextAreaY = 0
    ptLowerLeftTextOriginX = 0
    ptLowerLeftTextOriginY = 0

    sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
    plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape

    intFontFace = cv2.FONT_HERSHEY_SIMPLEX
    fltFontScale = float(plateHeight) / 30.0
    intFontThickness = int(round(fltFontScale * 2))

    textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness)

    ((intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg) = licPlate.rrLocationOfPlateInScene

    intPlateCenterX = int(intPlateCenterX)
    intPlateCenterY = int(intPlateCenterY)
    ptCenterOfTextAreaX = int(intPlateCenterX)

    if intPlateCenterY < (sceneHeight * 0.75):
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6))
    else:
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6))

    textSizeWidth, textSizeHeight = textSize
    ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2))
    ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2))

    cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_CYAN, intFontThickness)

if __name__ == "__main__":
    main()
    cv2.waitKey()
    cv2.destroyAllWindows()
Pre-processing stage
# Preprocess.py
import cv2
import numpy as np
import math

# module level variables ##########################################################################
GAUSSIAN_SMOOTH_FILTER_SIZE = (5, 5)
ADAPTIVE_THRESH_BLOCK_SIZE = 19
ADAPTIVE_THRESH_WEIGHT = 9

def preprocess(imgOriginal):
    imgGrayscale = extractValue(imgOriginal)
    imgMaxContrastGrayscale = maximizeContrast(imgGrayscale)
    height, width = imgGrayscale.shape
    grayscaled = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)
    imgBlurred = np.zeros((height, width, 1), np.uint8)
    imgBlurred, otsu = cv2.threshold(grayscaled, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgThresh = cv2.medianBlur(otsu, 5)
    return imgGrayscale, imgThresh
# end function

def extractValue(imgOriginal):
    height, width, numChannels = imgOriginal.shape
    imgHSV = np.zeros((height, width, 3), np.uint8)
    imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)
    imgHue, imgSaturation, imgValue = cv2.split(imgHSV)
    return imgValue
# end function

def maximizeContrast(imgGrayscale):
    height, width = imgGrayscale.shape
    imgTopHat = np.zeros((height, width, 1), np.uint8)
    imgBlackHat = np.zeros((height, width, 1), np.uint8)
    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)
    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)
    return imgGrayscalePlusTopHatMinusBlackHat
# end function
If all you want is a window to select a file, then this should work.
import tkinter
from tkinter import *
import tkinter.simpledialog
from tkinter.filedialog import askopenfilename

master = Tk()
master.withdraw()            # hide the empty root window
my_file = askopenfilename()  # path of the file chosen by the user
mainloop()
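To tie the dialog into the plate-recognition script above, one option (a sketch only; pick_image_path is a hypothetical helper, untested against the DetectChars/DetectPlates modules) is to replace the hard-coded cv2.imread path in main() with the dialog result:

from tkinter import Tk
from tkinter.filedialog import askopenfilename

def pick_image_path():
    # Open a native file dialog and return the chosen path (empty string if cancelled)
    root = Tk()
    root.withdraw()
    path = askopenfilename(title="Choose a car image",
                           filetypes=[("Images", "*.jpg *.jpeg *.png"), ("All files", "*.*")])
    root.destroy()
    return path

# inside main(), instead of the hard-coded file:
# imgOriginalScene = cv2.imread(pick_image_path())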
I recommend Gtk3 for your GUI.
Here's a simple Gtk window with a button:
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

class Window(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self)
        self.connect('destroy', lambda q: Gtk.main_quit())
        button = Gtk.Button("Gtk.Button")
        button.connect("clicked", self.on_button_clicked)
        grid = Gtk.Grid()
        grid.attach(button, 0, 0, 1, 1)
        self.add(grid)
        self.show_all()

    def on_button_clicked(self, button):
        print("Gtk.Button was clicked")

w = Window()
Gtk.main()
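Since the goal is to pick an image file, the clicked handler could open a Gtk.FileChooserDialog and hand the chosen path to the rest of the script. A rough sketch under the same Gtk 3 / PyGObject assumptions:

#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

def on_button_clicked(button, parent):
    # Let the user pick an image; the chosen path could then be handed to cv2.imread
    dialog = Gtk.FileChooserDialog(title="Choose an image", parent=parent,
                                   action=Gtk.FileChooserAction.OPEN)
    dialog.add_buttons("Cancel", Gtk.ResponseType.CANCEL, "Open", Gtk.ResponseType.OK)
    if dialog.run() == Gtk.ResponseType.OK:
        print("Selected:", dialog.get_filename())
    dialog.destroy()

win = Gtk.Window(title="Pick an image")
win.connect('destroy', Gtk.main_quit)
button = Gtk.Button(label="Open image...")
button.connect("clicked", on_button_clicked, win)
win.add(button)
win.show_all()
Gtk.main()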
