drop down menu with dash / plotly - python-3.x

How can I adapt this code to add a drop down menu to choose between "New Cases" and two other columns that I have in my csv file?
# load in new csv to merge with geodata
import pandas as pd
df = pd.read_csv("ALLCOUNTRIES-PREDICTED.csv", header=0, encoding="utf-8")
import plotly.express as px
fig = px.choropleth(
    df,
    locations="iso_alpha_3",
    color="New Cases",  # identify representing column
    hover_name="Country",  # identify country code column
    animation_frame="Date",  # identify date column
    projection="equirectangular",  # select projection
    color_continuous_scale="Reds",  # select prefer color scale
    range_color=[0, 10000],  # select range of dataset
)
fig.show()
fig.write_html("example_map1.html")

Source: OWID COVID data. Columns were renamed to be consistent with the column names in the question.
Core concept: build a figure for each column. Each figure contains traces (data), frames and a layout. The key is that each frame name is unique, hence the addition of a suffix (a, b or c).
Then integrate the three figures:
traces is simple, just the traces from the first figure
frames is relatively simple, all frames from all figures
layout is taken from the first figure, without the play/pause buttons
updatemenus is the drop down of the required columns; its args are the sliders and coloraxis from the appropriate figure
A different color scale is used for each column, and a different max for range_color for each column, calculated from the underlying data.
Play/pause buttons have been removed. They can be made to partially work using this concept: https://plotly.com/python/animations/#defining-button-arguments However, that means you then need to update updatemenus from updatemenus, which really does not work given that updatemenus is a completely static structure.
import pandas as pd
import io, requests
import plotly.express as px
import plotly.graph_objects as go
# get OWID COVID data
dfall = pd.read_csv(
    io.StringIO(
        requests.get(
            "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv"
        ).text
    )
)
# filter make data frame have same columns as question and filter to a few days..
dfall["date"] = pd.to_datetime(dfall["date"])
df = dfall.rename(
    columns={
        "iso_code": "iso_alpha_3",
        "new_cases": "New Cases",
        "location": "Country",
        "date": "Date",
    }
).loc[lambda d: d["Date"].ge("1-nov-2021")]
df["Date"] = df["Date"].dt.strftime("%Y-%b-%d")
# three columns we're going to build choropleths from
cols = ["New Cases", "new_deaths", "new_vaccinations"]
# build figures for each of the required columns
# key technique is to append a suffix to the animation frame so each frame has its
# own name...
figs = [
    px.choropleth(
        df.assign(Date=lambda d: d["Date"] + f"~{suffix}"),
        locations="iso_alpha_3",
        color=c,  # identify representing column
        hover_name="Country",  # identify country code column
        animation_frame="Date",  # identify date column
        projection="equirectangular",  # select projection
        color_continuous_scale=color,  # select prefer color scale
        range_color=[
            0,
            df.groupby("Date")[c].quantile(0.75).mean(),
        ],  # select range of dataset
    )
    for c, color, suffix in zip(cols, ["Blues", "Reds", "Greens"], list("abc"))
]
# play / pause don't work as don't stop between columns..
layout = {
    k: v
    for k, v in figs[0].to_dict()["layout"].items()
    if k not in ["template", "updatemenus"]
}
# build figure from all frames, with layout excluding play/pause buttons
fig = go.Figure(
    data=figs[0].data, frames=[fr for f in figs for fr in f.frames], layout=layout
)
# finally build drop down menu...
fig = fig.update_layout(
    updatemenus=[
        {
            "buttons": [
                {
                    "label": c,
                    "method": "relayout",
                    "args": [
                        {
                            "coloraxis": col_fig.layout.coloraxis,
                            "sliders": col_fig.layout.sliders,
                        }
                    ],
                }
                for c, col_fig in zip(cols, figs)
            ]
        }
    ]
)
fig
dash / plotly solution
Using dash this becomes very simple: just build as many figures as there are columns.
A dropdown with a callback then picks the appropriate figure.
import pandas as pd
import io, requests
import plotly.express as px
import plotly.graph_objects as go
import dash
from dash.dependencies import Input, Output, State
from jupyter_dash import JupyterDash
# get OWID COVID data
dfall = pd.read_csv(
    io.StringIO(
        requests.get(
            "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv"
        ).text
    )
)
# filter make data frame have same columns as question and filter to a few days..
dfall["date"] = pd.to_datetime(dfall["date"])
df = dfall.rename(
    columns={
        "iso_code": "iso_alpha_3",
        "new_cases": "New Cases",
        "location": "Country",
        "date": "Date",
    }
).loc[lambda d: d["Date"].ge("1-nov-2021")]
df["Date"] = df["Date"].dt.strftime("%Y-%b-%d")
# three columns we're going to build choropleths from
cols = ["New Cases", "new_deaths", "new_vaccinations"]
# build figures for each of the required columns
figs = [
    px.choropleth(
        df,
        locations="iso_alpha_3",
        color=c,  # identify representing column
        hover_name="Country",  # identify country code column
        animation_frame="Date",  # identify date column
        projection="equirectangular",  # select projection
        color_continuous_scale=color,  # select prefer color scale
        range_color=[
            0,
            df.groupby("Date")[c].quantile(0.75).mean(),
        ],  # select range of dataset
    )
    for c, color in zip(cols, ["Blues", "Reds", "Greens"])
]
# Build App
app = JupyterDash(__name__)
app.layout = dash.html.Div(
    [
        dash.dcc.Dropdown(
            id="choropleth",
            options=[{"label": c, "value": i} for i, c in enumerate(cols)],
            value=0,
        ),
        dash.dcc.Graph(
            id="map",
        ),
    ]
)
@app.callback(Output("map", "figure"), Input("choropleth", "value"))
def updateGraph(id):
    if not id:
        return figs[0]
    return figs[int(id)]
# Run app and display result inline in the notebook
app.run_server(mode="inline")
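If you are not working in a notebook, the same approach works with a plain Dash app. This is a hedged sketch, not part of the original answer; it assumes the figs and cols objects built above and a Dash 2.x install.
import dash
from dash import dcc, html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div(
    [
        dcc.Dropdown(
            id="choropleth",
            options=[{"label": c, "value": i} for i, c in enumerate(cols)],
            value=0,
        ),
        dcc.Graph(id="map"),
    ]
)

@app.callback(Output("map", "figure"), Input("choropleth", "value"))
def update_graph(value):
    # fall back to the first figure when nothing is selected
    return figs[0] if value is None else figs[int(value)]

if __name__ == "__main__":
    app.run_server(debug=True)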

Related

Using python to plot 'Gridded' map

I would like to know how I can create a gridded map of a country (i.e. Singapore) with a resolution of 200m x 200m squares (50m or 100m is OK too).
I would then use the 'nearest neighbour' technique to assign rainfall data and a colour code to each square based on the nearest rainfall station's data.
[I have the latitude, longitude & rainfall data for all the stations for each date.]
Then, I would like to store the data in an array for each 'gridded map' (i.e. from 1-Jan-1980 to 31-Dec-2021).
Can this be done using python?
P.S. Below is a 'simple' version I did as an example of how the 'gridded' map should look for one particular day.
https://i.stack.imgur.com/9vIeQ.png
Thank you so much!
Can this be done using python? YES.
I have previously provided a similar answer on binning a spatial dataframe; reference that as well for the concepts.
You have noted that you are working with Singapore geometry and rainfall data. To set up an answer I have sourced this data from government sources.
For the purposes of this answer I have used a 2km x 2km grid, so that resource utilisation is reduced when plotting to demonstrate the answer.
Core concept: create a grid of box polygons that covers the total bounds of the geometry. Note it's important to use a UTM CRS here so that bounds in meters make sense. Once the boxes are created, remove boxes that are within the total bounds but do not intersect the actual geometry.
Next create a geopandas dataframe of the rainfall data, using the longitude and latitude of each weather station to create points.
Final step: sjoin_nearest() the grid geometry with the rainfall geometry and data.
Clearly this final data frame gdf_grid_rainfall is a data frame, which is effectively an array; you can use it as an array as you please (a sketch of pivoting it into a per-date array follows the solution code below).
Folium and plotly interactive visualisations are provided that clearly demonstrate the solution is working.
solution
This depends on the data sourcing code further below.
# number of meters
STEP = 2000
a, b, c, d = gdf_sg.to_crs(gdf_sg.estimate_utm_crs()).total_bounds
# create a grid for Singapore
gdf_grid = gpd.GeoDataFrame(
    geometry=[
        shapely.geometry.box(minx, miny, maxx, maxy)
        for minx, maxx in zip(np.arange(a, c, STEP), np.arange(a, c, STEP)[1:])
        for miny, maxy in zip(np.arange(b, d, STEP), np.arange(b, d, STEP)[1:])
    ],
    crs=gdf_sg.estimate_utm_crs(),
).to_crs(gdf_sg.crs)
# restrict grid to only squares that intersect with Singapore geometry
gdf_grid = (
    gdf_grid.sjoin(gdf_sg)
    .pipe(lambda d: d.groupby(d.index).first())
    .set_crs(gdf_grid.crs)
    .drop(columns=["index_right"])
)
# geodataframe of weather station locations and rainfall by date
gdf_rainfall = gpd.GeoDataFrame(
    df_stations.merge(df, on="id")
    .assign(
        geometry=lambda d: gpd.points_from_xy(
            d["location.longitude"], d["location.latitude"]
        )
    )
    .drop(columns=["location.latitude", "location.longitude"]),
    crs=gdf_sg.crs,
)
# weather station to nearest grid
gdf_grid_rainfall = gpd.sjoin_nearest(gdf_grid, gdf_rainfall).drop(
    columns=["Description", "index_right"]
)
# does it work? let's visualize with folium
gdf_grid_rainfall.loc[lambda d: d["Date"].eq("20220622")].explore(
    "Rainfall (mm)", height=400, width=600
)
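As a hedged sketch, not part of the original answer: one way to treat gdf_grid_rainfall as an array is to pivot it into one rainfall value per grid square per date (the rain_array name below is just illustrative).
# reshape to (grid squares x dates); the dataframe index identifies the grid square
rain = gdf_grid_rainfall.pivot_table(
    index=gdf_grid_rainfall.index, columns="Date", values="Rainfall (mm)"
)
rain_array = rain.to_numpy()  # 2-D numpy array, one column per date
print(rain_array.shape)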
data sourcing
import requests, itertools, io
from pathlib import Path
import urllib
from zipfile import ZipFile
import fiona.drvsupport
import geopandas as gpd
import numpy as np
import pandas as pd
import shapely.geometry
# get official Singapore planning area geometry
url = "https://geo.data.gov.sg/planning-area-census2010/2014/04/14/kml/planning-area-census2010.zip"
f = Path.cwd().joinpath(urllib.parse.urlparse(url).path.split("/")[-1])
if not f.exists():
    r = requests.get(url, stream=True, headers={"User-Agent": "XY"})
    with open(f, "wb") as fd:
        for chunk in r.iter_content(chunk_size=128):
            fd.write(chunk)
zfile = ZipFile(f)
zfile.extractall(f.stem)
fiona.drvsupport.supported_drivers['KML'] = 'rw'
gdf_sg = gpd.read_file(
    [_ for _ in Path.cwd().joinpath(f.stem).glob("*.kml")][0], driver="KML"
)
# get data about Singapore weather stations
df_stations = pd.json_normalize(
    requests.get("https://api.data.gov.sg/v1/environment/rainfall").json()["metadata"][
        "stations"
    ]
)
# dates to get data from weather.gov.sg
dates = pd.date_range("20220601", "20220730", freq="MS").strftime("%Y%m")
df = pd.DataFrame()
# fmt: off
bad = ['S100', 'S201', 'S202', 'S203', 'S204', 'S205', 'S207', 'S208',
'S209', 'S211', 'S212', 'S213', 'S214', 'S215', 'S216', 'S217',
'S218', 'S219', 'S220', 'S221', 'S222', 'S223', 'S224', 'S226',
'S227', 'S228', 'S229', 'S230', 'S900']
# fmt: on
for stat, month in itertools.product(df_stations["id"], dates):
    if stat not in bad:
        try:
            df_ = pd.read_csv(
                io.StringIO(
                    requests.get(
                        f"http://www.weather.gov.sg/files/dailydata/DAILYDATA_{stat}_{month}.csv"
                    ).text
                )
            ).iloc[:, 0:5]
        except pd.errors.ParserError as e:
            bad.append(stat)
            print(f"failed {stat} {month}")
        df = pd.concat([df, df_.assign(id=stat)])
df["Rainfall (mm)"] = pd.to_numeric(
df["Daily Rainfall Total (mm)"], errors="coerce"
)
df["Date"] = pd.to_datetime(df[["Year","Month","Day"]]).dt.strftime("%Y%m%d")
df = df.loc[:,["id","Date","Rainfall (mm)", "Station"]]
visualisation using plotly animation
import plotly.express as px
# reduce dates so figure builds in sensible time
gdf_px = gdf_grid_rainfall.loc[
    lambda d: d["Date"].isin(
        gdf_grid_rainfall["Date"].value_counts().sort_index().index[0:15]
    )
]
px.choropleth_mapbox(
    gdf_px,
    geojson=gdf_px.geometry,
    locations=gdf_px.index,
    color="Rainfall (mm)",
    hover_data=gdf_px.columns[1:].tolist(),
    animation_frame="Date",
    mapbox_style="carto-positron",
    center={
        "lat": gdf_px.unary_union.centroid.y,
        "lon": gdf_px.unary_union.centroid.x,
    },
    zoom=8.5,
).update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0, "pad": 4})

Changing the values of a dict to lowercase (the values are color codes) so they are accepted as a color parameter in plotly.graph_objects

So, I'm trying to get the colors from the dictionary 'disaster_type' to draw the markers in geoscatters depending on the type of disaster.
Basically, I want to represent each natural disaster in the graphic with its color code, e.g. if it is volcanic activity, paint it 'orange'. I want to change the size of the marker as well depending on the magnitude of the disaster, but that's for another day.
here's the link of the dataset: https://www.kaggle.com/datasets/brsdincer/all-natural-disasters-19002021-eosdis
import plotly.graph_objects as go
import pandas as pd
import plotly as plt
df = pd.read_csv('1900_2021_DISASTERS - main.csv')
df.head()
df.tail()
disaster_set = {disaster for disaster in df['Disaster Type']}
disaster_type = {'Storm':'aliceblue',
'Volcanic activity':'orange',
'Flood':'royalblue',
'Mass movement (dry)':'darkorange',
'Landslide':'#C76114',
'Extreme temperature':'#FF0000',
'Animal accident':'gray55',
'Glacial lake outburst':'#7D9EC0',
'Earthquake':'#CD8C95',
'Insect infestation':'#EEE8AA',
'Wildfire':' #FFFF00',
'Fog':'#00E5EE',
'Drought':'#FFEFD5',
'Epidemic':'#00CD66 ',
'Impact':'#FF6347'}
# disaster_type_lower = {(k, v.lower()) for k, v in disaster_type.items()}
# print(disaster_type_lower)
# for values in disaster_type.values():
# disaster_type[values] = disaster_type.lowercase()
fig = go.Figure(data=go.Scattergeo(
lon = df['Longitude'],
lat = df['Latitude'],
text = df['Country'],
mode = 'markers',
marker_color = disaster_type_.values()
)
)
fig.show()
I can't figure out how; I've left in comments after the dict how I tried to do that.
It changes them to lowercase, but now I don't know how to get them... My brain is completely melted.
It's a simple case of pandas map().
I found data on kaggle that appears to be the same as yours, so I have used that.
One type, Extreme temperature, was unmapped, so fillna("red") is used to remove any errors.
gray55 gave me an error, so I replaced it with its RGB equivalent.
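As a minimal illustration of the map() plus fillna() idea on toy data (not the kaggle set):
import pandas as pd

s = pd.Series(["Storm", "Flood", "Tsunami"])
colors = {"Storm": "aliceblue", "Flood": "royalblue"}
print(s.map(colors).fillna("red"))
# 0    aliceblue
# 1    royalblue
# 2          red
# dtype: object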
import kaggle.cli
import sys
import pandas as pd
from zipfile import ZipFile
import urllib
import plotly.graph_objects as go
# fmt: off
# download data set
url = "https://www.kaggle.com/brsdincer/all-natural-disasters-19002021-eosdis"
sys.argv = [sys.argv[0]] + f"datasets download {urllib.parse.urlparse(url).path[1:]}".split(" ")
kaggle.cli.main()
zfile = ZipFile(f'{urllib.parse.urlparse(url).path.split("/")[-1]}.zip')
dfs = {f.filename: pd.read_csv(zfile.open(f)) for f in zfile.infolist()}
# fmt: on
df = dfs["DISASTERS/1970-2021_DISASTERS.xlsx - emdat data.csv"]
disaster_type = {
"Storm": "aliceblue",
"Volcanic activity": "orange",
"Flood": "royalblue",
"Mass movement (dry)": "darkorange",
"Landslide": "#C76114",
"Extreme temperature": "#FF0000",
"Animal accident": "#8c8c8c", # gray55
"Glacial lake outburst": "#7D9EC0",
"Earthquake": "#CD8C95",
"Insect infestation": "#EEE8AA",
"Wildfire": " #FFFF00",
"Fog": "#00E5EE",
"Drought": "#FFEFD5",
"Epidemic": "#00CD66 ",
"Impact": "#FF6347",
}
fig = go.Figure(
    data=go.Scattergeo(
        lon=df["Longitude"],
        lat=df["Latitude"],
        text=df["Country"],
        mode="markers",
        marker_color=df["Disaster Type"].map(disaster_type).fillna("red"),
    )
)
fig.show()

Altair/Vega-Lite heatmap: Filter top k

I am attempting to create a heatmap and retain only the top 5 samples based on the mean of the relative abundance. I am able to sort the heatmap properly, but I can't figure out how to retain only the top 5, in this case samples c, e, b, y, a. I am pasting a subset of the df with the image. I've tried myriad permutations of the "Top K Items Tutorial" link at the altair-viz website. I'd prefer to use altair to do the filtering if possible, as opposed to filtering the df itself in the python code.
Dataframe:
,Lowest_Taxon,p1,p2,p3,p4,p5,p6,p7
0,a,0.03241281,0.0,0.467738067,3.14456785,0.589519651,13.5744323,0.0
1,b,0.680669,9.315121951,2.848404893,13.99058458,2.139737991,16.60779366,7.574639383
2,c,40.65862829,1.244878049,71.01223315,4.82197541,83.18777293,0.0,0.0
3,d,0.0,0.0,0.0,0.548471137,0.272925764,0.925147183,0.0
4,e,0.090755867,13.81853659,5.205085152,27.75721011,1.703056769,19.6691898,12.27775914
5,f,0.0,0.0,0.0,0.0,0.0,0.0,0.0
6,g,0.187994295,0.027317073,0.0,0.0,0.0,0.02242781,0.0
7,h,0.16854661,0.534634146,1.217318302,7.271813154,1.73580786,0.57751612,0.57027843
8,i,0.142616362,2.528780488,1.163348525,0.34279446,0.0,0.0,0.0
9,j,1.711396344,0.694634146,0.251858959,4.273504274,0.087336245,1.721334455,0.899027172
10,k,0.0,1.475121951,0.0,0.0,0.0,5.573310906,0.0
11,l,0.194476857,0.253658537,1.517150396,2.413273002,0.949781659,5.147182506,1.650452868
12,m,0.0,1.736585366,0.0,0.063988299,0.0,8.42724979,0.623951694
13,n,0.0,0.0,0.0,0.0,0.0,0.0,0.0
14,o,4.68689226,0.12097561,0.0,0.0,0.0,0.0,0.0
15,p,0.0,0.885853659,0.0,0.0,0.0,0.913933277,0.046964106
16,q,0.252819914,0.050731707,0.023986568,0.0,0.087336245,0.0,0.0
17,r,0.0,0.0,0.0,0.0,0.0,0.0,0.0
18,s,0.0,0.0,0.0,0.0,0.0,0.0,0.0
19,t,0.0,0.0,0.0,0.0,0.0,0.0,0.0
20,u,0.0,0.058536585,0.089949628,0.356506239,0.0,0.285954584,1.17410265
21,v,0.0,0.0,0.0,0.0,0.0,0.0,0.0
22,w,0.0,0.0,0.0,0.0,0.0,0.0,0.0
23,x,1.471541553,2.396097561,0.593667546,0.278806161,0.065502183,0.280347631,0.952700436
24,y,0.0,0.32,0.0,0.461629873,0.0,7.804878049,18.38980208
25,z,0.0,0.0,0.0,0.0,0.0,0.0,0.0
Code block:
import pandas as pd
import numpy as np
import altair as alt
from vega_datasets import data
from altair_saver import save
# Read in the file and fill empty cells with zero
df = pd.read_excel("path\to\df")
doNotMelt = df.drop(df.iloc[:,1:], axis=1)
df_melted = pd.melt(df, id_vars = doNotMelt, var_name = 'SampleID', value_name = 'Relative_abundance')
# Tell altair to plot as many rows as is necessary
alt.data_transformers.disable_max_rows()
alt.Chart(df_melted).mark_rect().encode(
alt.X('SampleID:N'),
alt.Y('Lowest_Taxon:N', sort=alt.EncodingSortField(field='Relative_abundance', op='mean', order='descending')),
alt.Color('Relative_abundance:Q')
)
If you know what you want to show is the entries with c, e, b, y and a (and it will not change later) you could simply apply a transform_filter on the field Lowest_Taxon.
If you want to calculate on the spot which ones make it into the top five, it needs a bit more effort, i.e. a combination of joinaggregate, window and filter transforms.
For both I paste an example below. By the way, I converted the original data that you pasted into a csv file which is imported by the code snippets. You can make it easier for others to use your pandas toy data by providing it as a dict, which can then be read directly in the code (see the sketch below).
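For instance, a sketch of the first few rows as a dict (values copied from the table above, truncated to three columns for brevity):
import pandas as pd

df = pd.DataFrame({
    "Lowest_Taxon": ["a", "b", "c"],
    "p1": [0.03241281, 0.680669, 40.65862829],
    "p2": [0.0, 9.315121951, 1.244878049],
})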
Simple approach:
import pandas as pd
import altair as alt
import numpy as np
alt.data_transformers.disable_max_rows()
df = pd.read_csv('df.csv', index_col=0)
doNotMelt = df.drop(df.iloc[:,1:], axis=1)
df_melted = pd.melt(df, id_vars = doNotMelt, var_name = 'SampleID', value_name = 'Relative_abundance')
alt.Chart(df_melted).mark_rect().encode(
alt.X('SampleID:N'),
alt.Y('Lowest_Taxon:N', sort=alt.EncodingSortField(field='Relative_abundance', op='mean', order='descending')),
alt.Color('Relative_abundance:Q')
).transform_filter(
alt.FieldOneOfPredicate(field='Lowest_Taxon', oneOf=['c', 'e', 'b', 'y', 'a'])
)
Flexible approach:
set n to how many of the top entries you want to see
import pandas as pd
import altair as alt
import numpy as np
alt.data_transformers.disable_max_rows()
df = pd.read_csv('df.csv', index_col=0)
doNotMelt = df.drop(df.iloc[:,1:], axis=1)
df_melted = pd.melt(df, id_vars = doNotMelt, var_name = 'SampleID', value_name = 'Relative_abundance')
n = 5 # number of entries to display
alt.Chart(df_melted).mark_rect().encode(
alt.X('SampleID:N'),
alt.Y('Lowest_Taxon:N', sort=alt.EncodingSortField(field='Relative_abundance', op='mean', order='descending')),
alt.Color('Relative_abundance:Q')
).transform_joinaggregate(
mean_rel_ab = 'mean(Relative_abundance)',
count_of_samples = 'valid(Relative_abundance)',
groupby = ['Lowest_Taxon']
).transform_window(
rank='rank(mean_rel_ab)',
sort=[alt.SortField('mean_rel_ab', order='descending')],
frame = [None, None]
).transform_filter(
    (alt.datum.rank <= (n - 1) * alt.datum.count_of_samples + 1)
)

HoverTool on Bokeh: Time format issue (date do not appear correctly) x axis from datetime DF column in pandas

Here is the code I wrote.
I took the data from pandas DF (not pasted here).
The x values are from DF index columns that is a DateTime column.
The issue that I want to resolve is in line:
TOOLTIPS = [("index", "$index"),("(Time,Temperature)", "($x, $y)"),]
when I have to change the $x format to a correct format in order to see the time format in the hover window on the bokeh plot.
see the python code
import datetime as dt
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource, CDSView, BooleanFilter
from bokeh.models import DatetimeTickFormatter
x=df_bases.index
y0=df_bases["base_1"]
y1=df_bases["base_5"]
y2=df_bases["base_12"]
# output to static HTML file
output_file("temperatures from thermocouples.html")
# add some renderers
output_file("Thermocouples temperature.html", title="Thermocouples temperature")
TOOLTIPS = [("index", "$index"),("(Time,Temperature)", "($x, $y)"),]
# create a new plot with a datetime axis type
p = figure( tooltips=TOOLTIPS , plot_width=1250, plot_height=580, x_axis_type="datetime", x_axis_label='Time',
y_axis_label='Temperature [°C]', title="Thermocouples temperature")
p.line(x, y0, legend="thermocouple 1", line_width=1 , color='navy', alpha=1)
p.line(x, y1, legend="thermocouple 5", color="green")
p.line(x, y2, legend="thermocouple 12", line_width=1 , color='orange', alpha=1)#, line_dash="4 4")
p.border_fill_color = "whitesmoke"
p.xaxis.formatter=DatetimeTickFormatter(
microseconds = ['%Y-%m-%d %H:%M:%S.%f'],
milliseconds = ['%Y-%m-%d %H:%M:%S.%3N'],
seconds = ["%Y-%m-%d %H:%M:%S"],
minsec = ["%Y-%m-%d %H:%M:%S"],
minutes = ["%Y-%m-%d %H:%M:%S"],
hourmin = ["%Y-%m-%d %H:%M:%S"],
hours=["%Y-%m-%d %H:%M:%S"],
days=["%Y-%m-%d %H:%M:%S"],
months=["%Y-%m-%d %H:%M:%S"],
years=["%Y-%m-%d %H:%M:%S"],
)
p.title.align = 'center'
# create a column data source for the plots to share
source = ColumnDataSource(data=dict(x=x, y0=y0, y1=y1, y2=y2))
# create a view of the source for one plot to use
view = CDSView(source=source)
# show the results
show(p)
Currently (as of Bokeh 1.2) the hover tool does not have any "always on" mode. It only hovers in response to hit-testing glyphs that are added to the plot. Additionally, there is no way to apply formatting to "special vars" like $x (that will be possible starting in Bokeh 2.0). Custom formatters can only be applied to hover tooltips for data columns. Given that, my best suggestion is to switch to using @x instead (which interrogates the "x" data column, not the x mouse position). If you do that, you can use all the techniques in the Formatting Tooltip Fields section of the docs.
Since you did not provide a complete example (no data to run), I can only provide partial untested suggestions:
# use @x{%F} to specify the %F datetime format (or choose another) for the x column
TOOLTIPS = [("index", "$index"), ("(Time,Temperature)", "(@x{%F}, $y)")]
# tell bokeh to use the "datetime" formatter for the x column
p.hover.formatters = {'x': 'datetime'}
# just a suggestion, often useful for timeseries plots
p.hover.mode = 'vline'
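For reference, here is a minimal self-contained sketch (toy data, not the poster's DataFrame) of @-style tooltips with a datetime formatter; note that recent Bokeh releases expect the '@' prefix in the formatters keys, unlike the older snippet above.
import pandas as pd
import numpy as np
from bokeh.plotting import figure, show
from bokeh.models import ColumnDataSource, HoverTool

# toy time series standing in for the thermocouple data
df = pd.DataFrame({
    "time": pd.date_range("2021-01-01", periods=30, freq="D"),
    "temp": 20 + np.random.rand(30) * 5,
})
source = ColumnDataSource(df)

p = figure(x_axis_type="datetime", x_axis_label="Time", y_axis_label="Temperature [°C]")
p.line("time", "temp", source=source, color="navy")

p.add_tools(HoverTool(
    tooltips=[("Time", "@time{%F %H:%M}"), ("Temperature", "@temp")],
    formatters={"@time": "datetime"},  # '@' prefix required in Bokeh 2.x and later
    mode="vline",
))
show(p)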

In Bokeh, how do I add tooltips to a Timeseries chart (hover tool)?

Is it possible to add Tooltips to a Timeseries chart?
In the simplified code example below, I want to see a single column name ('a','b' or 'c') when the mouse hovers over the relevant line.
Instead, a "???" is displayed and ALL three lines get a tool tip (rather than just the one im hovering over)
Per the documentation (http://docs.bokeh.org/en/latest/docs/user_guide/tools.html#hovertool), field names starting with "@" are interpreted as columns on the data source.
How can I display the 'columns' from a pandas DataFrame in the tooltip?
Or, if the high level TimeSeries interface doesn't support this, any clues for using the lower level interfaces to do the same thing? (line? multi_line?) or convert the DataFrame into a different format (ColumnDataSource?)
For bonus credit, how should the "$x" be formatted to display the date as a date?
thanks in advance
import pandas as pd
import numpy as np
from bokeh.charts import TimeSeries
from bokeh.models import HoverTool
from bokeh.plotting import show
toy_df = pd.DataFrame(data=np.random.rand(5,3), columns = ('a', 'b' ,'c'), index = pd.DatetimeIndex(start='01-01-2015',periods=5, freq='d'))
p = TimeSeries(toy_df, tools='hover')
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
("Series", "#columns"),
("Date", "$x"),
("Value", "$y"),
]
show(p)
Below is what I came up with.
It's not pretty but it works.
I'm still new to Bokeh (& Python for that matter), so if anyone wants to suggest a better way to do this, please feel free.
import pandas as pd
import numpy as np
from bokeh.charts import TimeSeries
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, show
toy_df = pd.DataFrame(data=np.random.rand(5,3), columns = ('a', 'b' ,'c'), index = pd.DatetimeIndex(start='01-01-2015',periods=5, freq='d'))
_tools_to_show = 'box_zoom,pan,save,hover,resize,reset,tap,wheel_zoom'
p = figure(width=1200, height=900, x_axis_type="datetime", tools=_tools_to_show)
# FIRST plot ALL lines (This is a hack to get it working, why can't i pass in a dataframe to multi_line?)
# It's not pretty but it works.
# what I want to do!: p.multi_line(df)
ts_list_of_list = []
for i in range(0, len(toy_df.columns)):
    ts_list_of_list.append(toy_df.index.T)
vals_list_of_list = toy_df.values.T.tolist()
# Define colors because otherwise multi_line will use blue for all lines...
cols_to_use = ['Black', 'Red', 'Lime']
p.multi_line(ts_list_of_list, vals_list_of_list, line_color=cols_to_use)
# THEN put scatter one at a time on top of each one to get tool tips (HACK! lines with tooltips not yet supported by Bokeh?)
for (name, series) in toy_df.iteritems():
    # need to repmat the name to be same dimension as index
    name_for_display = np.tile(name, [len(toy_df.index), 1])
    source = ColumnDataSource({'x': toy_df.index, 'y': series.values, 'series_name': name_for_display, 'Date': toy_df.index.format()})
    # trouble formatting x as a datestring, so pre-formatting and using an extra column. It's not pretty but it works.
    p.scatter('x', 'y', source=source, fill_alpha=0, line_alpha=0.3, line_color="grey")
hover = p.select(dict(type=HoverTool))
hover.tooltips = [("Series", "#series_name"), ("Date", "#Date"), ("Value", "#y{0.00%}"),]
hover.mode = 'mouse'
show(p)
I'm not familiar with Pandas; I just use python lists to show an example of how to add tooltips to multi_line, show series names, and properly display date/time. Below is the result.
Thanks to @bs123's answer and @tterry's answer in Bokeh Plotting: Enable tooltips for only some glyphs.
My result:
# -*- coding: utf-8 -*-
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool
from datetime import datetime
dateX_str = ['2016-11-14','2016-11-15','2016-11-16']
# convert the string of datetime to python datetime object
dateX = [datetime.strptime(i, "%Y-%m-%d") for i in dateX_str]
v1= [10,13,5]
v2 = [8,4,14]
v3= [14,9,6]
v = [v1,v2,v3]
names = ['v1','v2','v3']
colors = ['red','blue','yellow']
output_file('example.html',title = 'example of add tooltips to multi_timeseries')
tools_to_show = 'hover,box_zoom,pan,save,resize,reset,wheel_zoom'
p = figure(x_axis_type="datetime", tools=tools_to_show)
# to show the tooltip for multi lines, you need to use a ColumnDataSource, which defines the data source of the glyph
# the key is to use the same column names for each glyph's data source,
# so you don't have to add a tooltip for each glyph; the tooltip is added to the figure
# plot each timeseries line glyph
for i in range(3):
    # bokeh can't show a datetime object in the tooltip properly, so we use a string instead
    source = ColumnDataSource(data={
        'dateX': dateX,  # python datetime object as X axis
        'v': v[i],
        'dateX_str': dateX_str,  # string of datetime for display in tooltip
        'name': [names[i] for n in range(3)]
    })
    p.line('dateX', 'v', source=source, legend=names[i], color=colors[i])
    circle = p.circle('dateX', 'v', source=source, fill_color="white", size=8, legend=names[i], color=colors[i])
    # to avoid some strange behavior (as shown in the picture at the end), only add the circle glyph to the renderers of the hover tool,
    # so the tooltip only takes effect on the circle glyph
    p.tools[0].renderers.append(circle)
# show the tooltip
hover = p.select(dict(type=HoverTool))
hover.tooltips = [("value", "#v"), ("name", "#name"), ("date", "#dateX_str")]
hover.mode = 'mouse'
show(p)
Tooltips with some strange behavior: two tips displayed at the same time.
Here is my solution. I inspected the glyph renderer's data source to see what names are on it. Then I use those names in the hover tooltips. You can see the resulting plot here.
import pandas as pd
import numpy as np
from bokeh.charts import TimeSeries
from bokeh.models import HoverTool
from bokeh.plotting import show
toy_df = pd.DataFrame(data=np.random.rand(5,3), columns = ('a', 'b' ,'c'), index = pd.DatetimeIndex(start='01-01-2015',periods=5, freq='d'))
# Bokeh displays dates as numbers, so convert to string to show correctly
toy_df.index = toy_df.index.astype(str)
p = TimeSeries(toy_df, tools='hover')
# Next 3 lines are to inspect what the names on the glyph are, so they can be called with @name on hover
#glyph_renderers = p.select(dict(type=GlyphRenderer))
#bar_source = glyph_renderers[0].data_source
#print(bar_source.data) # Here we can inspect names to call on hover
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
    ("Series", "@series"),
    ("Date", "@x_values"),
    ("Value", "@y_values"),
]
show(p)
The original poster's code doesn't work with the latest pandas (the DatetimeIndex constructor has changed), but HoverTool now supports a formatters attribute that lets you specify a format as a strftime string. Something like:
fig.add_tools(HoverTool(
    tooltips=[
        ('time', '@index{%Y-%m-%d}')
    ],
    formatters={
        '@index': 'datetime'
    }
))
