Sugar.js Date: what is the equivalent of moment.months() to get a list of all months?

What's the equivalent of moment.months() and moment.weekdaysShort()?
moment.months()
["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
moment.weekdaysShort()
["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
The closest I've found so far is this. Isn't there a cleaner version?
Date.getLocale().months
["january", "february", "march", "april", "may", "june", "july", "august", "september", "october", "november", "december",
"jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]

For now, I'm doing this in CoffeeScript:
(m[0].toUpperCase() + m[1..-1] for m in Date.getLocale().months[0..11])


Subset Data based on User Login

I have a Shiny application with a login interface, and I need to subset the data based on the user login. The data contain a column, called "memberstate", that marks which rows belong to each user; it essentially contains the login username. The code I am trying to use is borrowed from RStudio and is as follows:
user <- reactive({
  session$user
})

### Code to manage row level security
isManager <- reactive({
  if (user() == "manager") {
    return(TRUE)
  } else {
    return(FALSE)
  }
})

# Based on the logged in user, pull out only the data this user
# should be able to see.
data <- read.csv("data/DHA&PPTabletsClean.csv")

myData <- reactive({
  if (isManager()) {
    # If a manager, show everything.
    return(data)
  } else {
    # If a Member State, only show their own data.
    return(data[data$memberstate == user(), ])
  }
})
I then try to use MyData for plotting graphs, but I get the following error message:
"Error in Mydata: could not find function "Mydata" ". I am a newbie to R Shiny. Kindly assist.
Part of the data is as follows:
dput(data)
structure(list(Brand = c("Malaril", "Malaril", "Malaril", "Malaril",
"Malaril", "Malaril", "Malaril", "Malaril", "Malaril", "Malaril",
"Malaril", "Malaril", "Malaril", "Malaril", "Malaril", "Malaril",
"Malaril", "Malaril", "Malaril", "Malaril", "Malaril", "Malaril",
"Malaril", "Malaril", "Malaril", "Malaril", "Malaril", "Malaril",
"Malaril", "Malaril", "Malaril", "Malaril", "Malaril", "Malaril"
), ActiveIngredient = c("Dihydroartemisinin", "Dihydroartemisinin",
"Dihydroartemisinin", "Dihydroartemisinin", "Dihydroartemisinin",
"Dihydroartemisinin", "Piperaquine Phosphate", "Piperaquine Phosphate",
"Piperaquine Phosphate", "Piperaquine Phosphate", "Piperaquine Phosphate",
"Piperaquine Phosphate", "Dihydroartemisinin", "Dihydroartemisinin",
"Dihydroartemisinin", "Dihydroartemisinin", "Dihydroartemisinin",
"Dihydroartemisinin", "Piperaquine Phosphate", "Piperaquine Phosphate",
"Piperaquine Phosphate", "Piperaquine Phosphate", "Piperaquine Phosphate",
"Piperaquine Phosphate", "Dihydroartemisinin", "Dihydroartemisinin",
"Dihydroartemisinin", "Dihydroartemisinin", "Dihydroartemisinin",
"Dihydroartemisinin", "Piperaquine Phosphate", "Piperaquine Phosphate",
"Piperaquine Phosphate", "Piperaquine Phosphate"), Assay = c(94.9,
94.9, 94.9, 94.9, 94.9, 94.9, 101.6, 101.6, 101.6, 101.6, 101.6,
101.6, 95, 95, 95, 95, 95, 95, 100.2, 100.2, 100.2, 100.2, 100.2,
100.2, 96.4, 96.4, 96.4, 96.4, 96.4, 96.4, 100.6, 100.6, 100.6,
100.6), Assayperc = c(0.949, 0.949, 0.949, 0.949, 0.949, 0.949,
1.016, 1.016, 1.016, 1.016, 1.016, 1.016, 0.95, 0.95, 0.95, 0.95,
0.95, 0.95, 1.002, 1.002, 1.002, 1.002, 1.002, 1.002, 0.965,
0.965, 0.965, 0.965, 0.965, 0.965, 1.006, 1.006, 1.006, 1.006
), AssayLL = c(90L, 90L, 90L, 90L, 90L, 90L, 93L, 93L, 93L, 93L,
93L, 93L, 90L, 90L, 90L, 90L, 90L, 90L, 93L, 93L, 93L, 93L, 93L,
93L, 90L, 90L, 90L, 90L, 90L, 90L, 93L, 93L, 93L, 93L), AssaypercLL = c(0.9,
0.9, 0.9, 0.9, 0.9, 0.9, 0.93, 0.93, 0.93, 0.93, 0.93, 0.93,
0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.93, 0.93, 0.93, 0.93, 0.93, 0.93,
0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.93, 0.93, 0.93, 0.93), AssayUL = c(110L,
110L, 110L, 110L, 110L, 110L, 107L, 107L, 107L, 107L, 107L, 107L,
110L, 110L, 110L, 110L, 110L, 110L, 107L, 107L, 107L, 107L, 107L,
107L, 110L, 110L, 110L, 110L, 110L, 110L, 107L, 107L, 107L, 107L
), AssaypercUL = c(1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.07, 1.07,
1.07, 1.07, 1.07, 1.07, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.07, 1.07,
1.07, 1.07, 1.07, 1.07, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.07, 1.07,
1.07, 1.07), DateManufacture = c("1/10/2017", "1/10/2017", "1/10/2017",
"1/10/2017", "1/10/2017", "1/10/2017", "1/10/2017", "1/10/2017",
"1/10/2017", "1/10/2017", "1/10/2017", "1/10/2017", "1/6/2018",
"1/6/2018", "1/6/2018", "1/6/2018", "1/6/2018", "1/6/2018", "1/6/2018",
"1/6/2018", "1/6/2018", "1/6/2018", "1/6/2018", "1/6/2018", "1/8/2018",
"1/8/2018", "1/8/2018", "1/8/2018", "1/8/2018", "1/8/2018", "1/8/2018",
"1/8/2018", "1/8/2018", "1/8/2018"), ExpiryDate = c("1/9/2019",
"1/9/2019", "1/9/2019", "1/9/2019", "1/9/2019", "1/9/2019", "1/9/2019",
"1/9/2019", "1/9/2019", "1/9/2019", "1/9/2019", "1/9/2019", "1/5/2020",
"1/5/2020", "1/5/2020", "1/5/2020", "1/5/2020", "1/5/2020", "1/5/2020",
"1/5/2020", "1/5/2020", "1/5/2020", "1/5/2020", "1/5/2020", "1/7/2020",
"1/7/2020", "1/7/2020", "1/7/2020", "1/7/2020", "1/7/2020", "1/7/2020",
"1/7/2020", "1/7/2020", "1/7/2020"), ShelfLifeYrs = c(2L, 2L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L
), ShelfLifeDysRecpt = c(-547L, -547L, -547L, -547L, -547L, -547L,
-547L, -547L, -547L, -547L, -547L, -547L, -304L, -304L, -304L,
-304L, -304L, -304L, -304L, -304L, -304L, -304L, -304L, -304L,
-243L, -243L, -243L, -243L, -243L, -243L, -243L, -243L, -243L,
-243L), DateReceiptSample = c("1/3/2021", "1/3/2021", "1/3/2021",
"1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021",
"1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021",
"1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021",
"1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021",
"1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021", "1/3/2021",
"1/3/2021"), COADateIssue = c("27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021", "27/5/2021",
"27/5/2021"), TestingOutcome = c("Pass", "Pass", "Pass", "Pass",
"Pass", "Pass", "Pass", "Pass", "Pass", "Pass", "Pass", "Pass",
"Fail", "Fail", "Fail", "Fail", "Fail", "Fail", "Fail", "Fail",
"Fail", "Fail", "Fail", "Fail", "Pass", "Pass", "Pass", "Pass",
"Pass", "Pass", "Pass", "Pass", "Pass", "Pass"), FailureReason = c("",
"", "", "", "", "", "", "", "", "", "", "", "Dihydroartemisinin Dissolution",
"Dihydroartemisinin Dissolution", "Dihydroartemisinin Dissolution",
"Dihydroartemisinin Dissolution", "Dihydroartemisinin Dissolution",
"Dihydroartemisinin Dissolution", "Dihydroartemisinin Dissolution",
"Dihydroartemisinin Dissolution", "Dihydroartemisinin Dissolution",
"Dihydroartemisinin Dissolution", "Dihydroartemisinin Dissolution",
"Dihydroartemisinin Dissolution", "", "", "", "", "", "", "",
"", "", ""), Dissolution = c(77L, 81L, 84L, 86L, 82L, 81L, 100L,
96L, 98L, 101L, 97L, 102L, 62L, 59L, 62L, 66L, 65L, 61L, 99L,
95L, 97L, 103L, 99L, 102L, 97L, 80L, 81L, 86L, 80L, 80L, 103L,
101L, 101L, 101L), Dissolutionperc = c(0.77, 0.81, 0.84, 0.86,
0.82, 0.81, 1, 0.96, 0.98, 1.01, 0.97, 1.02, 0.62, 0.59, 0.62,
0.66, 0.65, 0.61, 0.99, 0.95, 0.97, 1.03, 0.99, 1.02, 0.97, 0.8,
0.81, 0.86, 0.8, 0.8, 1.03, 1.01, 1.01, 1.01), DissolLL = c(70L,
70L, 70L, 70L, 70L, 70L, 80L, 80L, 80L, 80L, 80L, 80L, 70L, 70L,
70L, 70L, 70L, 70L, 80L, 80L, 80L, 80L, 80L, 80L, 70L, 70L, 70L,
70L, 70L, 70L, 80L, 80L, 80L, 80L), DissolutionpercLL = c(0.7,
0.7, 0.7, 0.7, 0.7, 0.7, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.7, 0.7,
0.7, 0.7, 0.7, 0.7, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.7, 0.7, 0.7,
0.7, 0.7, 0.7, 0.8, 0.8, 0.8, 0.8), Mass = c(NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), pH = c(NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA), Dosageform = c("Tablet", "Tablet", "Tablet", "Tablet", "Tablet",
"Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet",
"Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet",
"Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet",
"Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet", "Tablet",
"Tablet"), Therapeuticclass = c("Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial",
"Antimalarial", "Antimalarial", "Antimalarial", "Antimalarial"
), memberstate = c("", "", "", "", "", "ruvimbo", "ruvimbo",
"ruvimbo", "ruvimbo", "ruvimbo", "ruvimbo", "ruvimbo", "ruvimbo",
"ruvimbo", "ruvimbo", "ruvimbo", "ruvimbo", "ruvimbo", "ruvimbo",
"ruvimbo", "ruvimbo", "ruvimbo", "", "", "", "", "", "", "",
"", "", "", "", "")), class = "data.frame", row.names = c(NA,
-34L))
Regards
Chris

How to split a dictionary into train and test sets for few-shot learning

I am training a few-shot learning algorithm and I need to split my dictionary into training and test sets based on the "types" key.
"word" holds the tokenized sentences, the corresponding tag sequences are in "label", and "types" lists the entity types present in the data. Here is what the data looks like:
{ "word":
[
["it", "was", "produced", "under", "the", "banner", "of", "pritish", "nandy", "communications", "and", "was", "written", "and", "directed", "by", "first", "time", "director", "saket", "chaudhary", "."],
["``", "the", "new", "york", "times", "``", "described", "the", "book", "as", "a", "``", "user", "'s", "manual", "for", "how", "to", "attract", "targeted", "traffic", "by", "a", "deeper", "understanding", "of", "google", "adsense", "code", "''", "."],
["the", "commodore", "1571", "is", "commodore", "'s", "high-end", "5\u00bc", "''", "floppy", "disk", "drive", "."],
["her", "interest", "in", "environmental", "and", "social", "issues", "led", "her", "to", "obtain", "a", "photography", "degree", "from", "edinburgh", "college", "of", "art", "and", "a", "m.a", "in", "photojournalism", "and", "documentary", "photography", "from", "the", "london", "college", "of", "communication", "in", "2006", "."],
["in", "1852", ",", "he", "sold", "a", "landscape", "painting", "``", "aus", "dem", "bondhusthal", "``", "(", "``", "from", "the", "bondhusdalen", "``", ")", ",", "to", "bridgewater", "gallery", "in", "london", "."]
],
"label":
[
["O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "person-director", "person-director", "O"],
["O", "art-other", "art-other", "art-other", "art-other", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O"],
["O", "product-other", "product-other", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O"],
["O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "other-educationaldegree", "other-educationaldegree", "O", "O", "O", "O", "O", "O", "O", "other-educationaldegree", "other-educationaldegree", "other-educationaldegree", "other-educationaldegree", "other-educationaldegree", "other-educationaldegree", "O", "O", "O", "O", "O", "O", "O", "O", "O"],
["O", "O", "O", "O", "O", "O", "O", "O", "O", "art-painting", "art-painting", "art-painting", "O", "O", "O", "art-painting", "art-painting", "art-painting", "O", "O", "O", "O", "O", "O", "O", "O", "O"]
],
"types": ["art-other", "product-other", "other-educationaldegree"]
}
I want to split the data into train and test and keep the same structure intact.
I followed the link below, but it did not work out in my case:
Split a dictionary where values of keys are multiple lists into train and test set Python
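One way that seems to keep the structure intact is to split the entity types first and then route each sentence to the split whose types it contains. This is only a sketch of that idea (split_by_types and test_fraction are made-up names, and it assumes the few-shot setup wants train and test types to be disjoint); data is the dictionary shown above:
import random

def split_by_types(data, test_fraction=0.4, seed=0):
    # Split the dict so that train and test use disjoint entity types,
    # while keeping the same {"word", "label", "types"} structure.
    rng = random.Random(seed)
    types = list(data["types"])
    rng.shuffle(types)
    n_test = max(1, int(len(types) * test_fraction))
    test_types = set(types[:n_test])
    train_types = set(types[n_test:])

    train = {"word": [], "label": [], "types": sorted(train_types)}
    test = {"word": [], "label": [], "types": sorted(test_types)}
    for words, labels in zip(data["word"], data["label"]):
        sentence_types = {tag for tag in labels if tag != "O"}
        # A sentence that mentions any test type goes to the test split.
        target = test if sentence_types & test_types else train
        target["word"].append(words)
        target["label"].append(labels)
    return train, test

train_set, test_set = split_by_types(data, test_fraction=1/3)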

How can I make the averageHighTemps function take the weather database as an argument and return the average of the monthly high temperatures?

def main():
    highTemps = [-3, -2, 3, 11, 19, 23, 26, 25, 20, 13, 6, 0]
    lowTemps = [-11, -10, -5, 1, 8, 13, 16, 15, 11, 5, -1, -7]
    weatherDB = createDB(highTemps, lowTemps)
    for m in weatherDB:
        for t in weatherDB[m]:
            print(m, t, weatherDB[m][t])
    m = input("Enter a Month Name: ")
    if m in weatherDB:
        print(weatherDB[m])
    else:
        print("Month not found")

def tempByMonth(weatherDB, month):
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"]
    weatherDB = {}
    for i in range(len(months)):
        month = months[i]
        weatherDB[month]
    return weatherDB

def averageHighTemps(weatherDB):
    # Here is the function
    return

# DO NOT change this function:
def createDB(highTemps, lowTemps):
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"]
    weatherDB = {}
    for i in range(len(months)):
        month = months[i]
        weatherDB[month] = {"high": highTemps[i], "low": lowTemps[i]}
    return weatherDB

main()
How can I make the averageHighTemps function take the weather database as an argument and return the average of the monthly high temperatures? The code should live only inside the averageHighTemps function.
I'm not entirely sure what you're after, but this takes weatherDB as an argument, builds a list of the high temperature for each month, and calculates the mean from that.
def averageHighTemps(weatherDB):
    highs = [month["high"] for month in weatherDB.values()]
    avg = sum(highs) / len(weatherDB)
    return avg
You would then probably want to put this in your main() function:
print(f"The average high temperature was: {averageHighTemps(weatherDB)}")

Fitting an ellipse to points: why does the fit work for some datasets while for others the ellipse is rotated by 90 degrees?

I have trouble fitting an ellipse to a set of 2D data points. The code seems to work for some datasets, while for others the ellipse is rotated 90 degrees but always has the right shape and center. I am trying to make the SAME code work for all the datasets. Here is the code (from https://github.com/ndvanforeest/fit_ellipse):
I have already looked at Fitting an ellipse to a set of 2-D points but I still have the problem.
import matplotlib.pyplot as plt
import numpy as np
import math
from numpy.linalg import eig, inv
from matplotlib.patches import Ellipse

def fitEllipse(x, y):
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2; C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    a = V[:, n]
    return a

def ellipse_center(a):
    b, c, d, f, g, a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
    num = b*b - a*c
    x0 = (c*d - b*f)/num
    y0 = (a*f - b*d)/num
    return np.array([x0, y0])

def ellipse_angle_of_rotation(a):
    b, c, d, f, g, a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
    return 0.5*np.arctan(2*b/(a-c))

def ellipse_axis_length(a):
    b, c, d, f, g, a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
    up = 2*(a*f*f + c*d*d + g*b*b - 2*b*d*f - a*c*g)
    down1 = (b*b - a*c)*((c-a)*np.sqrt(1 + 4*b*b/((a-c)*(a-c))) - (c+a))
    down2 = (b*b - a*c)*((a-c)*np.sqrt(1 + 4*b*b/((a-c)*(a-c))) - (c+a))
    res1 = np.sqrt(up/down1)
    res2 = np.sqrt(up/down2)
    # Ensure res1 is the major axis and res2 is the minor axis
    if res2 > res1:
        res1, res2 = res2, res1
    return np.array([res1, res2])

def ellipse_angle_of_rotation2(a):
    b, c, d, f, g, a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
    if a > c:
        return np.arctan(2*b/(a-c))/2
    else:
        return np.pi/2 + np.arctan(2*b/(a-c))/2
Here is an example of a dataset that does NOT work: the ellipse is rotated 90 degrees, as you can see in this plot: https://i.stack.imgur.com/khpel.png:
# --------------------------------------------------------------------------
x = np.array([40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 40.5, 41.5, 41.5, 41.5, 41.5, 41.5, 41.5, 41.5, 41.5, 41.5, 42.5, 42.5, 42.5, 42.5, 42.5, 42.5, 42.5, 42.5, 42.5, 42.5, 43.5, 43.5, 43.5, 43.5, 43.5, 43.5, 44.5, 44.5, 44.5, 44.5, 44.5, 44.5, 44.5, 44.5, 45.5, 45.5, 45.5, 45.5, 45.5, 46.5, 46.5, 46.5, 46.5, 46.5, 46.5, 47.5, 47.5, 47.5, 47.5, 47.5, 47.5, 47.5, 48.5, 48.5, 48.5, 48.5, 49.5, 49.5, 49.5, 50.5, 50.5, 50.5, 50.5, 51.5, 51.5, 51.5, 51.5, 52.5, 52.5, 52.5, 52.5, 52.5, 53.5, 53.5, 53.5, 53.5, 53.5, 53.5, 54.5, 54.5, 54.5, 54.5, 54.5, 54.5, 55.5, 55.5, 55.5, 55.5, 55.5, 55.5, 55.5, 55.5, 56.5, 56.5, 56.5, 56.5, 56.5, 57.5, 57.5, 57.5, 57.5, 58.5, 58.5, 58.5, 58.5, 58.5, 58.5, 59.5, 59.5, 59.5, 59.5, 59.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 61.5])
y = np.array([42.5, 43.5, 46.5, 48.5, 49.5, 50.5, 51.5, 52.5, 54.5, 56.5, 57.5, 60.5, 41.5, 44.5, 45.5, 47.5, 53.5, 55.5, 57.5, 59.5, 60.5, 33.5, 37.5, 38.5, 39.5, 40.5, 58.5, 60.5, 62.5, 63.5, 64.5, 32.5, 34.5, 35.5, 36.5, 61.5, 64.5, 28.5, 29.5, 31.5, 65.5, 66.5, 67.5, 68.5, 71.5, 27.5, 30.5, 69.5, 70.5, 72.5, 24.5, 25.5, 26.5, 73.5, 74.5, 75.5, 20.5, 21.5, 22.5, 23.5, 76.5, 78.5, 79.5, 18.5, 19.5, 77.5, 79.5, 16.5, 17.5, 80.5, 16.5, 17.5, 81.5, 82.5, 18.5, 79.5, 80.5, 81.5, 19.5, 20.5, 21.5, 23.5, 78.5, 22.5, 24.5, 25.5, 75.5, 76.5, 77.5, 24.5, 26.5, 27.5, 28.5, 73.5, 74.5, 29.5, 30.5, 31.5, 32.5, 69.5, 70.5, 71.5, 72.5, 33.5, 65.5, 66.5, 67.5, 68.5, 34.5, 35.5, 36.5, 64.5, 37.5, 38.5, 59.5, 60.5, 61.5, 63.5, 39.5, 41.5, 57.5, 58.5, 62.5, 40.5, 42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5, 49.5, 51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 50.5])
And here is an example of a dataset for which the code works, as you can see in this plot: https://i.stack.imgur.com/aYTrS.png:
# -----------------------------------------------------------------------------
x = np.array([25.5, 25.5, 25.5, 26.5, 26.5, 26.5, 26.5, 27.5, 27.5, 27.5, 27.5, 28.5, 28.5, 29.5, 29.5, 30.5, 30.5, 31.5, 31.5, 32.5, 32.5, 33.5, 33.5, 34.5, 34.5, 35.5, 35.5, 36.5, 36.5, 37.5, 37.5, 38.5, 38.5, 39.5, 39.5, 40.5, 40.5, 41.5, 41.5, 42.5, 42.5, 43.5, 43.5, 44.5, 44.5, 45.5, 45.5, 46.5, 46.5, 47.5, 47.5, 48.5, 48.5, 49.5, 49.5, 49.5, 50.5, 50.5, 51.5, 51.5, 52.5, 52.5, 53.5, 53.5, 54.5, 54.5, 55.5, 55.5, 56.5, 56.5, 57.5, 57.5, 58.5, 58.5, 59.5, 59.5, 60.5, 60.5, 61.5, 61.5, 62.5, 62.5, 62.5, 63.5, 63.5, 64.5, 64.5, 65.5, 65.5, 66.5, 66.5, 66.5, 66.5, 67.5, 67.5, 67.5, 68.5, 68.5, 68.5, 69.5, 69.5, 70.5, 70.5, 70.5, 71.5, 71.5, 71.5, 71.5, 71.5, 71.5, 72.5, 72.5])
y = np.array([54.5, 55.5, 57.5, 50.5, 53.5, 56.5, 58.5, 50.5, 51.5, 52.5, 58.5, 49.5, 58.5, 48.5, 58.5, 48.5, 59.5, 47.5, 60.5, 46.5, 61.5, 45.5, 61.5, 44.5, 60.5, 44.5, 60.5, 43.5, 61.5, 42.5, 60.5, 42.5, 60.5, 42.5, 61.5, 41.5, 61.5, 40.5, 61.5, 40.5, 61.5, 39.5, 61.5, 39.5, 61.5, 39.5, 62.5, 38.5, 61.5, 38.5, 61.5, 38.5, 61.5, 37.5, 61.5, 62.5, 37.5, 60.5, 37.5, 61.5, 38.5, 60.5, 37.5, 60.5, 37.5, 60.5, 37.5, 60.5, 37.5, 59.5, 37.5, 59.5, 36.5, 58.5, 37.5, 57.5, 36.5, 58.5, 37.5, 57.5, 36.5, 37.5, 56.5, 38.5, 55.5, 38.5, 55.5, 37.5, 54.5, 38.5, 39.5, 53.5, 54.5, 38.5, 40.5, 52.5, 39.5, 40.5, 52.5, 41.5, 51.5, 40.5, 49.5, 50.5, 41.5, 42.5, 43.5, 44.5, 47.5, 48.5, 45.5, 46.5])
Here is the rest of the code:
s = fitEllipse(x,y)
center = ellipse_center(s)
phi = ellipse_angle_of_rotation2(s)
axes = ellipse_axis_length(s)
print("The angle of rotation is", (phi*180/np.pi))
# get the individual axes
a, b = axes
ell = Ellipse(center, 2*a, 2*b, phi* (180 / np.pi), facecolor='none', edgecolor='black' )
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
ax.add_artist(ell)
ell.set_clip_box(ax.bbox)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
scat = plt.scatter(x, y, c = "r", s = 1)
plt.show()
I want to make the SAME code work for both datasets.
A very robust method, valid for any angle of rotation, is to fit the general conic equation A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0 to the points by linear least squares.
From the coefficients of that equation it is easy to compute the coordinates of the center, the lengths of the axes, the angle of rotation, the foci, etc. of the ellipse. See Eqs. (15)-(21) in: https://mathworld.wolfram.com/Ellipse.html
More general information in: https://fr.scribd.com/doc/14819165/Regressions-coniques-quadriques-circulaire-spherique (written in French).
Note that this method is also valid for fitting a hyperbola. Of course, the method is far from new; it can be found in the general literature on fitting polynomial functions.
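Below is a minimal NumPy sketch of that approach (my own illustration, not the code from the answer above; fit_conic_lsq and conic_to_ellipse are made-up names). It assumes the ellipse does not pass through the origin, so the constant term can be normalized to F = -1 before the least-squares solve:
import numpy as np

def fit_conic_lsq(x, y):
    # Least-squares fit of A*x^2 + B*x*y + C*y^2 + D*x + E*y = 1
    # (the general conic with the constant term fixed at F = -1).
    M = np.column_stack([x * x, x * y, y * y, x, y])
    coeffs, *_ = np.linalg.lstsq(M, np.ones_like(x), rcond=None)
    return coeffs  # A, B, C, D, E

def conic_to_ellipse(coeffs):
    A, B, C, D, E = coeffs
    F = -1.0
    Q = np.array([[A, B / 2], [B / 2, C]])   # quadratic part
    L = np.array([D, E])                     # linear part
    center = -0.5 * np.linalg.solve(Q, L)
    k = -(F + 0.5 * L @ center)              # centred equation: u^T Q u = k
    evals, evecs = np.linalg.eigh(Q)
    semi_axes = np.sqrt(k / evals)           # semi-axis lengths
    major = np.argmax(semi_axes)
    angle_deg = np.degrees(np.arctan2(evecs[1, major], evecs[0, major]))
    order = np.argsort(semi_axes)[::-1]      # major axis first
    return center, semi_axes[order], angle_deg

Because the angle is read directly off the eigenvector belonging to the major axis, there is no sign-dependent branch like the one in ellipse_angle_of_rotation2, which is a likely source of the 90-degree flips. You could then draw the result with, for example:
center, (a_semi, b_semi), angle_deg = conic_to_ellipse(fit_conic_lsq(x, y))
ell = Ellipse(center, 2 * a_semi, 2 * b_semi, angle=angle_deg,
              facecolor='none', edgecolor='black')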

Not Enough Arguments For an IF Statement...But There Are

When I run this formula in Excel, it tells me that the IF statement has only 1 argument. I am not sure why it says that when I have 3 arguments. Within the OR statement I have 2 different AND statements, and it works just fine if I remove the second AND statement. Did I misplace a parenthesis somewhere? I am not sure what is wrong. Any help would be greatly appreciated. Thanks!
=IF(OR(ARRAYFORMULA(SUM(COUNTIF(B19:O19,{"I","Ip","Ia","It","Ih","A","Aa","Ap","At","Ah","X","R","Rt","Rx","Rp","Rh","K","Kt","E","Et","AL","HL","TV*","FFSL","ADM*"})))=10, AND(ARRAYFORMULA(SUM(COUNTIF(B19:O19,{"R-10","Rx-10*","Rp-10","Rt-10*","Rh-10","I-10","Ia-10","Ip-10","It-10","Ih-10","X-10*","A-10*","At-10"})))=4, ARRAYFORMULA(SUM(COUNTIF(B19:O19,{"I","Ip","Ia","It","Ih","A","Aa","Ap","At","Ah","X","R","Rt","Rx","Rp","Rh","K","Kt","E","Et","AL","HL","TV*","FFSL","ADM*"})))=5),AND(ARRAYFORMULA(SUM(COUNTIF(B19:O19,{"HL-9","X-9","N-9","E-9","J-9","Jh-9","Nh-9","Eh-9"})))=8,ARRAYFORMULA(SUM(COUNTIF(B19:O19,{"I","Ip","Ia","It","Ih","A","Aa","Ap","At","Ah","X","R","Rt","Rx","Rp","Rh","K","Kt","E","Et","AL","HL","TV*","FFSL","ADM*"})))=1) ,"80 Hours","Error"))
This question makes me think "If only there was an online Excel Formula Beautifier".
Oh look, there is.
If you copy-and-paste it into the beautifier you get the code below.
You can now see that your parameters "80 Hours", "Error" are arguments of the OR function, not the IF function.
=IF(
  OR(
    ARRAYFORMULA(
      SUM(
        COUNTIF(
          B19:O19,
          {"I","Ip","Ia","It","Ih","A","Aa","Ap","At","Ah","X","R","Rt","Rx","Rp","Rh","K","Kt","E","Et","AL","HL","TV*","FFSL","ADM*"}
        )
      )
    ) = 10,
    AND(
      ARRAYFORMULA(
        SUM(
          COUNTIF(
            B19:O19,
            {"R-10","Rx-10*","Rp-10","Rt-10*","Rh-10","I-10","Ia-10","Ip-10","It-10","Ih-10","X-10*","A-10*","At-10"}
          )
        )
      ) = 4,
      ARRAYFORMULA(
        SUM(
          COUNTIF(
            B19:O19,
            {"I","Ip","Ia","It","Ih","A","Aa","Ap","At","Ah","X","R","Rt","Rx","Rp","Rh","K","Kt","E","Et","AL","HL","TV*","FFSL","ADM*"}
          )
        )
      ) = 5
    ),
    AND(
      ARRAYFORMULA(
        SUM(
          COUNTIF(
            B19:O19,
            {"HL-9","X-9","N-9","E-9","J-9","Jh-9","Nh-9","Eh-9"}
          )
        )
      ) = 8,
      ARRAYFORMULA(
        SUM(
          COUNTIF(
            B19:O19,
            {"I","Ip","Ia","It","Ih","A","Aa","Ap","At","Ah","X","R","Rt","Rx","Rp","Rh","K","Kt","E","Et","AL","HL","TV*","FFSL","ADM*"}
          )
        )
      ) = 1
    ),
    "80 Hours",
    "Error"
  )
)
