I am trying to plot the Pareto front of a TuneMultiCritResult object, tuned with a control object of class TuneMultiCritControlMBO:
# multi-criteria optimization of (tpr, fpr) with MBO
lrn = makeLearner("classif.ksvm")
rdesc = makeResampleDesc("Holdout")
ps = makeParamSet(
makeNumericParam("C", lower = -12, upper = 12, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -12, upper = 12, trafo = function(x) 2^x)
)
ctrl = makeTuneMultiCritControlMBO()
res = tuneParamsMultiCrit(lrn, sonar.task, rdesc, par.set = ps,
measures = list(tpr, fpr), control = ctrl)
Printing the object res gives the following:
> res
Tune multicrit result:
Points on front: 14
> res$ind
[1] 1 2 4 5 6 7 9 11 12 14 15 16 17 18
But the optimization path saved in res$opt.path has only 10 points, the ones proposed by MBO, I guess.
> res$opt.path
Optimization path
Dimensions: x = 2/2, y = 2
Length: 10
Add x values transformed: FALSE
Error messages: TRUE. Errors: 0 / 10.
Exec times: TRUE. Range: 0.031 - 0.041. 0 NAs.
Since the function plotTuneMultiCritResult relies on res$ind and res$opt.path to plot the front, it shows odd results.
I think the correct way to go is to copy the optimization path stored in res$mbo.result$opt.path into res$opt.path, but my question is: what is the point of having different optimization paths in res$opt.path and res$mbo.result$opt.path?
Thanks!!
Víctor
Using mlr 2.13 and mlrMBO 1.1.3 and the following code, everything works as expected. I suggest that you use the MBO control object to specify how many iterations your optimization should have. Otherwise a default (4*d evaluations for the initial design + 10 iterations) will be used.
set.seed(1)
library(mlr)
library(mlrMBO)
# multi-criteria optimization of (tpr, fpr) with MBO
lrn = makeLearner("classif.ksvm")
rdesc = makeResampleDesc("Holdout")
ps = makeParamSet(
makeNumericParam("C", lower = -12, upper = 12, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -12, upper = 12, trafo = function(x) 2^x)
)
mbo.ctrl = makeMBOControl(n.objectives = 2)
mbo.ctrl = setMBOControlTermination(mbo.ctrl, iters = 20)
ctrl = makeTuneMultiCritControlMBO(n.objectives = 2, mbo.control = mbo.ctrl) # pass mbo.ctrl so the termination setting above is actually used
res = tuneParamsMultiCrit(lrn, sonar.task, rdesc, par.set = ps,
measures = list(tpr, fpr), control = ctrl)
plotTuneMultiCritResult(res = res, path = FALSE) # path = FALSE shows only the Pareto front
I have code that defines a function, and then I try to use variables defined within that function in another expression. When I do this, I get the error:
Undefined function or variable 'phi'.
I'm not sure why phi is undefined, since I assign it in the if/else statement.
It might be better to explain with my (shortened) code:
global I11 I22 I33 Mx My Mz w10 w20 w30 eps10 eps20 eps30 eps40...
C110 C120 C130 C210 C220 C230 C310 C320 C330 IC K0
IC = [w10 w20 w30...
eps10 eps20 eps30 eps40...
C110 C120 C130 C210 C220 C230 C310 C320 C330];
opts = odeset('RelTol', 1*10^(-10),'AbsTol', 1*10^(-10));
[t, y] = ode45(@(t,y) DynEqn2(t,y,I11,I22,I33,Mx,My,Mz), [ti tf], IC, opts);
N = sqrt(sum(y(:,4:7).^2,2));
kap = acosd(1-2*y(:,5).^2-2*y(:,7).^2);
phi1 = acosd((2.*(y(:,4).*y(:,5)+y(:,6).*y(:,7)))/sind(kap));
phi2 = asind((2.*(y(:,6).*y(:,4)-y(:,5).*y(:,7)))/sind(kap));
if phi1==phi2
phi = phi1;
elseif phi1==180-phi2
phi = phi1;
elseif -phi1==phi2
phi = -phi1;
elseif -phi1==180-phi2
phi = -phi1;
else
disp('Something is wrong with phi')
end
figure (1)
plot(t,phi)
figure (2)
plot(t,kap)
function soln = DynEqn2(t,y,I11,I22,I33,Mx,My,Mz)
w1 = y(1);
w2 = y(2);
w3 = y(3);
eps1 = y(4);
eps2 = y(5);
eps3 = y(6);
eps4 = y(7);
C11 = y(8);
C12 = y(9);
C13 = y(10);
C21 = y(11);
C22 = y(12);
C23 = y(13);
C31 = y(14);
C32 = y(15);
C33 = y(16);
w1_dot = (Mx - w2*w3*(I33-I22))/I11;
w2_dot = (My - w1*w3*(I11-I33))/I22;
w3_dot = (Mz - w1*w2*(I22-I11))/I33;
eps1_dot = .5*(w1*eps4-w2*eps3+w3*eps2);
eps2_dot = .5*(w1*eps3+w2*eps4-w3*eps1);
eps3_dot = .5*(-w1*eps2+w2*eps1+w3*eps4);
eps4_dot = -.5*(w1*eps1+w2*eps2+w3*eps3);
C11_dot = C12*w3-C13*w2;
C12_dot = C13*w1-C11*w3;
C13_dot = C11*w2-C12*w1;
C21_dot = C22*w3-C23*w2;
C22_dot = C23*w1-C21*w3;
C23_dot = C21*w2-C22*w1;
C31_dot = C32*w3-C33*w2;
C32_dot = C33*w1-C31*w3;
C33_dot = C31*w2-C32*w1;
soln = [w1_dot; w2_dot; w3_dot; ...
eps1_dot; eps2_dot; eps3_dot; eps4_dot; ...
C11_dot; C12_dot; C13_dot; C21_dot; C22_dot; C23_dot; C31_dot; C32_dot; C33_dot];
end
The lines where I calculate phi1 and phi2, and then the if/else statement to find phi, are what I am struggling with.
I made sure that the variables defined in the function work: for example, in the command window I typed 'y(:,4)' and got the correct output. But whenever I try to use them in those expressions, e.g. 'phi1', it repeatedly outputs an incorrect value of '90.0000' until I stop it.
The line where I define the 'N' variable is similar, yet that one works without errors.
Does anyone have any ideas how to amend this issue?
Any help is appreciated, thanks.
Edit: The complete error message is as follows:
Undefined function or variable 'phi'.
Error in HW6_Q1 (line 85)
plot(t,phi)
I figured out the solution with help from a colleague outside of Stack Overflow.
I had forgotten the ./ (element-wise division) when dividing by sind(kap), which turned my phi into a matrix rather than the vector I wanted.
I have been learning about speech recognition recently, and I have learned that the idea of prefix beam search is to merge paths that correspond to the same prefix, such as [1,1,_] and [_,1,_] (here _ indicates the blank symbol).
Based on this understanding, I implemented my own version, which can be summarized in pseudo-code like this:
def prefix_beam_search(y, beam_size, blank):
    seq_len, n_class = y.shape
    logY = np.log(y)
    beam = [([], 0)]
    for t in range(seq_len):
        buff = []
        for prefix, p in beam:
            for i in range(n_class):
                new_prefix = list(prefix) + [i]
                new_p = p + logY[t][i]
                buff.append((new_prefix, new_p))
        # merge the paths with same prefix
        new_beam = defaultdict(lambda: ninf)
        for prefix, p in buff:
            # 'norm_prefix' can simplify the path, [1,1,_,2] ==> [1,2]
            # However, the ending 'blank' is retained, [1,1,_] ==> [1,_]
            prefix = norm_prefix(prefix, blank)
            new_beam[prefix] = logsumexp(new_beam[prefix], p)
        # choose the best paths
        new_beam = sorted(new_beam.items(), key=lambda x: x[1], reverse=True)
        beam = new_beam[:beam_size]
    return beam
But most of the versions I found online (according to the paper) are like this:
def _prefix_beam_decode(y, beam_size, blank):
    T, V = y.shape
    log_y = np.log(y)
    beam = [(tuple(), (0, ninf))]
    for t in range(T):
        new_beam = defaultdict(lambda: (ninf, ninf))
        for prefix, (p_b, p_nb) in beam:
            for i in range(V):
                p = log_y[t, i]
                if i == blank:
                    new_p_b, new_p_nb = new_beam[prefix]
                    new_p_b = logsumexp(new_p_b, p_b + p, p_nb + p)
                    new_beam[prefix] = (new_p_b, new_p_nb)
                    continue
                end_t = prefix[-1] if prefix else None
                new_prefix = prefix + (i,)
                new_p_b, new_p_nb = new_beam[new_prefix]
                if i != end_t:
                    new_p_nb = logsumexp(new_p_nb, p_b + p, p_nb + p)
                else:
                    new_p_nb = logsumexp(new_p_nb, p_b + p)
                new_beam[new_prefix] = (new_p_b, new_p_nb)
                if i == end_t:
                    new_p_b, new_p_nb = new_beam[prefix]
                    new_p_nb = logsumexp(new_p_nb, p_nb + p)
                    new_beam[prefix] = (new_p_b, new_p_nb)
        beam = sorted(new_beam.items(), key=lambda x: logsumexp(*x[1]), reverse=True)
        beam = beam[:beam_size]
    return beam
The results of the two are different, and my version tends to return longer strings. There are two main aspects I don't quite understand:
Are there any details in my version that are not handled correctly?
The common version always generates a new prefix with new_prefix = prefix + (i,), regardless of whether the last character of the previous prefix is the same as the new character. For example, if the old prefix is [a,a,b] and the new character is again b, both [a,a,b] and [a,a,b,b] are kept. What is the purpose of this? And doesn't it cause double counting?
Looking forward to your answer, thanks in advance!
When you choose the best paths in your code, you don't want to differentiate between [1,_] and [1] since both correspond to the same prefix [1].
If you have for example:
[1], [1,_], [1,2]
then you want the probability of [1] and [1,_] both to have the sum of the two.
probability([1]) = probability([1])+probability([1,_])
probability([1,_]) = probability([1])+probability([1,_])
And after sorting by these merged probabilities, you may want to keep enough entries that the number of distinct true prefixes is beam_size.
For example, you have [1], [1,_], [2], [3].
with probabilities 0.1, 0.08, 0.11 and 0.15 respectively.
Then the probabilities with which you want to sort them are:
0.18, 0.18, 0.11, 0.15, respectively (0.18 = 0.1 + 0.08)
Sorted: [1]:0.18, [1,_]: 0.18, [3]:0.15, [2]:0.11
And if you have beam_size 2, for example, then you may want to keep
[1], [1,_] and [3] so that you have 2 prefixes in your beam, because [1] and [1,_] count as the same prefix (as long as the next character is not 1 - that's why we keep track of [1] and [1,_] separately).
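To make that selection step concrete, here is a minimal Python sketch of the idea (the names select_beam and true_prefix are made up for illustration; true_prefix stands for whatever strips the trailing blank from a normalized path, and paths are assumed to be tuples so they can be used as dict keys):

import numpy as np

def log_add(*xs):
    # numerically stable log(exp(x1) + exp(x2) + ...)
    xs = np.asarray(xs, dtype=float)
    m = xs.max()
    return m if np.isneginf(m) else m + np.log(np.exp(xs - m).sum())

def select_beam(scored_paths, true_prefix, beam_size):
    # scored_paths: list of (path, log_prob); a path may end in a blank
    # merged score of each true prefix = log-sum-exp over all paths that share it
    merged = {}
    for path, lp in scored_paths:
        key = true_prefix(path)
        merged[key] = log_add(merged[key], lp) if key in merged else lp
    # keep every path whose true prefix is among the beam_size best,
    # so [1] and [1,_] survive or are dropped together
    top = set(sorted(merged, key=merged.get, reverse=True)[:beam_size])
    return [(path, lp) for path, lp in scored_paths if true_prefix(path) in top]

Applied to the example above (working in ordinary probabilities instead of log-probabilities), beam_size = 2 keeps [1], [1,_] and [3], since the merged scores rank [1] (0.18) ahead of [3] (0.15) and [2] (0.11).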
I have a function which I have defined as follows; you can see that it clearly requires 7 arguments:
def calc_z(w,S,var,a1,a2,yt1,yt2):
    mu = w*S
    sigma = mt.sqrt(var)
    z = np.random.normal(mu,sigma)
    u = [a1,a2,z]
    yt = [yt1,yt2,1]
    thetaset = np.random.rand(len(u))
    m = [i for i in range(len(u))]
    max_iter = 30
    #Calculate E-step
    for i in range(max_iter):
        print 'Iteration:', i
        print 'z:', z
        print 'thetaset', thetaset
        devLz = eq6(var,w,S,z,yt,u,thetaset,m)
        dev2Lz2 = eq9(var,thetaset,u)
        #Calculate M-Step
        z = z - (devLz / dev2Lz2)
        w = lambdaw * z
        for i in range(len(thetaset)):
            devLTheta = eq7(yt,u,thetaset,lambdatheta)
            dev2LTheta2 = eq10(thetaset,u,lambdatheta)
            thetaset = thetaset - (devLTheta / dev2LTheta2)
    return z
I am using PySpark, so I convert this to a UDF:
calc_z_udf = udf(calc_z,FloatType())
and then run it as follows (where I am clearly passing in 7 arguments, or am I going mad!?):
data = data.withColumn('z', calc_z_udf(data['w'],data['Org_Depth_Diff_S'],data['var'],data['proximity_rank_a1'],data['cotravel_count_a2'],data['cotravel_yt1'],data['proximity_yt2']))
When I run this, however, I get an error which states:
TypeError: calc_z() takes exactly 7 arguments (6 given)
Could anyone help me understand why this might be, as it is clear that when I run the function I am in fact passing in 7 arguments and not 6 as the error states?
I am not sure it is the reason, but there is no need to pass Column objects; you can just pass the column names as strings:
data = data.withColumn('z', calc_z_udf('w', 'Org_Depth_Diff_S','var', 'proximity_rank_a1', 'cotravel_count_a2', 'cotravel_yt1', 'proximity_yt2'))
I'm trying to calculate the precision and recall using the nltk.metrics.score (http://www.nltk.org/_modules/nltk/metrics/scores.html) with my NLTK.NaiveBayesClassifier.
However, I stumbled upon the error:
"unsupported operand type(s) for +: 'int' and 'NoneType'".
which I suspect comes from my 10-fold cross-validation, where some reference sets contain zero negatives (the data set is a bit imbalanced: 87% of it is positive).
According to nltk.metrics.score,
def precision(reference, test):
    """Given a set of reference values and a set of test values, return
    the fraction of test values that appear in the reference set.
    In particular, return card(``reference`` intersection
    ``test``)/card(``test``).
    If ``test`` is empty, then return None."""
It seems that some of my 10 folds return recall as None since there are no Negatives in the reference set. Any idea how to approach this problem?
My full code is as follow:
trainfeats = negfeats + posfeats

n = 10  # 10-fold cross-validation
subset_size = len(trainfeats) // n
accuracy = []
pos_precision = []
pos_recall = []
neg_precision = []
neg_recall = []
pos_fmeasure = []
neg_fmeasure = []
cv_count = 1

for i in range(n):
    testing_this_round = trainfeats[i*subset_size:][:subset_size]
    training_this_round = trainfeats[:i*subset_size] + trainfeats[(i+1)*subset_size:]

    classifier = NaiveBayesClassifier.train(training_this_round)

    refsets = collections.defaultdict(set)
    testsets = collections.defaultdict(set)
    for i, (feats, label) in enumerate(testing_this_round):
        refsets[label].add(i)
        observed = classifier.classify(feats)
        testsets[observed].add(i)

    cv_accuracy = nltk.classify.util.accuracy(classifier, testing_this_round)
    cv_pos_precision = precision(refsets['Positive'], testsets['Positive'])
    cv_pos_recall = recall(refsets['Positive'], testsets['Positive'])
    cv_pos_fmeasure = f_measure(refsets['Positive'], testsets['Positive'])
    cv_neg_precision = precision(refsets['Negative'], testsets['Negative'])
    cv_neg_recall = recall(refsets['Negative'], testsets['Negative'])
    cv_neg_fmeasure = f_measure(refsets['Negative'], testsets['Negative'])

    accuracy.append(cv_accuracy)
    pos_precision.append(cv_pos_precision)
    pos_recall.append(cv_pos_recall)
    neg_precision.append(cv_neg_precision)
    neg_recall.append(cv_neg_recall)
    pos_fmeasure.append(cv_pos_fmeasure)
    neg_fmeasure.append(cv_neg_fmeasure)
    cv_count += 1

print('---------------------------------------')
print('N-FOLD CROSS VALIDATION RESULT ' + '(' + 'Naive Bayes' + ')')
print('---------------------------------------')
print('accuracy:', sum(accuracy) / n)
print('precision', (sum(pos_precision)/n + sum(neg_precision)/n) / 2)
print('recall', (sum(pos_recall)/n + sum(neg_recall)/n) / 2)
print('f-measure', (sum(pos_fmeasure)/n + sum(neg_fmeasure)/n) / 2)
print('')
Perhaps not the most elegant, but I guess the simplest fix would be setting it to 0, and to the actual value if it is not None, e.g.:
cv_pos_precision = 0
if precision(refsets['Positive'], testsets['Positive']):
    cv_pos_precision = precision(refsets['Positive'], testsets['Positive'])
And for the others as well, of course.
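If you would rather not repeat that pattern for all six metrics, a small helper can wrap it (safe_score is just an illustrative name, not part of NLTK; the usage lines mirror the refsets/testsets variables from the loop above):

def safe_score(metric_fn, reference, test):
    # Return 0.0 when NLTK's metric returns None (i.e. undefined for this fold).
    value = metric_fn(reference, test)
    return 0.0 if value is None else value

# usage inside the cross-validation loop
cv_neg_precision = safe_score(precision, refsets['Negative'], testsets['Negative'])
cv_neg_recall = safe_score(recall, refsets['Negative'], testsets['Negative'])

Keep in mind that counting an undefined fold as 0 pulls the averages down; an alternative is to collect only the non-None values and divide by the number of folds where the metric was actually defined.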
I am using R to extract tweets and analyse their sentiment; however, when I get to the lines below I get an error saying "Object of type 'closure' is not subsettable".
scores$drink = factor(rep(c("east"), nd))
scores$very.pos = as.numeric(scores$score >= 2)
scores$very.neg = as.numeric(scores$score <= -2)
Full code pasted below
load("twitCred.Rdata")
east_tweets <- filterStream("tweetselnd.json", locations = c(-0.10444, 51.408699, 0.33403, 51.64661),timeout = 120, oauth = twitCred)
tweets.df <- parseTweets("tweetselnd.json", verbose = FALSE)
##function score.sentiment
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
# Parameters
# sentences: vector of text to score
# pos.words: vector of words of postive sentiment
# neg.words: vector of words of negative sentiment
# .progress: passed to laply() to control of progress bar
scores = laply(sentences,
function(sentence, pos.words, neg.words)
{
# remove punctuation
sentence = gsub("[[:punct:]]", "", sentence)
# remove control characters
sentence = gsub("[[:cntrl:]]", "", sentence)
# remove digits?
sentence = gsub('\\d+', '', sentence)
# define error handling function when trying tolower
tryTolower = function(x)
{
# create missing value
y = NA
# tryCatch error
try_error = tryCatch(tolower(x), error=function(e) e)
# if not an error
if (!inherits(try_error, "error"))
y = tolower(x)
# result
return(y)
}
# use tryTolower with sapply
sentence = sapply(sentence, tryTolower)
# split sentence into words with str_split (stringr package)
word.list = str_split(sentence, "\\s+")
words = unlist(word.list)
# compare words to the dictionaries of positive & negative terms
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
# get the position of the matched term or NA
# we just want a TRUE/FALSE
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
# final score
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress )
# data frame with scores for each sentence
scores.df = data.frame(text=sentences, score=scores)
return(scores.df)
}
pos = readLines(file.choose())
neg = readLines(file.choose())
east_text = sapply(east_tweets, function(x) x$getText())
scores = score.sentiment(tweetseldn.json, pos, neg, .progress='text')
scores()$drink = factor(rep(c("east"), nd))
scores()$very.pos = as.numeric(scores()$score >= 2)
scores$very.neg = as.numeric(scores$score <= -2)
# how many very positives and very negatives
numpos = sum(scores$very.pos)
numneg = sum(scores$very.neg)
# global score
global_score = round( 100 * numpos / (numpos + numneg) )
If anyone could help with why I'm getting this error, it would be much appreciated. I've also seen other answers about adding '()' when referring to the variable 'scores', such as scores()$..., but that hasn't worked for me. Thank you.
The changes below got rid of the error:
x <- scores
x$drink = factor(rep(c("east"), nd))
x$very.pos = as.numeric(x$score >= 2)
x$very.neg = as.numeric(x$score <= -2)