forked from lawrennd/gp
-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathgpLogLikelihood.m
193 lines (184 loc) · 6.79 KB
/
gpLogLikelihood.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
function ll = gpLogLikelihood(model)
% GPLOGLIKELIHOOD Compute the log likelihood of a GP.
% FORMAT
% DESC computes the log likelihood of a data set given a GP model.
% The computation performed depends on the approximation stored in
% model.approx: 'ftc' (no approximation, full covariance), 'dtc' or
% 'dtcvar' (deterministic training conditional), 'fitc' (fully
% independent training conditional) or 'pitc' (partially independent
% training conditional). Within each case, spherical noise models are
% handled in vectorised form across output dimensions, while
% non-spherical models loop over output dimensions (and, for missing
% data, restrict to the present indices via gpDataIndices).
% ARG model : the GP model for which log likelihood is to be
% computed.
% RETURN ll : the log likelihood of the data in the GP model.
%
% SEEALSO : gpCreate, gpLogLikeGradients, modelLogLikelihood
%
% COPYRIGHT : Neil D. Lawrence, 2005, 2006, 2009

% GP

switch model.approx
 case 'ftc'
  % No approximation, just do a full computation on K.
  % For very high D, we use the matrix S which is M*M'
  if isfield(model, 'S')
    ll = -0.5*(model.d*model.logDetK_uu + sum(sum(model.invK_uu.* ...
                                                  model.S)));
    return;
  end
  ll = 0;
  for i = 1:size(model.m, 2)
    % Short-circuit || so model.isSpherical is only read when the field
    % exists (element-wise | would evaluate both operands regardless).
    if ~isfield(model, 'isSpherical') || model.isSpherical
      ll = ll -.5*model.logDetK_uu- .5*model.m(:, i)'*model.invK_uu*model.m(:, i);
    else
      if model.isMissingData
        m = model.m(model.indexPresent{i}, i);
      else
        m = model.m(:, i);
      end
      % Per-dimension kernel quantities are stored per output dim here.
      ll = ll - .5*model.logDetK_uu(i) - .5*m'*model.invK_uu{i}*m;
    end
  end
 case {'dtc', 'dtcvar'}
  % Deterministic training conditional.
  if ~isfield(model, 'isSpherical') || model.isSpherical
    E = model.K_uf*model.m;
    EET = E*E';
    if length(model.beta)==1
      ll = -0.5*(model.d*(-(model.N-model.k)*log(model.beta) ...
                          - model.logDetK_uu +model.logdetA) ...
                 - (sum(sum(model.Ainv.*EET)) ...
                    -sum(sum(model.m.*model.m)))*model.beta);
      if strcmp(model.approx, 'dtcvar')
        % Variational correction term for the dtcvar bound.
        ll = ll - model.d*0.5*sum(model.diagD);
      end
    else
      error('Not implemented variable length beta yet.');
    end
  else
    ll = 0;
    for i = 1:model.d
      ind = gpDataIndices(model, i);
      e = model.K_uf(:, ind)*model.m(ind, i);
      if length(model.beta)==1
        ll = ll - 0.5*((-(model.N-model.k)*log(model.beta) ...
                        - model.logDetK_uu +model.logdetA(i)) ...
                       - (e'*model.Ainv{i}*e ...
                          -model.m(ind, i)'*model.m(ind, i))* ...
                       model.beta);
        if(isnan(ll))
          error('Log likelihood is NaN')
        end
        if strcmp(model.approx, 'dtcvar')
          error('Not implemented dtcvar for non-spherical yet.');
        end
      else
        error('Not implemented variable length beta yet.');
      end
    end
  end
 case 'fitc'
  % Fully independent training conditional.
  if ~isfield(model, 'isSpherical') || model.isSpherical
    if length(model.beta)==1
      if false
        % This is the original objective
        Dinvm = model.Dinv*model.m;
        K_ufDinvm = model.K_uf*Dinvm;
        ll = -0.5*(model.d*(sum(log(model.diagD))...
                            -(model.N-model.k)*log(model.beta) ...
                            + model.detDiff)...
                   + (sum(sum(Dinvm.*model.m))...
                      - sum(sum((model.Ainv*K_ufDinvm).*K_ufDinvm)))*model.beta);
        ll = ll - 0.5*model.N*model.d*log(2*pi);
      else
        % This is objective to match Ed Snelson's code.
        % NOTE(review): this branch already includes an N*log(2*pi)
        % term and the final constant below subtracts another
        % d*N/2*log(2*pi) — confirm against gpLogLikeGradients that
        % the constant is intended (constants do not affect gradients).
        ll = - model.d*(sum(log(diag(model.Lm))) + 0.5*(-(model.N - model.k)*log(model.beta)+(model.N*log(2*pi)+sum(log(model.diagD)))));
        for i = 1:model.d
          ll = ll - 0.5*model.beta*(model.scaledM(:, i)'*model.scaledM(:, i) ...
                                    - model.bet(:, i)'*model.bet(:, i));
        end
      end
    else
      error('Variable length Beta not implemented yet.')
    end
  else
    if length(model.beta)==1
      if false
        ll = 0;
        for i = 1:model.d
          ind = gpDataIndices(model, i);
          Dinvm = model.Dinv{i}*model.m(ind, i);
          K_ufDinvm = model.K_uf(:, ind)*Dinvm;
          ll = ll -0.5*(sum(log(model.diagD{i})) ...
                        - (length(ind) - model.k)*log(model.beta) ...
                        + model.detDiff(i) ...
                        + (sum(sum(Dinvm.*model.m(ind, i))) ...
                           - sum(sum((model.Ainv{i}*K_ufDinvm).* ...
                                     K_ufDinvm)))*model.beta ...
                        +length(ind)*log(2*pi));
        end
      else
        % This is objective to match Ed Snelson's code
        ll = 0;
        for i = 1:model.d
          ind = gpDataIndices(model, i);
          ll = ll - (sum(log(diag(model.Lm{i}))) ...
                     + 0.5*(-(length(ind) - model.k)*log(model.beta) ...
                            +(length(ind)*log(2*pi)+sum(log(model.diagD{i})))));
          ll = ll - 0.5*model.beta*(model.scaledM{i}'*model.scaledM{i} ...
                                    - model.bet{i}'*model.bet{i});
        end
      end
    else
      error('Variable length Beta not implemented yet.')
    end
  end
 case 'pitc'
  % Partially independent training conditional.
  if ~isfield(model, 'isSpherical') || model.isSpherical
    if length(model.beta)==1
      ll = model.d*(model.logDetA-model.logDetK_uu +model.k*log(model.beta));
      % Loop through the blocks computing each part to be added.
      K_ufDinvm = zeros(model.k, model.d);
      for i = 1:length(model.blockEnd)
        ind = gpBlockIndices(model, i);
        Dinvm{i} = model.Dinv{i}*model.m(ind, :);
        K_ufDinvm = K_ufDinvm + model.K_uf(:, ind)*Dinvm{i};
      end
      ll = ll - model.beta*sum(sum((model.Ainv*K_ufDinvm).*K_ufDinvm));
      for i = 1:length(model.blockEnd)
        ind = gpBlockIndices(model, i);
        ll = ll + model.d*(model.logDetD(i) ...
                           - length(ind)*log(model.beta))...
             + model.beta*sum(sum(Dinvm{i}.*model.m(ind, :)));
      end
      ll = -0.5*ll;
      ll = ll - 0.5*model.N*model.d*log(2*pi);
    else
      error('Variable Length Beta not implemented yet.')
    end
  else
    if length(model.beta)==1
      ll = 0;
      for j = 1:model.d
        ll = ll + model.logDetA(j)-model.logDetK_uu + model.k*log(model.beta);
        % Loop through the blocks computing each part to be added.
        K_ufDinvm = zeros(model.k, 1);
        for i = 1:length(model.blockEnd)
          ind = gpDataIndices(model, j, i);
          Dinvm{i, j} = model.Dinv{i, j}*model.m(ind, j);
          K_ufDinvm = K_ufDinvm + model.K_uf(:, ind)*Dinvm{i, j};
        end
        % Fix: index Ainv by the output dimension j. The original used
        % the stale block-loop index i (stuck at length(model.blockEnd)
        % after the loop above), whereas every other per-dimension
        % quantity in this branch (logDetA, Dinv, logDetD) uses j.
        ll = ll - model.beta*sum(sum((model.Ainv{j}*K_ufDinvm).*K_ufDinvm));
        for i = 1:length(model.blockEnd)
          ind = gpDataIndices(model, j, i);
          ll = ll + model.logDetD(i, j) ...
               - length(ind)*log(model.beta) ...
               + model.beta*sum(sum(Dinvm{i, j}.*model.m(ind, j)));
          ll = ll + length(ind)*log(2*pi);
        end
      end
      ll = -0.5*ll;
    else
      error('Variable Length Beta not implemented yet.');
    end
  end
end
if model.learnScales
  % Jacobian correction when the output scales are learnt parameters.
  ll = ll - sum(log(model.scale));
end
% Gaussian normalisation constant shared by all approximations.
ll = ll - model.d*model.N/2*log(2*pi);