function [K1 KN ERR]=SeparateKernel(H)
% This function SEPARATEKERNEL will separate (do a decomposition of) any
% 2D, 3D or nD kernel into 1D kernels. Of course only a subset of kernels
% is separable, such as a Gaussian kernel, but it will give least-squares
% solutions for non-separable kernels.
%
% Separating a 3D or 4D image filter into 1D filters gives a large
% speed-up in image filtering, for instance with the function imfilter
% (see the filtering example below).
%
% [K1 KN ERR]=SeparateKernel(H);
%
% inputs,
%   H : The 2D, 3D ..., ND kernel
%
% outputs,
%   K1 : Cell array with the 1D kernels
%   KN : Approximation of the ND input kernel by the 1D kernels
%   ERR : The sum of absolute differences between approximation and input kernel
%
% How the algorithm works:
% If we have a separable kernel like
%
%   H = [1 2 3
%        2 4 6
%        3 6 9]
%
% we want to solve the unknown 1D kernels,
%   a=[a(1) a(2) a(3)]
%   b=[b(1) b(2) b(3)]
%
% which multiply as
%
%         b(1)   b(2)   b(3)
%        ---------------------
%   a(1)| h(1,1) h(1,2) h(1,3)
%   a(2)| h(2,1) h(2,2) h(2,3)
%   a(3)| h(3,1) h(3,2) h(3,3)
%
% Thus,
%   h(1,1) == a(1)*b(1)
%   h(2,1) == a(2)*b(1)
%   h(3,1) == a(3)*b(1)
%   h(1,2) == a(1)*b(2)
%   ...
%
% We want to solve this with fast matrix (least squares) math,
%
%   c = M * d
%
% with c a column vector with all kernel values of H, and
% d a column vector with the unknown 1D kernels.
%
% But matrices "add" values, and we have something like h(1,1) == a(1)*b(1);
% We solve this by taking the log on both sides.
% (We replace zeros by a small value. Whole lines/planes of zeros are
% removed beforehand and re-added afterwards)
%
%   log( h(1,1) ) == log(a(1)) + log(b(1))
%
% The matrix is something like this,
%
%       a1 a2 a3 b1 b2 b3
%  M = [1  0  0  1  0  0;   h11
%       0  1  0  1  0  0;   h21
%       0  0  1  1  0  0;   h31
%       1  0  0  0  1  0;   h12
%       0  1  0  0  1  0;   h22
%       0  0  1  0  1  0;   h32
%       1  0  0  0  0  1;   h13
%       0  1  0  0  0  1;   h23
%       0  0  1  0  0  1];  h33
%
% Least squares solution:
%
%   d = exp(M\log(c))
%
% with the 1D kernels
%
%   [a(1);a(2);a(3);b(1);b(2);b(3)] = d
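%
% For illustration, this solve step for a 3 x 2 kernel (a minimal sketch
% with hypothetical values; here H is the outer product of a=[1;2;4], b=[3 5]):
%
%   H = [3 5; 6 10; 12 20];
%   M = [1 0 0 1 0;         % h11 = a1*b1
%        0 1 0 1 0;         % h21 = a2*b1
%        0 0 1 1 0;         % h31 = a3*b1
%        1 0 0 0 1;         % h12 = a1*b2
%        0 1 0 0 1;         % h22 = a2*b2
%        0 0 1 0 1];        % h32 = a3*b2
%   d = exp(M\log(H(:)));   % warns rank deficient; exact for a separable H
%   a = d(1:3); b = d(4:5);
%   max(max(abs(a*b'-H)))   % ~0; a and b are only unique up to a shared scale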
%
% The Problem of Negative Values!!!
%
% The log of a negative value is possible, it gives a complex value, log(-1) = i*pi
% If we take the exponential it is back to the old value, exp(i*pi) = -1
%
% But if we use the solver with one of the 1D vectors we get something like this:
%
%   input    result               abs(result)   angle(result)
%    -1      -0.0026 + 0.0125i      0.0128        1.7744
%     2       0.0117 + 0.0228i      0.0256        1.0958
%    -3      -0.0078 + 0.0376i      0.0384        1.7744
%     4       0.0234 + 0.0455i      0.0512        1.0958
%     5       0.0293 + 0.0569i      0.0640        1.0958
%
% The absolute values are indeed correct (the difference in scale is
% compensated by the other 1D vectors)
%
% As you can see the angle is correlated with the sign of the values, but I
% have not found the exact correlation yet. For some matrices it is something
% like
%
%   sign = mod(angle(solution)*scale,pi) == pi/2;
%
% In the current algorithm we therefore just flip the sign of the 1D kernel
% values one by one. The sign change which gives the smallest error is kept
% permanently. This is repeated until swapping signs no longer decreases the
% error (see the sketch below).
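%
% For instance (a hypothetical check), a separable kernel with mixed signs
% is still recovered by this sign correction:
%
%   H = [-1;2]*[3 -4 2];            % separable 2 x 3 kernel, mixed signs
%   [K1,KN,ERR] = SeparateKernel(H);
%   disp(ERR)                       % close to 0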
%
% Example 4D,
%   a=permute(rand(5,1),[1 2 3 4])-0.5;
%   b=permute(rand(5,1),[2 1 3 4])-0.5;
%   c=permute(rand(5,1),[3 2 1 4])-0.5;
%   d=permute(rand(5,1),[4 2 3 1])-0.5;
%   H = repmat(a,[1 5 5 5]).*repmat(b,[5 1 5 5]).*repmat(c,[5 5 1 5]).*repmat(d,[5 5 5 1]);
%   [K,KN,err]=SeparateKernel(H);
%   disp(['Summed Absolute Error between Real and approximation by 1D filters : ' num2str(err)]);
%
% Example 3D,
%   a=permute(rand(3,1),[1 2 3])-0.5;
%   b=permute(rand(3,1),[2 1 3])-0.5;
%   c=permute(rand(3,1),[3 2 1])-0.5;
%   H = repmat(a,[1 3 3]).*repmat(b,[3 1 3]).*repmat(c,[3 3 1]);
%   [K,KN,err]=SeparateKernel(H); err
%
% Example 2D,
%   a=permute(rand(4,1),[1 2 3])-0.5;
%   b=permute(rand(4,1),[2 1 3])-0.5;
%   H = repmat(a,[1 4]).*repmat(b,[4 1]);
%   [K,KN,err]=SeparateKernel(H); err
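%
% Example, filtering an image with the separated 1D kernels (a sketch;
% assumes the Image Processing Toolbox for fspecial and imfilter):
%   H = fspecial('gaussian',[11 11],2);
%   [K1,KN,ERR] = SeparateKernel(H);
%   I = rand(256,256);
%   J1 = imfilter(I,H);                      % direct 2D filtering
%   J2 = imfilter(imfilter(I,K1{1}),K1{2});  % two fast 1D passes
%   disp(max(abs(J1(:)-J2(:))))              % small, a Gaussian is separable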
%
% Function is written by D. Kroon, using the "log idea" from A. J. Hendrikse,
% University of Twente (July 2010)

% We first make a structure which contains information about
% the transformation from kernel to 1D kernel array, number of dimensions, etc.
data=InitializeDataStruct(H);

% Make the matrix of c = M * d;
M=makeMatrix(data);

% Solve c = M * d with least squares
warning('off','MATLAB:rankDeficientMatrix');
par=exp(M\log(abs(data.H(:))));

% Improve the values by solving the remaining difference, and dividing
% the factorized residual out of the first solution
KN = Filter1DtoFilterND(par,data);
par2=exp(M\log(abs(KN(:)./data.H(:))));
par=par./par2;

% Change the sign of a 1D filter value if it decreases the error
par = FilterCorrSign(par,data);

% Split the solution d into separate 1D kernels
K1 = ValueList2Filter1D(par,data);

% Re-add the removed zero rows/planes to the 1D vectors
K1=re_add_zero_rows(data,K1);

% Calculate the approximation of the ND kernel from the 1D kernels
KN = Filter1DtoFilterND(par,data,K1);

% Calculate the absolute error
ERR =sum(abs(H(:)-KN(:)));

function par = FilterCorrSign(par,data)
% Correct the signs of the 1D kernel values by greedily flipping them
Ert=zeros(1,length(par));
% Start from a random sign lay-out
par=sign(rand(size(par))-0.5).*par;
while(true)
    % Calculate the approximation of the ND kernel with the current signs
    KN = Filter1DtoFilterND(par,data);
    % Calculate the absolute error
    ERR =sum(abs(data.H(:)-KN(:)));
    % Flip the sign of every 1D filter value, and check if the error
    % improves
    for i=1:length(par)
        par2=par; par2(i)=-par2(i);
        KN = Filter1DtoFilterND(par2,data);
        Ert(i) =sum(abs(data.H(:)-KN(:)));
    end
    % Flip the sign of the 1D filter value with the largest improvement,
    % and stop when no sign flip decreases the error any more
    [t,j]=min(Ert);
    if(t<ERR), par(j)=-par(j); else break; end
end

function data=InitializeDataStruct(H)
% Create a structure with the kernel, its sizes and the bookkeeping needed
% to map between the ND kernel and the list of 1D kernel values
data.sizeHreal=size(H);
data.nreal=ndims(H);
[H,preserve_zeros]=remove_zero_rows(H);
data.preserve_zeros=preserve_zeros;
data.H=H;
data.n=ndims(H);
data.sizeH=size(data.H);
% Begin, end and length of every 1D kernel inside the parameter list
data.sep_parb=cumsum([1 data.sizeH(1:data.n-1)]);
data.sep_pare=cumsum(data.sizeH);
data.sep_parl=data.sep_pare-data.sep_parb+1;
data.par=(1:sum(data.sizeH))+1;

function [H,preserve_zeros]=remove_zero_rows(H)
% Remove whole columns/rows/planes with zeros,
% because we know beforehand that they will give a 1D kernel value of 0
% and would otherwise increase the error in the end result.
preserve_zeros=zeros(numel(H),2); pz=0;
sizeH=size(H);
for i=1:ndims(H)
    % Unfold the kernel so that dimension i runs along the rows
    H2D=reshape(H,size(H,1),[]);
    check_zero=~any(H2D,2);
    if(any(check_zero))
        zero_rows=find(check_zero);
        for j=1:length(zero_rows)
            pz=pz+1;
            preserve_zeros(pz,:)=[i zero_rows(j)];
            sizeH(1)=sizeH(1)-1;
        end
        H2D(check_zero,:)=[];
        H=reshape(H2D,sizeH);
    end
    % Rotate the dimensions, so the next dimension becomes the first
    H=shiftdim(H,1);
    sizeH=circshift(sizeH,[0 -1]);
    H=reshape(H,sizeH);
end
preserve_zeros=preserve_zeros(1:pz,:);
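
% For instance (a hypothetical check): H = [1 2 1; 0 0 0; 3 6 3] is reduced
% by remove_zero_rows to [1 2 1; 3 6 3] with preserve_zeros = [1 2], so
% re_add_zero_rows below re-inserts a 0 at position 2 of the dimension-1 kernel.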

function K1=re_add_zero_rows(data,K1)
% Re-add the 1D kernel values corresponding to a whole column/row or plane
% of zeros in the input kernel
for i=1:size(data.preserve_zeros,1)
    di=data.preserve_zeros(i,1);
    pos=data.preserve_zeros(i,2);
    if(di>length(K1)), K1{di}=1; end
    val=K1{di}(:);
    val=[val(1:pos-1);0;val(pos:end)];
    dim=ones(1,data.nreal); dim(di)=length(val);
    K1{di}=reshape(val,dim);
end

function M=makeMatrix(data)
% Make the matrix M of the linear system c = M * d
M = zeros(numel(data.H),sum(data.sizeH));
K1 = (1:numel(data.H))';
for i=1:data.n
    % Column index (in M) of every 1D kernel value of dimension i
    p=data.par(data.sep_parb(i):data.sep_pare(i)); p=p(:);
    dim=ones(1,data.n); dim(i)=data.sep_parl(i);
    Ki=reshape(p(:),dim);
    dim=data.sizeH; dim(i)=1;
    K2=repmat(Ki,dim)-1;
    M(sub2ind(size(M),K1(:),K2(:)))=1;
end

function Kt = Filter1DtoFilterND(par,data,K1)
% Reconstruct the ND kernel as the outer product of the 1D kernels
if(nargin==2)
    Kt=ones(data.sizeH);
    for i=1:data.n
        p=par(data.sep_parb(i):data.sep_pare(i)); p=p(:);
        dim=ones(1,data.n); dim(i)=data.sep_parl(i);
        Ki=reshape(p(:),dim);
        dim=data.sizeH; dim(i)=1;
        Kt=Kt.*repmat(Ki,dim);
    end
else
    % Use the 1D kernels in K1, which include the re-added zero rows
    Kt=ones(data.sizeHreal);
    for i=1:data.nreal
        dim=data.sizeHreal; dim(i)=1;
        Kt=Kt.*repmat(K1{i},dim);
    end
end

function K = ValueList2Filter1D(par,data)
% Split the parameter list into a cell array of 1D kernels
K=cell(1,data.n);
for i=1:data.n
    p=par(data.sep_parb(i):data.sep_pare(i)); p=p(:);
    dim=ones(1,data.n); dim(i)=data.sep_parl(i);
    K{i}=reshape(p(:),dim);
end