Commit af4add3e, authored 10 years ago by Studer Gabriel
remove levenberg-marquardt algorithm
parent fd1eba91
Showing 2 changed files with 0 additions and 178 deletions

modules/img/alg/src/CMakeLists.txt           0 additions, 1 deletion
modules/img/alg/src/levenberg_marquardt.h    0 additions, 177 deletions
modules/img/alg/src/CMakeLists.txt  +0 −1
@@ -59,7 +59,6 @@ gaussian.hh
 gaussian_laplacian.hh
 highest_peak_search_3d.hh
 histogram.hh
-levenberg_marquardt.h
 line_iterator.hh
 local_sigma_threshold.hh
 mask_image.hh
modules/img/alg/src/levenberg_marquardt.h  deleted 100644 → 0  +0 −177
// Copyright (c) 2007, 2008, 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// A simple implementation of levenberg marquardt.
//
// [1] K. Madsen, H. Nielsen, O. Tingleoff. Methods for Non-linear Least
// Squares Problems.
// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
//
// TODO(keir): Cite the Lourakis' dogleg paper.
#ifndef LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
#define LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
#include <cmath>

#include <Eigen/Core>
#include <Eigen/Eigenvalues>

namespace ost { namespace img { namespace alg {

template<typename Function,
         typename Jacobian,
         typename Solver = Eigen::JacobiSVD<
           Eigen::Matrix<typename Function::FMatrixType::RealScalar,
                         Function::XMatrixType::RowsAtCompileTime,
                         Function::XMatrixType::RowsAtCompileTime> > >
class LevenbergMarquardt {
 public:
  typedef typename Function::XMatrixType::RealScalar Scalar;
  typedef typename Function::FMatrixType FVec;
  typedef typename Function::XMatrixType Parameters;
  typedef Eigen::Matrix<typename Function::FMatrixType::RealScalar,
                        Function::FMatrixType::RowsAtCompileTime,
                        Function::XMatrixType::RowsAtCompileTime> JMatrixType;
  typedef Eigen::Matrix<typename JMatrixType::RealScalar,
                        JMatrixType::ColsAtCompileTime,
                        JMatrixType::ColsAtCompileTime> AMatrixType;

  // TODO(keir): Some of these knobs can be derived from each other and
  // removed, instead of requiring the user to set them.
  enum Status {
    RUNNING,
    GRADIENT_TOO_SMALL,            // eps > max(J'*f(x))
    RELATIVE_STEP_SIZE_TOO_SMALL,  // eps > ||dx|| / ||x||
    ERROR_TOO_SMALL,               // eps > ||f(x)||
    HIT_MAX_ITERATIONS,
  };

  LevenbergMarquardt(const Function &f)
    : f_(f), df_(f) {}

  struct SolverParameters {
    SolverParameters()
      : gradient_threshold(1e-20),
        relative_step_threshold(1e-20),
        error_threshold(1e-16),
        initial_scale_factor(1e-3),
        max_iterations(200) {}
    Scalar gradient_threshold;       // eps > max(J'*f(x))
    Scalar relative_step_threshold;  // eps > ||dx|| / ||x||
    Scalar error_threshold;          // eps > ||f(x)||
    Scalar initial_scale_factor;     // Initial u for solving normal equations.
    int    max_iterations;           // Maximum number of solver iterations.
  };

  struct Results {
    Scalar error_magnitude;     // ||f(x)||
    Scalar gradient_magnitude;  // ||J'f(x)||
    int    iterations;
    Status status;
  };

  Status Update(const Parameters &x, const SolverParameters &params,
                JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
    *J = df_(x);
    *A = (*J).transpose() * (*J);
    *error = -f_(x);
    *g = (*J).transpose() * *error;
    if (g->array().abs().maxCoeff() < params.gradient_threshold) {
      return GRADIENT_TOO_SMALL;
    } else if (error->norm() < params.error_threshold) {
      return ERROR_TOO_SMALL;
    }
    return RUNNING;
  }

  Results minimize(Parameters *x_and_min) {
    SolverParameters params;
    return minimize(params, x_and_min);
  }

  Results minimize(const SolverParameters &params, Parameters *x_and_min) {
    Parameters &x = *x_and_min;
    JMatrixType J;
    AMatrixType A;
    FVec error;
    Parameters g;

    Results results;
    results.status = Update(x, params, &J, &A, &error, &g);

    Scalar u = Scalar(params.initial_scale_factor * A.diagonal().maxCoeff());
    Scalar v = 2;

    Parameters dx, x_new;
    int i;
    for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
      // LOG(INFO) << "iteration: " << i;
      // LOG(INFO) << "||f(x)||: " << f_(x).norm();
      // LOG(INFO) << "max(g): " << g.array().abs().maxCoeff();
      // LOG(INFO) << "u: " << u;
      // LOG(INFO) << "v: " << v;
      AMatrixType A_augmented =
          A + u * AMatrixType::Identity(J.cols(), J.cols());
      Solver solver(A_augmented, Eigen::ComputeThinU | Eigen::ComputeThinV);
      dx = solver.solve(g);
      if (dx.norm() <= params.relative_step_threshold * x.norm()) {
        results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
        break;
      }
      x_new = x + dx;
      // Rho is the ratio of the actual reduction in error to the reduction
      // in error that would be obtained if the problem was linear.
      // See [1] for details.
      Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
                 / dx.dot(u * dx + g));
      if (rho > 0) {
        // Accept the Gauss-Newton step because the linear model fits well.
        x = x_new;
        results.status = Update(x, params, &J, &A, &error, &g);
        Scalar tmp = Scalar(2 * rho - 1);
        u = u * std::max(Scalar(1 / 3.), 1 - (tmp * tmp * tmp));
        v = 2;
        continue;
      }
      // Reject the update because either the normal equations failed to solve
      // or the local linear model was not good (rho < 0). Instead, increase u
      // to move closer to gradient descent.
      u *= v;
      v *= 2;
    }
    if (results.status == RUNNING) {
      results.status = HIT_MAX_ITERATIONS;
    }
    results.error_magnitude = error.norm();
    results.gradient_magnitude = g.norm();
    results.iterations = i;
    return results;
  }

 private:
  const Function &f_;
  Jacobian df_;
};

}}}  // namespace mv

#endif  // LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
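
For orientation, the deleted header is a self-contained template: the caller supplies a residual functor (exposing XMatrixType and FMatrixType typedefs plus operator()) and a matching Jacobian functor constructible from it, then drives the solver through minimize(). The sketch below is a hypothetical usage example, not code from OpenStructure or libmv; the ExpDecayResiduals and ExpDecayJacobian functors, the sample data, and the include path are invented for illustration, and it assumes Eigen and the removed header are available.

// Hypothetical sketch: fit y = a * exp(b * t) to three samples using the
// removed ost::img::alg::LevenbergMarquardt template. All names and data
// below are invented for illustration.
#include <cmath>
#include <Eigen/Core>
#include "levenberg_marquardt.h"  // the header deleted by this commit (path illustrative)

struct ExpDecayResiduals {
  // Typedefs the LevenbergMarquardt template expects on its Function argument.
  typedef Eigen::VectorXd XMatrixType;  // parameter vector x = (a, b)
  typedef Eigen::VectorXd FMatrixType;  // residual vector, one entry per sample

  // Residuals f(x): model minus observation for each sample (t_i, y_i).
  FMatrixType operator()(const XMatrixType& x) const {
    FMatrixType f(3);
    const double t[3] = {0.0, 1.0, 2.0};
    const double y[3] = {2.0, 1.2, 0.7};
    for (int i = 0; i < 3; ++i) {
      f(i) = x(0) * std::exp(x(1) * t[i]) - y[i];
    }
    return f;
  }
};

struct ExpDecayJacobian {
  // The template constructs the Jacobian functor from the residual functor.
  explicit ExpDecayJacobian(const ExpDecayResiduals&) {}

  // Analytic Jacobian J(i, j) = d f_i / d x_j for the same three samples.
  Eigen::MatrixXd operator()(const Eigen::VectorXd& x) const {
    Eigen::MatrixXd J(3, 2);
    const double t[3] = {0.0, 1.0, 2.0};
    for (int i = 0; i < 3; ++i) {
      const double e = std::exp(x(1) * t[i]);
      J(i, 0) = e;                // d/da of a * exp(b * t)
      J(i, 1) = x(0) * t[i] * e;  // d/db of a * exp(b * t)
    }
    return J;
  }
};

int main() {
  ExpDecayResiduals f;
  ost::img::alg::LevenbergMarquardt<ExpDecayResiduals, ExpDecayJacobian> lm(f);
  Eigen::VectorXd x(2);
  x << 1.0, -0.5;   // initial guess for (a, b)
  lm.minimize(&x);  // x now holds the refined parameters
  return 0;
}

Dynamic-size Eigen vectors are used here because the template's default solver, Eigen::JacobiSVD, is asked for thin U and V factors, which Eigen only provides for dynamically sized matrices.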