
Commit

Some type relaxation in examples and code (#528)
* Remove some Float64 type annotations.

* Small doc changes.

* More type cleanup.
pkofod committed Feb 4, 2018
1 parent ce3a7e7 commit ede177e
Showing 14 changed files with 49 additions and 49 deletions.
8 changes: 4 additions & 4 deletions docs/src/algo/nelder_mead.md
@@ -73,13 +73,13 @@ to the `AffineSimplexer` above, but with a small twist. Instead of always adding
 a constant is only added to entries that are zero. If the entry is non-zero, five
 percent of the level is added. This might be implemented (by the user) as
 ```julia
-struct MatlabSimplexer <: Optim.Simplexer
-    a::Float64
-    b::Float64
+struct MatlabSimplexer{T} <: Optim.Simplexer
+    a::T
+    b::T
 end
 MatlabSimplexer(;a = 0.00025, b = 0.05) = MatlabSimplexer(a, b)
 
-function Optim.simplexer(A::MatlabSimplexer, initial_x::Array{T, N}) where {T, N}
+function Optim.simplexer(A::MatlabSimplexer, initial_x::AbstractArray{T, N}) where {T, N}
     n = length(initial_x)
     initial_simplex = Array{T, N}[initial_x for i = 1:n+1]
     for j = 1:n
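
For context, a minimal usage sketch of the relaxed example (not part of this commit). It assumes the complete `MatlabSimplexer` definition from the docs above is loaded, and that `NelderMead` still accepts a custom simplexer via its `initial_simplex` keyword:

```julia
using Optim

# With the new type parameter, the field types follow the arguments:
s32 = MatlabSimplexer(a = 0.00025f0, b = 0.05f0)   # MatlabSimplexer{Float32}

# Pass the custom simplexer to Nelder-Mead through the `initial_simplex` option.
res = optimize(x -> sum(abs2, x), ones(Float32, 2),
               NelderMead(initial_simplex = s32))
```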
6 changes: 3 additions & 3 deletions docs/src/dev/contributing.md
@@ -31,10 +31,10 @@ Minim(; alphaguess = LineSearches.InitialStatic(), linesearch = LineSearches.Hag
 Minim(linesearch, minim_parameter)
 type MinimState{T,N,G}
-    x::Array{T,N}
-    x_previous::Array{T,N}
+    x::AbstractArray{T,N}
+    x_previous::AbstractArray{T,N}
     f_x_previous::T
-    s::Array{T,N}
+    s::AbstractArray{T,N}
     @add_linesearch_fields()
 end
2 changes: 1 addition & 1 deletion src/deprecate.jl
@@ -65,4 +65,4 @@ function optimize(df::OnceDifferentiable,
                   u::Array{T},
                   F::Fminbox{O}; kwargs...) where {T<:AbstractFloat,O<:AbstractOptimizer}
     throw(ErrorException("Optimizing an objective `obj` without providing an initial `x` has been deprecated without backwards compatability. Please explicitly provide an `x`: `optimize(obj, x, l, u, method, options)``"))
-end
+end
2 changes: 1 addition & 1 deletion src/multivariate/optimize/optimize.jl
@@ -25,7 +25,7 @@ function optimize(d::D, initial_x::AbstractArray{Tx, N}, method::M,
     initial_x = complex_to_real(d, initial_x)
 
     n = length(initial_x)
-    tr = OptimizationTrace{typeof(method)}()
+    tr = OptimizationTrace{typeof(value(d)), typeof(method)}()
     tracing = options.store_trace || options.show_trace || options.extended_trace || options.callback != nothing
     stopped, stopped_by_callback, stopped_by_time_limit = false, false, false
     f_limit_reached, g_limit_reached, h_limit_reached = false, false, false
6 changes: 3 additions & 3 deletions src/multivariate/solvers/first_order/cg.jl
@@ -41,8 +41,8 @@
 # below. The default value for alphamax is Inf. See alphamaxfunc
 # for cgdescent and alphamax for linesearch_hz.
 
-struct ConjugateGradient{T, Tprep<:Union{Function, Void}, IL, L} <: FirstOrderOptimizer
-    eta::Float64
+struct ConjugateGradient{Tf, T, Tprep<:Union{Function, Void}, IL, L} <: FirstOrderOptimizer
+    eta::Tf
     P::T
     precondprep!::Tprep
     alphaguess!::IL
@@ -84,7 +84,7 @@ function ConjugateGradient(; alphaguess = LineSearches.InitialHagerZhang(),
                            precondprep = (P, x) -> nothing,
                            manifold::Manifold=Flat())
 
-    ConjugateGradient(Float64(eta),
+    ConjugateGradient(eta,
                       P, precondprep,
                       alphaguess, linesearch,
                       manifold)
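
A short sketch of what the relaxation buys, assuming `eta` remains a keyword argument of the convenience constructor: the parameter now keeps whatever type it is given instead of being converted to `Float64`.

```julia
using Optim

cg = ConjugateGradient(eta = 0.4f0)  # Float32 value chosen for illustration
typeof(cg.eta)                       # Float32 rather than Float64
```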
6 changes: 3 additions & 3 deletions src/multivariate/solvers/first_order/momentum_gradient_descent.jl
@@ -1,8 +1,8 @@
 # See p. 280 of Murphy's Machine Learning
 # x_k1 = x_k - alpha * gr + mu * (x - x_previous)
 
-struct MomentumGradientDescent{IL,L} <: FirstOrderOptimizer
-    mu::Float64
+struct MomentumGradientDescent{Tf, IL,L} <: FirstOrderOptimizer
+    mu::Tf
     alphaguess!::IL
     linesearch!::L
     manifold::Manifold
@@ -14,7 +14,7 @@ function MomentumGradientDescent(; mu::Real = 0.01,
                                  alphaguess = LineSearches.InitialPrevious(), # TODO: investigate good defaults
                                  linesearch = LineSearches.HagerZhang(), # TODO: investigate good defaults
                                  manifold::Manifold=Flat())
-    MomentumGradientDescent(Float64(mu), alphaguess, linesearch, manifold)
+    MomentumGradientDescent(mu, alphaguess, linesearch, manifold)
 end
 
 mutable struct MomentumGradientDescentState{Tx, T} <: AbstractOptimizerState
2 changes: 1 addition & 1 deletion src/multivariate/solvers/zeroth_order/nelder_mead.jl
@@ -86,7 +86,7 @@ function print_header(method::NelderMead)
     @printf "------ -------------- --------------\n"
 end
 
-function Base.show(io::IO, trace::OptimizationTrace{NelderMead})
+function Base.show(io::IO, trace::OptimizationTrace{T, NelderMead}) where T
     @printf io "Iter Function value √(Σ(yᵢ-ȳ)²)/n \n"
     @printf io "------ -------------- --------------\n"
     for state in trace.states
5 changes: 3 additions & 2 deletions src/multivariate/solvers/zeroth_order/particle_swarm.jl
@@ -279,9 +279,10 @@ function get_swarm_state(X, score, best_point, previous_state)
     # the weighing factors c1 and c2 are adapted.
     # New state is not only depending on the current swarm state,
     # but also from the previous state.
+    T = eltype(X)
     n, n_particles = size(X)
     f_best, i_best = findmin(score)
-    d = zeros(Float64, n_particles)
+    d = zeros(T, n_particles)
     for i in 1:n_particles
         dd = 0.0
         for k in 1:n_particles
@@ -297,7 +298,7 @@ function get_swarm_state(X, score, best_point, previous_state)
     dmax = Base.maximum(d)
 
     f = (dg - dmin) / (dmax - dmin)
-    mu = zeros(Float64, 4)
+    mu = zeros(T, 4)
     mu[1] = get_mu_1(f)
     mu[2] = get_mu_2(f)
     mu[3] = get_mu_3(f)
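
The `eltype` pattern used here is the idiom this commit applies throughout: derive the working type from the input rather than hard-coding `Float64`. A standalone illustration with made-up data:

```julia
X = rand(Float32, 3, 10)      # hypothetical swarm: 10 particles in 3 dimensions
T = eltype(X)                 # Float32
d  = zeros(T, size(X, 2))     # previously zeros(Float64, n_particles)
mu = zeros(T, 4)              # intermediate arrays now match the swarm's precision
```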
8 changes: 4 additions & 4 deletions src/types.jl
@@ -58,14 +58,14 @@ function print_header(method::AbstractOptimizer)
     @printf "Iter Function value Gradient norm \n"
 end
 
-struct OptimizationState{T <: AbstractOptimizer}
+struct OptimizationState{Tf, T <: AbstractOptimizer}
     iteration::Int
-    value::Float64
-    g_norm::Float64
+    value::Tf
+    g_norm::Tf
     metadata::Dict
 end
 
-const OptimizationTrace{T} = Vector{OptimizationState{T}}
+const OptimizationTrace{Tf, T} = Vector{OptimizationState{Tf, T}}
 
 abstract type OptimizationResults end
 
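
With the extra value-type parameter, traces are no longer tied to `Float64`. A hedged sketch using the default constructors shown above:

```julia
using Optim

T  = typeof(NelderMead())
tr = Optim.OptimizationTrace{Float32, T}()
push!(tr, Optim.OptimizationState{Float32, T}(1, 1.0f0, 0.5f0, Dict()))
tr[1].value   # 1.0f0, stored as Float32
```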
35 changes: 17 additions & 18 deletions src/univariate/optimize/interface.jl
@@ -1,4 +1,3 @@
-
 # Univariate Options
 function optimize(f::F,
                   lower::T,
@@ -31,23 +30,23 @@ function optimize(f::F,
 end
 
 function optimize(f::F,
-                  lower::Real,
-                  upper::Real;
-                  kwargs...) where F<:Function
-    optimize(f,
-             Float64(lower),
-             Float64(upper);
-             kwargs...)
+                  lower::Union{Integer, Real},
+                  upper::Union{Integer, Real};
+                  kwargs...) where F<:Function
+    optimize(f,
+             Float64(lower),
+             Float64(upper);
+             kwargs...)
 end
 
 function optimize(f::F,
-                  lower::Real,
-                  upper::Real,
-                  mo::Union{Brent, GoldenSection};
-                  kwargs...) where F<:Function
-    optimize(f,
-             Float64(lower),
-             Float64(upper),
-             mo;
-             kwargs...)
-end
+                  lower::Union{Integer, Real},
+                  upper::Union{Integer, Real},
+                  mo::Union{Brent, GoldenSection};
+                  kwargs...) where F<:Function
+    optimize(f,
+             Float64(lower),
+             Float64(upper),
+             mo;
+             kwargs...)
+end
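
For reference, a sketch of a call that dispatches to the bound-promoting method above (assuming the usual `optimize` and `Optim.minimizer` API); the integer bounds are converted to `Float64` before the univariate solver runs:

```julia
using Optim

res = optimize(x -> (x - 2)^2, 0, 4, GoldenSection())
Optim.minimizer(res)   # ≈ 2.0
```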
2 changes: 1 addition & 1 deletion src/univariate/solvers/brent.jl
@@ -82,7 +82,7 @@ function optimize(
     converged = false
 
     # Trace the history of states visited
-    tr = OptimizationTrace{typeof(mo)}()
+    tr = OptimizationTrace{T, typeof(mo)}()
     tracing = store_trace || show_trace || extended_trace || callback != nothing
     @brenttrace
 
2 changes: 1 addition & 1 deletion src/univariate/solvers/golden_section.jl
@@ -70,7 +70,7 @@ function optimize(f::F, x_lower::T, x_upper::T,
     converged = false
 
     # Trace the history of states visited
-    tr = OptimizationTrace{typeof(mo)}()
+    tr = OptimizationTrace{T, typeof(mo)}()
     tracing = store_trace || show_trace || extended_trace || callback != nothing
     @goldensectiontrace
 
8 changes: 4 additions & 4 deletions src/utilities/update.jl
@@ -1,13 +1,13 @@
-function update!(tr::OptimizationTrace{T},
+function update!(tr::OptimizationTrace{Tf, T},
                  iteration::Integer,
-                 f_x::Real,
+                 f_x::Tf,
                  grnorm::Real,
                  dt::Dict,
                  store_trace::Bool,
                  show_trace::Bool,
                  show_every::Int = 1,
-                 callback = nothing) where T
-    os = OptimizationState{T}(iteration, f_x, grnorm, dt)
+                 callback = nothing) where {Tf, T}
+    os = OptimizationState{Tf, T}(iteration, f_x, grnorm, dt)
     if store_trace
         push!(tr, os)
     end
6 changes: 3 additions & 3 deletions test/general/types.jl
@@ -4,9 +4,9 @@ import Compat.String
 @testset "Types" begin
     solver = NelderMead()
     T = typeof(solver)
-    trace = OptimizationTrace{T}()
-    push!(trace,OptimizationState{T}(1,1.0,1.0,Dict()))
-    push!(trace,OptimizationState{T}(2,1.0,1.0,Dict()))
+    trace = OptimizationTrace{Float64, T}()
+    push!(trace,OptimizationState{Float64, T}(1,1.0,1.0,Dict()))
+    push!(trace,OptimizationState{Float64, T}(2,1.0,1.0,Dict()))
     @test length(trace) == 2
     @test trace[end].iteration == 2
 
