From f0a25b0d384fadcea18bcf66f4eef9fb94fed2a7 Mon Sep 17 00:00:00 2001 From: David Hanak Date: Fri, 30 Jun 2023 21:02:59 +0200 Subject: [PATCH] Upgrade to ProximalAlgorithms v0.5 and AbstractOperators v0.3 (#40) * drop support for ForwardBackward optimizer; * add support for PANOCplus; * stabilize unstable unit test; * bump upper version limit on DSP. --- Project.toml | 10 +-- docs/src/solvers.md | 2 +- src/StructuredOptimization.jl | 4 +- src/arraypartition.jl | 8 +- src/calculus/precomposeNonlinear.jl | 10 +-- src/solvers/build_solve.jl | 6 +- src/solvers/minimize.jl | 32 +++---- src/solvers/solvers_options.jl | 6 +- src/syntax/terms/proximalOperators_bind.jl | 98 +++++++++++----------- src/syntax/terms/term.jl | 54 ++++++------ test/test_build_minimize.jl | 9 +- test/test_usage.jl | 28 +++---- test/test_usage_small.jl | 8 +- 13 files changed, 136 insertions(+), 139 deletions(-) diff --git a/Project.toml b/Project.toml index e83c93f..3361767 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "StructuredOptimization" uuid = "46cd3e9d-64ff-517d-a929-236bc1a1fc9d" -version = "0.3.0" +version = "0.4.0-ci+20230622" [deps] AbstractOperators = "d9c5613a-d543-52d8-9afd-8f241a8c3f1c" @@ -12,11 +12,11 @@ ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" [compat] -AbstractOperators = "0.1 - 0.2" -DSP = "0.5.1 - 0.6" +AbstractOperators = "0.3" +DSP = "0.5.1 - 0.7" FFTW = "1" -ProximalAlgorithms = "0.3 - 0.4" -ProximalOperators = "0.8 - 0.14" +ProximalAlgorithms = "0.5" +ProximalOperators = "0.15" RecursiveArrayTools = "1 - 2" julia = "1.4" diff --git a/docs/src/solvers.md b/docs/src/solvers.md index 69e5b13..2398676 100644 --- a/docs/src/solvers.md +++ b/docs/src/solvers.md @@ -23,9 +23,9 @@ You can pick the algorithm to use as `Solver` object from the package. Currently, the following algorithms are supported. ```@docs -ForwardBackward ZeroFPR PANOC +PANOCplus ``` diff --git a/src/StructuredOptimization.jl b/src/StructuredOptimization.jl index 3fb728a..e989dad 100644 --- a/src/StructuredOptimization.jl +++ b/src/StructuredOptimization.jl @@ -6,8 +6,8 @@ using AbstractOperators using ProximalOperators using ProximalAlgorithms -import ProximalAlgorithms:ForwardBackward, ZeroFPR, PANOC -export ForwardBackward, ZeroFPR, PANOC +import ProximalAlgorithms: ZeroFPR, PANOC, PANOCplus +export ZeroFPR, PANOC, PANOCplus include("syntax/syntax.jl") include("calculus/precomposeNonlinear.jl") # TODO move to ProximalOperators? diff --git a/src/arraypartition.jl b/src/arraypartition.jl index d95bfe1..06eff5e 100644 --- a/src/arraypartition.jl +++ b/src/arraypartition.jl @@ -2,7 +2,7 @@ import ProximalOperators import RecursiveArrayTools @inline function ProximalOperators.prox( - h::ProximalOperators.ProximableFunction, + h, x::RecursiveArrayTools.ArrayPartition, gamma... ) @@ -13,7 +13,7 @@ import RecursiveArrayTools end @inline function ProximalOperators.gradient( - h::ProximalOperators.ProximableFunction, + h, x::RecursiveArrayTools.ArrayPartition ) # unwrap @@ -24,13 +24,13 @@ end @inline ProximalOperators.prox!( y::RecursiveArrayTools.ArrayPartition, - h::ProximalOperators.ProximableFunction, + h, x::RecursiveArrayTools.ArrayPartition, gamma... ) = ProximalOperators.prox!(y.x, h, x.x, gamma...) 
@inline ProximalOperators.gradient!( y::RecursiveArrayTools.ArrayPartition, - h::ProximalOperators.ProximableFunction, + h, x::RecursiveArrayTools.ArrayPartition ) = ProximalOperators.gradient!(y.x, h, x.x) diff --git a/src/calculus/precomposeNonlinear.jl b/src/calculus/precomposeNonlinear.jl index dfc0e99..19dec7c 100644 --- a/src/calculus/precomposeNonlinear.jl +++ b/src/calculus/precomposeNonlinear.jl @@ -2,19 +2,19 @@ import ProximalOperators: gradient!, gradient # this can be removed when moved t export PrecomposeNonlinear -struct PrecomposeNonlinear{P <: ProximableFunction, +struct PrecomposeNonlinear{P, T <: AbstractOperator, - D <: AbstractArray, + D <: AbstractArray, C <: AbstractArray - } <: ProximableFunction - g::P + } + g::P G::T bufD::D bufC::C bufC2::C end -function PrecomposeNonlinear(g::P, G::T) where {P, T} +function PrecomposeNonlinear(g::P, G::T) where {P, T} t, s = domainType(G), size(G,2) bufD = eltype(s) <: Int ? zeros(t,s) : ArrayPartition(zeros.(t,s)) t, s = codomainType(G), size(G,1) diff --git a/src/solvers/build_solve.jl b/src/solvers/build_solve.jl index 496fa3a..b360902 100644 --- a/src/solvers/build_solve.jl +++ b/src/solvers/build_solve.jl @@ -18,7 +18,7 @@ julia> A, b = randn(10,4), randn(10); julia> p = problem( ls(A*x - b ) , norm(x) <= 1 ); -julia> StructuredOptimization.parse_problem(p, ForwardBackward()); +julia> StructuredOptimization.parse_problem(p, PANOCplus()); ``` """ function parse_problem(terms::Tuple, solver::T) where T <: ForwardBackwardSolver @@ -65,14 +65,14 @@ julia> A, b = randn(10,4), randn(10); julia> p = problem(ls(A*x - b ), norm(x) <= 1); -julia> solve(p, ForwardBackward()); +julia> solve(p, PANOCplus()); julia> ~x ``` """ function solve(terms::Tuple, solver::ForwardBackwardSolver) x, kwargs = parse_problem(terms, solver) - x_star, it = solver(~x; kwargs...) + x_star, it = solver(; x0 = ~x, kwargs...) ~x .= x_star return x, it end diff --git a/src/solvers/minimize.jl b/src/solvers/minimize.jl index 0b3089b..b22a5e0 100644 --- a/src/solvers/minimize.jl +++ b/src/solvers/minimize.jl @@ -20,7 +20,7 @@ julia> @minimize ls(A*x-b) st x >= 0.; julia> ~x # access array with solution -julia> @minimize ls(A*x-b) st norm(x) == 2.0 with ForwardBackward(fast=true); +julia> @minimize ls(A*x-b) st norm(x) == 2.0 with PANOCplus(); julia> ~x # access array with solution ``` @@ -29,28 +29,28 @@ Returns as output a tuple containing the optimization variables and the number of iterations spent by the solver algorithm. """ macro minimize(cf::Union{Expr, Symbol}) - cost = esc(cf) + cost = esc(cf) return :(solve(problem($(cost)), default_solver())) end macro minimize(cf::Union{Expr, Symbol}, s::Symbol, cstr::Union{Expr, Symbol}) - cost = esc(cf) - if s == :(st) - constraints = esc(cstr) - return :(solve(problem($(cost), $(constraints)), default_solver())) - elseif s == :(with) - solver = esc(cstr) + cost = esc(cf) + if s == :(st) + constraints = esc(cstr) + return :(solve(problem($(cost), $(constraints)), default_solver())) + elseif s == :(with) + solver = esc(cstr) return :(solve(problem($(cost)), $(solver))) - else - error("wrong symbol after cost function! use `st` or `with`") - end + else + error("wrong symbol after cost function! use `st` or `with`") + end end macro minimize(cf::Union{Expr, Symbol}, s::Symbol, cstr::Union{Expr, Symbol}, w::Symbol, slv::Union{Expr, Symbol}) - cost = esc(cf) - s != :(st) && error("wrong symbol after cost function! use `st`") - constraints = esc(cstr) - w != :(with) && error("wrong symbol after constraints! 
use `with`") - solver = esc(slv) + cost = esc(cf) + s != :(st) && error("wrong symbol after cost function! use `st`") + constraints = esc(cstr) + w != :(with) && error("wrong symbol after constraints! use `with`") + solver = esc(slv) return :(solve(problem($(cost), $(constraints)), $(solver))) end diff --git a/src/solvers/solvers_options.jl b/src/solvers/solvers_options.jl index b104602..ff6b963 100644 --- a/src/solvers/solvers_options.jl +++ b/src/solvers/solvers_options.jl @@ -1,9 +1,5 @@ using ProximalAlgorithms -const ForwardBackwardSolver = Union{ - ProximalAlgorithms.ForwardBackward, - ProximalAlgorithms.ZeroFPR, - ProximalAlgorithms.PANOC, -} +const ForwardBackwardSolver = ProximalAlgorithms.IterativeAlgorithm const default_solver = ProximalAlgorithms.PANOC diff --git a/src/syntax/terms/proximalOperators_bind.jl b/src/syntax/terms/proximalOperators_bind.jl index 44e7220..c507638 100644 --- a/src/syntax/terms/proximalOperators_bind.jl +++ b/src/syntax/terms/proximalOperators_bind.jl @@ -4,7 +4,7 @@ import LinearAlgebra: norm export norm """ - norm(x::AbstractExpression, p=2, [q,] [dim=1]) + norm(x::AbstractExpression, p=2, [q,] [dim=1]) Returns the norm of `x`. @@ -28,33 +28,33 @@ where ``\\mathbf{x}_i`` is the ``i``-th column if `dim == 1` (or row if `dim == """ function norm(ex::AbstractExpression, p::Real=2) - if p == 0 - f = NormL0() - elseif p == 1 - f = NormL1() - elseif p == 2 - f = NormL2() - elseif p == Inf - f = NormLinf() - else - error("function not implemented") - end - return Term(f, ex) + if p == 0 + f = NormL0() + elseif p == 1 + f = NormL1() + elseif p == 2 + f = NormL2() + elseif p == Inf + f = NormLinf() + else + error("function not implemented") + end + return Term(f, ex) end # Nuclear norm function norm(ex::AbstractExpression, ::typeof(*)) - return Term(NuclearNorm(), ex) + return Term(NuclearNorm(), ex) end # Mixed Norm function norm(ex::AbstractExpression, p1::Int, p2::Int, dim::Int = 1 ) - if p1 == 2 && p2 == 1 - f = NormL21(1.0,dim) - else - error("function not implemented") - end - return Term(f, ex) + if p1 == 2 && p2 == 1 + f = NormL21(1.0,dim) + else + error("function not implemented") + end + return Term(f, ex) end # Least square terms @@ -62,7 +62,7 @@ end export ls """ - ls(x::AbstractExpression) + ls(x::AbstractExpression) Returns the squared norm (least squares) of `x`: @@ -77,12 +77,12 @@ ls(ex) = Term(SqrNormL2(), ex) import Base: ^ function (^)(t::Term{T1,T2,T3}, exp::Integer) where {T1, T2 <: NormL2, T3} - if exp == 2 - # The coefficient 2.0 is due to the fact that SqrNormL2 divides by 2.0 - return t.lambda^2*Term(SqrNormL2(2.0), t.A) - else - error("function not implemented") - end + if exp == 2 + # The coefficient 2.0 is due to the fact that SqrNormL2 divides by 2.0 + return t.lambda^2*Term(SqrNormL2(2.0), t.A) + else + error("function not implemented") + end end # HingeLoss @@ -90,7 +90,7 @@ end export hingeloss """ - hingeloss(x::AbstractExpression, y::Array) + hingeloss(x::AbstractExpression, y::Array) Applies the Hinge loss function ```math @@ -106,7 +106,7 @@ Term(HingeLoss(b), ex) export sqrhingeloss """ - sqrhingeloss(x::AbstractExpression, y::Array) + sqrhingeloss(x::AbstractExpression, y::Array) Applies the squared Hinge loss function ```math @@ -122,7 +122,7 @@ Term(SqrHingeLoss(b), ex) export crossentropy """ - crossentropy(x::AbstractExpression, y::Array) + crossentropy(x::AbstractExpression, y::Array) Applies the cross entropy loss function: ```math @@ -138,7 +138,7 @@ Term(CrossEntropy(b), ex) export logisticloss """ - 
logbarrier(x::AbstractExpression, y::AbstractArray) + logisticloss(x::AbstractExpression, y::AbstractArray) Applies the logistic loss function: ```math @@ -154,7 +154,7 @@ Term(LogisticLoss(y, 1.0), ex) export logbarrier """ - logbarrier(x::AbstractExpression) + logbarrier(x::AbstractExpression) Applies the log barrier function: ```math @@ -169,7 +169,7 @@ Term(LogBarrier(1.0), ex) export huberloss """ - huberloss(x::AbstractExpression, ρ=1.0) + huberloss(x::AbstractExpression, ρ=1.0) Applies the Huber loss function: ```math @@ -185,7 +185,7 @@ Term(HuberLoss(rho), ex) import Base: maximum """ - maximum(x::AbstractExpression) + maximum(x::AbstractExpression) Applies the function: ```math @@ -198,7 +198,7 @@ Term(Maximum(), ex) export sumpositive """ - sumpositive(x::AbstractExpression, ρ=1.0) + sumpositive(x::AbstractExpression, ρ=1.0) Applies the function: ```math @@ -212,7 +212,7 @@ import LinearAlgebra: dot export dot """ - dot(c::AbstractVector, x::AbstractExpression) + dot(c::AbstractVector, x::AbstractExpression) Applies the function: ```math @@ -284,10 +284,10 @@ import Base: <=, >=, in (>=)(ub, ex::AbstractExpression) = Term(IndBox(-Inf, ub), ex) function in(ex::AbstractExpression, bnds::AbstractArray) - if length(bnds) != 2 - error("should provide 2 bounds!") - end - return Term(IndBox(bnds[1], bnds[2]), ex) + if length(bnds) != 2 + error("should provide 2 bounds!") + end + return Term(IndBox(bnds[1], bnds[2]), ex) end # Rank constraints @@ -299,9 +299,9 @@ export rank # rank(X) <= r, # therefore here the parameter (1) doesn't really have a role. # We should probably fix this: it allows weird things in expressing problems. -# Maybe we should have Rank <: ProximableFunction (with no prox! nor gradient! +# Maybe we should have Rank (with no prox! nor gradient! # defined), that gives IndBallRank when combined with <=. -struct Rank <: ProximableFunction end +struct Rank end rank(ex::AbstractExpression) = Term(Rank(), ex) import Base: <= @@ -368,7 +368,7 @@ end import Base: conj """ - conj(t::Term) + conj(t::Term) Returns the convex conjugate transform of `t`: ```math @@ -388,11 +388,11 @@ julia> t = conj(norm(x,1)) """ function conj(t::Term) - if typeof(operator(t)) <: Eye - return Term(1.0,Conjugate(Postcompose(t.f,t.lambda)),t.A) - else - error("cannot perform convex conjugation") - end + if typeof(operator(t)) <: Eye + return Term(1.0,Conjugate(Postcompose(t.f,t.lambda)),t.A) + else + error("cannot perform convex conjugation") + end end @@ -400,7 +400,7 @@ end export smooth """ - smooth(t::Term, gamma = 1.0) + smooth(t::Term, gamma = 1.0) Smooths the nonsmooth term `t` using Moreau envelope: diff --git a/src/syntax/terms/term.jl b/src/syntax/terms/term.jl index 6dae57a..2e42973 100644 --- a/src/syntax/terms/term.jl +++ b/src/syntax/terms/term.jl @@ -1,13 +1,13 @@ -struct Term{T1 <: Real, T2 <: ProximableFunction, T3 <: AbstractExpression} - lambda::T1 - f::T2 - A::T3 - Term(lambda::T1, f::T2, ex::T3) where {T1,T2,T3} = new{T1,T2,T3}(lambda,f,ex) +struct Term{T1 <: Real, T2, T3 <: AbstractExpression} - lambda::T1 + lambda::T1 + f::T2 + A::T3 + Term(lambda::T1, f::T2, ex::T3) where {T1,T2,T3} = new{T1,T2,T3}(lambda,f,ex) end -function Term(f::T, ex::AbstractExpression) where {T<:ProximableFunction} - A = convert(Expression,ex) - Term(1,f, A) +function Term(f, ex::AbstractExpression) + A = convert(Expression,ex) + Term(1,f, A) end # Operations @@ -28,12 +28,12 @@ import Base: + import Base: * function (*)(a::T1, t::Term{T,T2,T3}) where {T1<:Real, T, T2, T3} - coeff = *(promote(a,t.lambda)...) 
- Term(coeff, t.f, t.A) + coeff = *(promote(a,t.lambda)...) + Term(coeff, t.f, t.A) end function (*)(a::T1, t::T2) where {T1<:Real, N, T2 <: Tuple{Vararg{<:Term,N}} } - return a.*t + return a.*t end # Properties @@ -45,17 +45,17 @@ displacement(t::Term) = displacement(t.A) #importing properties from ProximalOperators import ProximalOperators: - is_affine, - is_cone, - is_convex, - is_generalized_quadratic, - is_prox_accurate, - is_quadratic, - is_separable, - is_set, - is_singleton, - is_smooth, - is_strongly_convex + is_affine, + is_cone, + is_convex, + is_generalized_quadratic, + is_prox_accurate, + is_quadratic, + is_separable, + is_set, + is_singleton, + is_smooth, + is_strongly_convex #importing properties from AbstractOperators is_f = [:is_linear, @@ -72,11 +72,11 @@ is_f = [:is_linear, ] for f in is_f - @eval begin - import AbstractOperators: $f - $f(t::Term) = $f(operator(t)) - $f(t::NTuple{N,Term}) where {N} = all($f.(t)) - end + @eval begin + import AbstractOperators: $f + $f(t::Term) = $f(operator(t)) + $f(t::NTuple{N,Term}) where {N} = all($f.(t)) + end end is_smooth(t::Term) = is_smooth(t.f) diff --git a/test/test_build_minimize.jl b/test/test_build_minimize.jl index bbb9a47..828e221 100644 --- a/test/test_build_minimize.jl +++ b/test/test_build_minimize.jl @@ -9,25 +9,26 @@ b = randn(5) println("\nTesting @minimize \n") ~x .= 0. ~y .= 0. -slv, = @minimize ls(A*x - B*y + b) st norm(x, 2) <= 1e4, norm(y, 1) <= 1.0 with ForwardBackward() +slv, = @minimize ls(A*x - B*y + b) st norm(x, 2) <= 1e4, norm(y, 1) <= 1.0 with PANOCplus() ~x .= 0. -slv, = @minimize ls(A*x - b) st norm(x, 1) <= 1.0 with ForwardBackward() +slv, = @minimize ls(A*x - b) st norm(x, 1) <= 1.0 with PANOCplus() ~x .= 0. slv, = @minimize ls(A*x - b) st norm(x, 1) <= 1.0 ~x .= 0. -slv, = @minimize ls(A*x - b) + norm(x, 1) with ForwardBackward() +slv, = @minimize ls(A*x - b) + norm(x, 1) with PANOCplus() ~x .= 0. slv, = @minimize ls(A*x - b) + norm(x, 1) ~x .= 0. slv, = @minimize ls(A*x - b) #TODO many many more tests +Random.seed!(12345) x = Variable(5) A = randn(10, 5) b = randn(10) println("\nTesting @minimize nonlinear \n") -slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with ForwardBackward(tol = 1e-6) +slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with PANOCplus(tol = 1e-6) xpg = copy(~x) ~x .= 0. 
slv, = @minimize ls(sigmoid(A*x,10) - b)+norm(x,1) with ZeroFPR(tol = 1e-6) diff --git a/test/test_usage.jl b/test/test_usage.jl index 461284e..8d5f2b8 100644 --- a/test/test_usage.jl +++ b/test/test_usage.jl @@ -15,13 +15,13 @@ b = randn(m) lam1 = 0.2 lam2 = 1.0 -# Solve with FPG +# Solve with PANOC+ x1_fpg = Variable(n1) x2_fpg = Variable(n2) expr = ls(A1*x1_fpg + A2*x2_fpg - b) + lam1*norm(x1_fpg, 1) + lam2*norm(x2_fpg, 2) prob = problem(expr) -@time sol = solve(prob, ForwardBackward(fast=true, tol=1e-10, verbose=false,maxit=20000)) +@time sol = solve(prob, PANOCplus(tol=1e-10, verbose=false,maxit=20000)) # Solve with ZeroFPR @@ -86,17 +86,17 @@ b = A*x_star + A'\y_star x_pg = Variable(n) expr = ls(A*x_pg - b) + lam*norm(x_pg, 1) prob = problem(expr) -@time sol = solve(prob, ForwardBackward(tol=1e-10, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-10, verbose=false)) @test norm(~x_pg - x_star, Inf) <= 1e-8 @test norm(A'*(A*~x_pg - b) + lam*sign.(~x_pg)) <= 1e-6 -# Solve with FPG +# Solve with PANOC+ x_fpg = Variable(n) expr = ls(A*x_fpg - b) + lam*norm(x_fpg, 1) prob = problem(expr) -@time sol = solve(prob, ForwardBackward(fast=true, tol=1e-10, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-10, verbose=false)) @test norm(~x_fpg - x_star, Inf) <= 1e-8 @test norm(A'*(A*~x_fpg - b) + lam*sign.(~x_fpg)) <= 1e-6 @@ -139,14 +139,14 @@ b = A*x_orig + randn(m) x_pg = Variable(n) expr = smooth(norm(A*x_pg - b, 2)) + lam*norm(x_pg, 1) prob = problem(expr) -@time sol = solve(prob, ForwardBackward(tol=1e-6, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-6, verbose=false)) -# Solve with FPG +# Solve with PANOC+ x_fpg = Variable(n) expr = smooth(norm(A*x_fpg - b, 2)) + lam*norm(x_fpg, 1) prob = problem(expr) -@time sol = solve(prob, ForwardBackward(fast=true, tol=1e-6, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-6, verbose=false)) # Solve with ZeroFPR @@ -190,17 +190,17 @@ b = A*x_orig + randn(m) x_pg = Variable(n) expr = ls(A*x_pg - b) prob = problem(expr, x_pg in [lb, ub]) -@time sol = solve(prob, ForwardBackward(tol=1e-6, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-6, verbose=false)) @test norm(~x_pg - max.(lb, min.(ub, ~x_pg)), Inf) <= 1e-12 @test norm(~x_pg - max.(lb, min.(ub, ~x_pg - A'*(A*~x_pg - b))), Inf)/(1+norm(~x_pg, Inf)) <= 1e-6 -# Solve with FPG +# Solve with PANOC+ x_fpg = Variable(n) expr = ls(A*x_fpg - b) prob = problem(expr, x_fpg in [lb, ub]) -@time sol = solve(prob, ForwardBackward(fast=true, tol=1e-6, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-6, verbose=false)) @test norm(~x_fpg - max.(lb, min.(ub, ~x_fpg)), Inf) <= 1e-12 @test norm(~x_fpg - max.(lb, min.(ub, ~x_fpg - A'*(A*~x_fpg - b))), Inf)/(1+norm(~x_fpg, Inf)) <= 1e-6 @@ -264,17 +264,17 @@ b = A*x_star + A'\y_star x_pg = Variable(n) expr = ls(A*x_pg - b) prob = problem(expr, x_pg >= 0.0) -@time sol = solve(prob, ForwardBackward(tol=1e-8, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-8, verbose=false)) @test all(~x_pg .>= 0.0) @test norm(~x_pg - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8 -# Solve with FPG +# Solve with PANOC+ x_fpg = Variable(n) expr = ls(A*x_fpg - b) prob = problem(expr, x_fpg >= 0.0) -@time sol = solve(prob, ForwardBackward(fast=true, tol=1e-8, verbose=false)) +@time sol = solve(prob, PANOCplus(tol=1e-8, verbose=false)) @test all(~x_fpg .>= 0.0) @test norm(~x_fpg - x_star, Inf)/(1+norm(x_star, Inf)) <= 1e-8 diff --git a/test/test_usage_small.jl b/test/test_usage_small.jl index 01060f3..8503707 100644 --- 
a/test/test_usage_small.jl +++ b/test/test_usage_small.jl @@ -1,10 +1,6 @@ A = randn(3,5) b = randn(3) -x_pg = Variable(5) -prob_pg = problem(ls(A*x_pg - b) + 1e-3*norm(x_pg, 1)) -sol_pg = solve(prob_pg, ForwardBackward()) - x_zfpr = Variable(5) prob_zfpr = problem(ls(A*x_zfpr - b) + 1e-3*norm(x_zfpr, 1)) sol_zfpr = solve(prob_zfpr, ZeroFPR()) @@ -12,3 +8,7 @@ sol_zfpr = solve(prob_zfpr, ZeroFPR()) x_pnc = Variable(5) prob_pnc = problem(ls(A*x_pnc - b) + 1e-3*norm(x_pnc, 1)) sol_pnc = solve(prob_pnc, PANOC()) + +x_pncp = Variable(5) +prob_pncp = problem(ls(A*x_pncp - b) + 1e-3*norm(x_pncp, 1)) +sol_pncp = solve(prob_pncp, PANOCplus())
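
Reviewer note (not part of the patch): the change in `solve` from `solver(~x; kwargs...)` to `solver(; x0 = ~x, kwargs...)` reflects the ProximalAlgorithms v0.5 calling convention, in which solvers are `IterativeAlgorithm`s that take their options at construction time and the problem data as keyword arguments when called — which is also why `ForwardBackwardSolver` can simply alias `ProximalAlgorithms.IterativeAlgorithm`. A minimal sketch of that convention, with illustrative problem sizes and tolerances:

```julia
using ProximalAlgorithms, ProximalOperators

A, b = randn(10, 4), randn(10)
f = LeastSquares(A, b)   # smooth term: 1/2 ||Ax - b||^2
g = NormL1(0.1)          # nonsmooth term: 0.1 ||x||_1

# Options go to the constructor; the problem goes to the call.
algo = ProximalAlgorithms.PANOCplus(tol = 1e-6, maxit = 1000)
solution, iterations = algo(x0 = zeros(4), f = f, g = g)
```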
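Similarly, the `h::ProximalOperators.ProximableFunction` annotations disappear because ProximalOperators 0.15 dropped that abstract supertype in favor of duck typing: anything implementing `prox!` (and, for smooth terms, `gradient!`) is accepted. A sketch of a custom term under the new convention — `MyIndZero` is a made-up example type, not part of any package:

```julia
using ProximalOperators

# No supertype left to subtype; a bare struct suffices.
struct MyIndZero end

# prox!(y, f, x, gamma) writes the proximal point into y and
# returns the value of f at that point (0 for an indicator).
function ProximalOperators.prox!(y, ::MyIndZero, x, gamma)
    y .= 0
    return zero(real(eltype(x)))
end
```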
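The `arraypartition.jl` wrappers above forward `prox`/`gradient` calls on a `RecursiveArrayTools.ArrayPartition` to the underlying tuple of arrays. A hedged sketch of the behavior they enable, assuming a `SeparableSum` whose `prox!` is defined blockwise over tuples:

```julia
using StructuredOptimization           # loads the ArrayPartition glue above
using ProximalOperators, RecursiveArrayTools

h  = SeparableSum(NormL1(1.0), NormL2(1.0))   # one function per block
xp = ArrayPartition(randn(3), randn(4))
yp = ArrayPartition(zeros(3), zeros(4))
ProximalOperators.prox!(yp, h, xp, 1.0)       # forwarded to yp.x and xp.x blockwise
```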