Commit: pre-commit
tschm committed Jun 5, 2023
1 parent c3c5871 commit 728828d
Showing 7 changed files with 80 additions and 32 deletions.
13 changes: 13 additions & 0 deletions .github/workflows/pre-commit.yml
@@ -0,0 +1,13 @@
name: pre-commit

on:
  pull_request:
  push:

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      - uses: pre-commit/action@v3.0.0
1 change: 1 addition & 0 deletions .gitignore
@@ -5,3 +5,4 @@
.venv
book/_build

.DS_Store
19 changes: 19 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,19 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
      - id: check-toml
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: fix-encoding-pragma

  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black

  - repo: https://github.com/asottile/reorder_python_imports
    rev: v3.9.0
    hooks:
      - id: reorder-python-imports
        args: [--py37-plus, --add-import, 'from __future__ import annotations']
12 changes: 2 additions & 10 deletions README.md
@@ -4,8 +4,8 @@ Building tools on the shoulders of [Mosek](http://www.mosek.com).

## Motivation

We created this package to support the experiments given in the [paper](http://arxiv.org/abs/1310.3397)
by Schmelzer, Hauser, Dahl and Andersen.
We created this package to support the experiments given in the [paper](http://arxiv.org/abs/1310.3397)
by Schmelzer, Hauser, Dahl and Andersen.


## License
@@ -15,11 +15,3 @@ You need a valid Mosek license.
## Applications

You can solve various (un)constrained regression and Markowitz problems.
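
As a rough illustration of that claim, here is a minimal sketch of how the solvers in book/docs/mosekTools/solver.py (further down in this diff) might be called. It assumes that directory is on the import path and that a valid Mosek license is available; the random data is purely illustrative.

import numpy as np

from mosekTools.solver import lsq_pos, markowitz

returns = np.random.randn(250, 5)  # 250 observations of 5 assets
rhs = np.random.randn(250)         # series to be tracked

# long-only regression weights that sum to one
w_track = lsq_pos(matrix=returns, rhs=rhs)

# mean-variance (Markowitz) weights with risk aversion 1.0
w_mv = markowitz(
    exp_ret=returns.mean(axis=0),
    covariance_mat=np.cov(returns, rowvar=False),
    aversion=1.0,
)

print(w_track)
print(w_mv)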








1 change: 0 additions & 1 deletion book.sh
@@ -11,4 +11,3 @@ ipython kernel install --name $NAME --user

jupyter-book clean book
jupyter-book build book

1 change: 0 additions & 1 deletion book/docs/mosekTools/__init__.py
@@ -1 +0,0 @@

65 changes: 45 additions & 20 deletions book/docs/mosekTools/solver.py
@@ -1,5 +1,14 @@
# -*- coding: utf-8 -*-
from __future__ import annotations

import numpy as np
from mosek.fusion import Expr, Domain, Model, ObjectiveSense, Matrix, BaseModel
from mosek.fusion import BaseModel
from mosek.fusion import Domain
from mosek.fusion import Expr
from mosek.fusion import Matrix
from mosek.fusion import Model
from mosek.fusion import ObjectiveSense


def __sum_weighted(c1, expr1, c2, expr2):
    return Expr.add(Expr.mul(c1, expr1), Expr.mul(c2, expr2))
@@ -11,6 +20,7 @@ def __residual(matrix, rhs, expr):
"""
return Expr.sub(__mat_vec_prod(matrix, expr), rhs)


def __quad_cone(model, expr1, expr2):
    model.constraint(Expr.vstack(expr1, expr2), Domain.inQCone())

@@ -95,8 +105,10 @@ def lsq_ls(matrix, rhs):
    s.t. e'w = 1
    """
    # define model
    with Model('lsqPos') as model:
        weights = model.variable("weights", matrix.shape[1], Domain.inRange(-np.infty, +np.infty))
    with Model("lsqPos") as model:
        weights = model.variable(
            "weights", matrix.shape[1], Domain.inRange(-np.infty, +np.infty)
        )

        # e'*w = 1
        model.constraint(Expr.sum(weights), Domain.equalsTo(1.0))
@@ -118,7 +130,7 @@ def lsq_pos(matrix, rhs):
    w >= 0
    """
    # define model
    with Model('lsqPos') as model:
    with Model("lsqPos") as model:
        # introduce n non-negative weight variables
        weights = model.variable("weights", matrix.shape[1], Domain.inRange(0.0, 1.0))

@@ -142,9 +154,11 @@ def lsq_pos_l1_penalty(matrix, rhs, cost_multiplier, weights_0):
    w >= 0
    """
    # define model
    with Model('lsqSparse') as model:
    with Model("lsqSparse") as model:
        # introduce n non-negative weight variables
        weights = model.variable("weights", matrix.shape[1], Domain.inRange(0.0, +np.infty))
        weights = model.variable(
            "weights", matrix.shape[1], Domain.inRange(0.0, +np.infty)
        )

        # e'*w = 1
        model.constraint(Expr.sum(weights), Domain.equalsTo(1.0))
@@ -158,7 +172,7 @@ def lsq_pos_l1_penalty(matrix, rhs, cost_multiplier, weights_0):
        cost = model.variable("cost", matrix.shape[1], Domain.unbounded())
        model.constraint(Expr.sub(cost, p), Domain.equalsTo(0.0))

        t = __l1_norm(model, 'abs(weights)', cost)
        t = __l1_norm(model, "abs(weights)", cost)

        # Minimise v + t
        model.objective(ObjectiveSense.Minimize, __sum_weighted(1.0, v, 1.0, t))
@@ -172,15 +186,19 @@ def lasso(matrix, rhs, lamb):
"""
min 2-norm (matrix*w - rhs)^2 + lamb * 1-norm(w)
"""
# define model
# define model
with Model("lasso") as model:
weights = model.variable("weights", matrix.shape[1]) #, Domain.inRange(-np.infty, +np.infty))
weights = model.variable(
"weights", matrix.shape[1]
) # , Domain.inRange(-np.infty, +np.infty))
# introduce variables and constraints

v = __l2_norm_squared(model, "2-norm(res)**", __residual(matrix, rhs, weights))
t = __l1_norm(model, "1-norm(w)", weights)

model.objective(ObjectiveSense.Minimize, __sum_weighted(c1=1.0, expr1=v, c2=lamb, expr2=t))
model.objective(
ObjectiveSense.Minimize, __sum_weighted(c1=1.0, expr1=v, c2=lamb, expr2=t)
)
# solve the problem
model.solve()

@@ -197,10 +215,10 @@ def markowitz_riskobjective(exp_ret, covariance_mat, bound):
        stdev = __stdev(model, "std", weights, covariance_mat)

        # impose a bound on this standard deviation
        #mBound.upper(model, stdev, bound)
        # mBound.upper(model, stdev, bound)
        model.constraint(stdev, Domain.lessThan(bound))

        #mModel.maximise(model=model, expr=Expr.dot(exp_ret, weights))
        # mModel.maximise(model=model, expr=Expr.dot(exp_ret, weights))
        model.objective(ObjectiveSense.Maximize, Expr.dot(exp_ret, weights))
        # solve the problem
        model.solve()
@@ -212,18 +230,24 @@ def markowitz(exp_ret, covariance_mat, aversion):
    # define model
    with Model("mean var") as model:
        # set of n weights (unconstrained)
        weights = model.variable("weights", len(exp_ret), Domain.inRange(-np.infty, +np.infty))
        weights = model.variable(
            "weights", len(exp_ret), Domain.inRange(-np.infty, +np.infty)
        )

        model.constraint(Expr.sum(weights), Domain.equalsTo(1.0))

        # standard deviation induced by covariance matrix
        var = __variance(model, "var", weights, covariance_mat)

        model.objective(ObjectiveSense.Maximize, Expr.sub(Expr.dot(exp_ret, weights), Expr.mul(aversion, var)))
        model.objective(
            ObjectiveSense.Maximize,
            Expr.sub(Expr.dot(exp_ret, weights), Expr.mul(aversion, var)),
        )
        model.solve()
        #mModel.maximise(model=model, expr=Expr.sub(Expr.dot(exp_ret, weights), Expr.mul(aversion, var)))
        # mModel.maximise(model=model, expr=Expr.sub(Expr.dot(exp_ret, weights), Expr.mul(aversion, var)))
        return np.array(weights.level())


def minimum_variance(matrix):
    # Given the matrix of returns a (each column is a series of returns) this method
    # computes the weights for a minimum variance portfolio, e.g.
@@ -246,23 +270,24 @@ def minimum_variance(matrix):
        r = Expr.mul(Matrix.dense(matrix), weights)
        # compute l2_norm squared of those returns
        # minimize this l2_norm
        model.objective(ObjectiveSense.Minimize, __l2_norm_squared(model, "2-norm^2(r)", expr=r))
        model.objective(
            ObjectiveSense.Minimize, __l2_norm_squared(model, "2-norm^2(r)", expr=r)
        )
        # solve the problem
        model.solve()
        # return the series of weights
        return np.array(weights.level())

if __name__ == '__main__':

if __name__ == "__main__":
    # MOSEK (often) requires that the environment variable MOSEKLM_LICENSE_FILE is defined and set to the port on the server
    # that is exposed by the license management program.
    from numpy.random import randn
    import mosek


    A = randn(5, 3)

    print (minimum_variance(matrix=A))

    print(minimum_variance(matrix=A))

    # please note that the Mosek License may still be in cache which could interfere/block subsequent programs running
    # One way to make sure the license is no longer in cache is to use the trick:
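
The comments in the __main__ block above note that MOSEK often needs the environment variable MOSEKLM_LICENSE_FILE to point at the port exposed by the license manager. A minimal sketch of one way to set it before solving anything; the port@host value is a placeholder assumption, not taken from this repository.

import os

# placeholder value: replace with the port@host of your own license server
os.environ.setdefault("MOSEKLM_LICENSE_FILE", "27007@license-server.example.com")

# the variable must be visible before any model is solved, e.g.
# from mosekTools.solver import minimum_variance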
