Merge pull request #5836 from jbrockmendel/over
CLN: over-indentation E117
bashtage committed Jun 7, 2019
2 parents ae3c913 + e97e9a1 commit a61a9e2
Showing 12 changed files with 93 additions and 83 deletions.
2 changes: 2 additions & 0 deletions setup.cfg
@@ -59,6 +59,8 @@ select=
     # E111: Indentation is not a multiple of four
     E114,
     # E114: Indentation is not a multiple of four (comment)
+    E117,
+    # E117: over-indented
     E227,
     # E227: missing whitespace around bitwise or shift operator
     E228,
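For context, pycodestyle's E117 fires when a block is indented further than its enclosing statement requires. A minimal sketch of the pattern this commit cleans up (hypothetical function, not taken from the diff below):

    def f():
            return 1    # E117: body indented 8 spaces under a top-level def

    def f():
        return 1        # fixed: standard 4-space indentation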
6 changes: 3 additions & 3 deletions statsmodels/base/_constraints.py
@@ -299,14 +299,14 @@ def fit_constrained_wrap(model, constraints, start_params=None, **fit_kwds):
                                        fit_kwds=fit_kwds)
     #create dummy results Instance, TODO: wire up properly
     res = self.fit(start_params=params, maxiter=0,
-                       warn_convergence=False)  # we get a wrapper back
+                   warn_convergence=False)  # we get a wrapper back
     res._results.params = params
     res._results.cov_params_default = cov
     cov_type = fit_kwds.get('cov_type', 'nonrobust')
     if cov_type == 'nonrobust':
-            res._results.normalized_cov_params = cov / res_constr.scale
+        res._results.normalized_cov_params = cov / res_constr.scale
     else:
-            res._results.normalized_cov_params = None
+        res._results.normalized_cov_params = None

     k_constr = len(q)
     res._results.df_resid += k_constr
54 changes: 27 additions & 27 deletions statsmodels/discrete/tests/test_count_model.py
@@ -530,30 +530,30 @@ def test_predict_generic_zi(self):


 class TestZeroInflatedNegativeBinomialP_predict2(object):
-        @classmethod
-        def setup_class(cls):
-            data = sm.datasets.randhie.load(as_pandas=False)
-
-            cls.endog = data.endog
-            exog = data.exog
-            start_params = np.array(
-                [-2.83983767, -2.31595924, -3.9263248 , -4.01816431, -5.52251843,
-                 -2.4351714 , -4.61636366, -4.17959785, -0.12960256, -0.05653484,
-                 -0.21206673, 0.08782572, -0.02991995, 0.22901208, 0.0620983 ,
-                 0.06809681, 0.0841814 , 0.185506 , 1.36527888])
-            mod = sm.ZeroInflatedNegativeBinomialP(
-                cls.endog, exog, exog_infl=exog, p=2)
-            res = mod.fit(start_params=start_params, method="bfgs",
-                          maxiter=1000, disp=0)
-
-            cls.res = res
-
-        def test_mean(self):
-            assert_allclose(self.res.predict().mean(), self.endog.mean(),
-                            atol=0.02)
-
-        def test_zero_nonzero_mean(self):
-            mean1 = self.endog.mean()
-            mean2 = ((1 - self.res.predict(which='prob-zero').mean()) *
-                     self.res.predict(which='mean-nonzero').mean())
-            assert_allclose(mean1, mean2, atol=0.2)
+    @classmethod
+    def setup_class(cls):
+        data = sm.datasets.randhie.load(as_pandas=False)
+
+        cls.endog = data.endog
+        exog = data.exog
+        start_params = np.array([
+            -2.83983767, -2.31595924, -3.9263248, -4.01816431, -5.52251843,
+            -2.4351714, -4.61636366, -4.17959785, -0.12960256, -0.05653484,
+            -0.21206673, 0.08782572, -0.02991995, 0.22901208, 0.0620983,
+            0.06809681, 0.0841814, 0.185506, 1.36527888])
+        mod = sm.ZeroInflatedNegativeBinomialP(
+            cls.endog, exog, exog_infl=exog, p=2)
+        res = mod.fit(start_params=start_params, method="bfgs",
+                      maxiter=1000, disp=0)
+
+        cls.res = res
+
+    def test_mean(self):
+        assert_allclose(self.res.predict().mean(), self.endog.mean(),
+                        atol=0.02)
+
+    def test_zero_nonzero_mean(self):
+        mean1 = self.endog.mean()
+        mean2 = ((1 - self.res.predict(which='prob-zero').mean()) *
+                 self.res.predict(which='mean-nonzero').mean())
+        assert_allclose(mean1, mean2, atol=0.2)
5 changes: 3 additions & 2 deletions statsmodels/iolib/foreign.py
@@ -544,8 +544,9 @@ def _dtype_to_stata_type(dtype):
     #TODO: expand to handle datetime to integer conversion
     if dtype.type == np.string_:
         return chr(dtype.itemsize)
-    elif dtype.type == np.object_:  # try to coerce it to the biggest string
-        # not memory efficient, what else could we do?
+    elif dtype.type == np.object_:
+        # try to coerce it to the biggest string
+        # not memory efficient, what else could we do?
         return chr(244)
     elif dtype == np.float64:
         return chr(255)
2 changes: 1 addition & 1 deletion statsmodels/miscmodels/nonlinls.py
@@ -228,7 +228,7 @@ def fit_random(self, ntries=10, rvs_generator=None, nparams=None):
         '''

         if nparams is None:
-                nparams = self.nparams
+            nparams = self.nparams
         if rvs_generator is None:
             rvs = np.random.uniform(low=-10, high=10, size=(ntries, nparams))
         else:
14 changes: 10 additions & 4 deletions statsmodels/sandbox/descstats.py
@@ -118,10 +118,16 @@ def descstats(data, cols=None, axis=0):
 #                 +str(x[var].max())+')'+os.linesep}
 #        else:
         for var in range(x.shape[1]):
-            desc += "%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g \
-%(range)20s" % {'name': var, 'obs': len(x[:,var]), 'mean': x[:,var].mean(),
-                'stddev': x[:,var].std(), 'range': '('+str(x[:,var].min())+', '+\
-                str(x[:,var].max())+')'+os.linesep}
+            xv = x[:, var]
+            kwargs = {
+                'name': var,
+                'obs': len(xv),
+                'mean': xv.mean(),
+                'stddev': xv.std(),
+                'range': '('+str(xv.min())+', '+str(xv.max())+')'+os.linesep
+            }
+            desc += ("%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g "
+                     "%(range)20s" % kwargs)
     else:
         raise ValueError("data not understood")

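The descstats rewrite above only repackages the format arguments into a dict; %-style formatting accepts any mapping, so the rendered line is unchanged. A standalone illustration with made-up values:

    import os

    # hypothetical stand-in values for one variable's summary statistics
    kwargs = {'name': 0, 'obs': 500, 'mean': 1.25, 'stddev': 0.5,
              'range': '(0.1, 3.2)' + os.linesep}
    print("%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g "
          "%(range)20s" % kwargs)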
2 changes: 1 addition & 1 deletion statsmodels/sandbox/regression/penalized.py
@@ -159,7 +159,7 @@ def __init__(self, endog, exog, r_matrix=None, q_matrix=None,
         else:
             self.q_matrix = np.zeros(k_constraints)[:, None]
         if self.q_matrix.shape != (k_constraints, 1):
-                raise ValueError('q_matrix has wrong shape')
+            raise ValueError('q_matrix has wrong shape')

         if sigma_prior is not None:
             sigma_prior = np.asarray(sigma_prior)
2 changes: 1 addition & 1 deletion statsmodels/sandbox/tools/try_mctools.py
@@ -38,7 +38,7 @@ def lb(x):
 #----------------------

 def randwalksim(nobs=500, drift=0.0):
-        return (drift+np.random.randn(nobs)).cumsum()
+    return (drift+np.random.randn(nobs)).cumsum()

 def adf20(x):
     return unitroot_adf(x, 2, trendorder=0, autolag=None)
9 changes: 5 additions & 4 deletions statsmodels/tsa/arima_model.py
@@ -390,10 +390,11 @@ def _make_arma_names(data, k_trend, order, exog_names):
     if k_ma ==0 and k_ar ==0:
         if len(exog_names) != 0:
             return exog_names
-    elif (exog_names[-k_ma:] == ma_lag_names ) and \
-            exog_names[-(k_ar+k_ma):-k_ma] == ar_lag_names and \
-            (not exog_names or not trend_name or trend_name[0] == exog_names[0]):
-            return exog_names
+    elif (exog_names[-k_ma:] == ma_lag_names and
+          exog_names[-(k_ar+k_ma):-k_ma] == ar_lag_names and
+          (not exog_names or not trend_name
+           or trend_name[0] == exog_names[0])):
+        return exog_names

     exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names
     return exog_names
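In the old _make_arma_names condition, backslash continuations made the and/or grouping easy to misread and were paired with an over-indented return. The parenthesized form above makes the grouping explicit; a small check with stand-in values (not from the test suite):

    ma_lag_names = ['ma.L1']
    ar_lag_names = ['ar.L1']
    trend_name = ['const']
    exog_names = ['const', 'ar.L1', 'ma.L1']
    k_ar = k_ma = 1

    # same condition as the new code, one parenthesized expression
    ok = (exog_names[-k_ma:] == ma_lag_names and
          exog_names[-(k_ar+k_ma):-k_ma] == ar_lag_names and
          (not exog_names or not trend_name
           or trend_name[0] == exog_names[0]))
    print(ok)  # True for these stand-in values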
8 changes: 4 additions & 4 deletions statsmodels/tsa/vector_ar/irf.py
@@ -486,12 +486,12 @@ def err_band_sz3(self, orth=False, svar=False, repl=1000, signif=0.05,

         gamma = np.zeros((repl, periods+1, neqs, neqs))
         for p in range(repl):
-            c=0
+            c = 0
             for j in range(neqs):
                 for i in range(neqs):
-                        gamma[p,1:,i,j] = W[j,k[j],i*periods:(i+1)*periods] * irf_resim[p,1:,i,j]
-                        if i == neqs-1:
-                            gamma[p,1:,i,j] = W[j,k[j],i*periods:] * irf_resim[p,1:,i,j]
+                    gamma[p,1:,i,j] = W[j,k[j],i*periods:(i+1)*periods] * irf_resim[p,1:,i,j]
+                    if i == neqs-1:
+                        gamma[p,1:,i,j] = W[j,k[j],i*periods:] * irf_resim[p,1:,i,j]

         gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles
         indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
@@ -13,17 +13,17 @@


 def print_debug_output(results, dt):
-        print("\n\n\nDETERMINISTIC TERMS: " + dt)
-        coefs = results["est"]["Lagged endogenous term"]
-        print("coefs:")
-        print(str(type(coefs)) + str(coefs.shape))
-        print(coefs)
-        print("se: ")
-        print(results["se"]["Lagged endogenous term"])
-        print("t: ")
-        print(results["t"]["Lagged endogenous term"])
-        print("p: ")
-        print(results["p"]["Lagged endogenous term"])
+    print("\n\n\nDETERMINISTIC TERMS: " + dt)
+    coefs = results["est"]["Lagged endogenous term"]
+    print("coefs:")
+    print(str(type(coefs)) + str(coefs.shape))
+    print(coefs)
+    print("se: ")
+    print(results["se"]["Lagged endogenous term"])
+    print("t: ")
+    print(results["t"]["Lagged endogenous term"])
+    print("p: ")
+    print(results["p"]["Lagged endogenous term"])


def dt_s_tup_to_string(dt_s_tup):
@@ -11,32 +11,32 @@


 def print_debug_output(results, dt):
-        print("\n\n\nDETERMINISTIC TERMS: " + dt)
-        alpha = results["est"]["alpha"]
-        print("alpha:")
-        print(str(type(alpha)) + str(alpha.shape))
-        print(alpha)
+    print("\n\n\nDETERMINISTIC TERMS: " + dt)
+    alpha = results["est"]["alpha"]
+    print("alpha:")
+    print(str(type(alpha)) + str(alpha.shape))
+    print(alpha)
+    print("se: ")
+    print(results["se"]["alpha"])
+    print("t: ")
+    print(results["t"]["alpha"])
+    print("p: ")
+    print(results["p"]["alpha"])
+    beta = results["est"]["beta"]
+    print("beta:")
+    print(str(type(beta)) + str(beta.shape))
+    print(beta)
+    gamma = results["est"]["Gamma"]
+    print("Gamma:")
+    print(str(type(gamma)) + str(gamma.shape))
+    print(gamma)
+    if "co" in dt or "s" in dt or "lo" in dt:
+        c = results["est"]["C"]
+        print("C:")
+        print(str(type(c)) + str(c.shape))
+        print(c)
         print("se: ")
-        print(results["se"]["alpha"])
-        print("t: ")
-        print(results["t"]["alpha"])
-        print("p: ")
-        print(results["p"]["alpha"])
-        beta = results["est"]["beta"]
-        print("beta:")
-        print(str(type(beta)) + str(beta.shape))
-        print(beta)
-        gamma = results["est"]["Gamma"]
-        print("Gamma:")
-        print(str(type(gamma)) + str(gamma.shape))
-        print(gamma)
-        if "co" in dt or "s" in dt or "lo" in dt:
-            c = results["est"]["C"]
-            print("C:")
-            print(str(type(c)) + str(c.shape))
-            print(c)
-            print("se: ")
-            print(results["se"]["C"])
+        print(results["se"]["C"])


def dt_s_tup_to_string(dt_s_tup):
