fix(deps): update dependency statsmodels to ^0.14.0 (#93)
* fix(deps): update dependency statsmodels to ^0.14.0

* Update poetry.lock

* specify statsmodels version

* fix tests

* remove 3.7 from ci tests

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Xinyu Zou <[email protected]>
Co-authored-by: vjohnson1-godaddy <[email protected]>
3 people authored Aug 16, 2023
1 parent e99df63 commit 094cb3e
Showing 6 changed files with 576 additions and 465 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -13,7 +13,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: ['3.7', '3.8', '3.9', '3.10']
+        python-version: ['3.8', '3.9', '3.10']
         include:
           - os: ubuntu-latest
           - os: windows-latest
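Dropping '3.7' keeps the CI matrix in line with the tightened python = ">=3.8,<3.11" constraint in pyproject.toml below. As a rough illustration (plain Python, not workflow code), the trimmed matrix still fans out to nine jobs before any include: entries adjust them:

# Illustrative only: enumerate the job combinations the matrix above produces.
from itertools import product

oses = ["ubuntu-latest", "windows-latest", "macos-latest"]
python_versions = ["3.8", "3.9", "3.10"]

for os_name, py in product(oses, python_versions):
    print(f"{os_name} / python {py}")  # 3 x 3 = 9 jobs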
1,019 changes: 565 additions & 454 deletions poetry.lock

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions pyproject.toml
@@ -9,8 +9,8 @@ maintainers = ["GoDaddy <[email protected]>"]
 keywords = ["sample size", "experimentation", "power analysis"]
 
 [tool.poetry.dependencies]
-python = ">=3.7.1,<3.11"
-statsmodels = "^0.13.1"
+python = ">=3.8,<3.11"
+statsmodels = "^0.14.0"
 jsonschema = "^4.5.1"
 
 [tool.poetry.dev-dependencies]
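Poetry's caret constraint "^0.14.0" expands to ">=0.14.0,<0.15.0": later patches in the 0.14 series are accepted, but 0.15 is not, and the previous "^0.13.1" pin falls below the new floor. A quick sketch of that window using the third-party packaging library (not a dependency of this project):

# Sketch only: the version window that "^0.14.0" corresponds to.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

caret_0_14 = SpecifierSet(">=0.14.0,<0.15.0")  # Poetry's expansion of "^0.14.0"

print(Version("0.14.2") in caret_0_14)  # True  - patch releases of 0.14 are allowed
print(Version("0.13.1") in caret_0_14)  # False - the old pin is below the new floor
print(Version("0.15.0") in caret_0_14)  # False - the next minor is excluded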
6 changes: 3 additions & 3 deletions sample_size/metrics.py
@@ -104,7 +104,7 @@ def _generate_alt_p_values(
         z_alt = stats.norm.rvs(loc=effect_size, size=size, random_state=random_state)
         p_values: npt.NDArray[np.float_] = stats.norm.sf(np.abs(z_alt))
         if self.alternative == "two-sided":
-            p_values *= 2
+            return 2 * p_values
         return p_values
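Both forms double the one-sided p-values for a two-sided test; the difference is that "p_values *= 2" mutates the array returned by stats.norm.sf in place, while "return 2 * p_values" hands back a new object and leaves the original untouched. The same change is repeated in the next two hunks. A standalone sketch of that difference on a plain NumPy array (not repo code):

# Standalone sketch: in-place doubling vs. returning a new array.
import numpy as np

one_sided = np.array([0.01, 0.20, 0.45])

in_place = one_sided.copy()
in_place *= 2                # old form: mutates the array it is called on

doubled = 2 * one_sided      # new form: allocates a fresh array

print(in_place)   # [0.02 0.4  0.9 ]
print(doubled)    # [0.02 0.4  0.9 ]
print(one_sided)  # [0.01 0.2  0.45]  <- untouched by the new form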


@@ -136,7 +136,7 @@ def _generate_alt_p_values(
         p_values: npt.NDArray[np.float_] = stats.t.sf(np.abs(t_alt), 2 * (sample_size - 1))
         # Todo: use accurate p-value calculation due to nct's asymmetric distribution
         if self.alternative == "two-sided":
-            p_values *= 2
+            return 2 * p_values
         return p_values


@@ -186,5 +186,5 @@ def _generate_alt_p_values(
         z_alt = stats.norm.rvs(loc=effect_size, size=size, random_state=random_state)
         p_values: npt.NDArray[np.float_] = stats.norm.sf(np.abs(z_alt))
         if self.alternative == "two-sided":
-            p_values *= 2
+            return 2 * p_values
         return p_values
2 changes: 1 addition & 1 deletion sample_size/multiple_testing.py
@@ -101,7 +101,7 @@ def fdr_bh(a: npt.NDArray[np.float_]) -> npt.NDArray[np.bool_]:
         for i, m in enumerate(metrics):
             p_values.append(m.generate_p_values(true_alt[i], sample_size, random_state))
 
-        rejected = np.apply_along_axis(fdr_bh, 0, np.array(p_values))  # type: ignore[no-untyped-call]
+        rejected = np.apply_along_axis(fdr_bh, 0, np.array(p_values))
 
         true_discoveries = rejected & true_alt
8 changes: 4 additions & 4 deletions tests/sample_size/test_metrics.py
@@ -124,7 +124,7 @@ def test_boolean_metric_get_probability_too_small(self):
     @patch("scipy.stats.norm")
     def test_boolean__generate_alt_p_values(self, size, sample_size, alternative, mock_norm, mock_variance):
         p_value_generator = mock_norm.sf
-        p_values = MagicMock()
+        p_values = ["🏝️", "🏜️", "🌋"]
         mock_norm.rvs.return_value = -ord("🌮")
         p_value_generator.return_value = p_values
         mock_variance.__get__ = MagicMock(return_value=self.DEFAULT_MOCK_VARIANCE)
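A note on why a plain list can stand in for the array here: with scipy.stats.norm patched, sf returns the sentinel list, and when the two-sided branch runs, the production code's "return 2 * p_values" is ordinary list repetition, giving the test a concrete, deterministic value to compare against rather than a MagicMock product. Illustration only (not the repo's test code):

# Illustration: list repetition is what 2 * p_values yields for a stubbed list.
sentinel = ["🏝️", "🏜️", "🌋"]
print(2 * sentinel)  # ['🏝️', '🏜️', '🌋', '🏝️', '🏜️', '🌋']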
@@ -159,7 +159,7 @@ def test_numeric_metric_constructor_sets_params(self):
     @patch("scipy.stats.t")
     def test_numeric__generate_alt_p_values(self, size, sample_size, alternative, mock_t, mock_nct, mock_variance):
         p_value_generator = mock_t.sf
-        p_values = MagicMock()
+        p_values = ["🏝️", "🏜️", "🌋"]
         mock_nct.rvs.return_value = -ord("🌮")
         p_value_generator.return_value = p_values
         mock_variance.__get__ = MagicMock(return_value=self.DEFAULT_VARIANCE)
@@ -226,7 +226,7 @@ def test_ratio_metric_variance(self):
     @patch("scipy.stats.norm")
     def test_ratio__generate_alt_p_values(self, size, sample_size, alternative, mock_norm, mock_variance):
         p_value_generator = mock_norm.sf
-        p_values = MagicMock()
+        p_values = ["🏝️", "🏜️", "🌋"]
         mock_norm.rvs.return_value = -ord("🌮")
         p_value_generator.return_value = p_values
         mock_variance.__get__ = MagicMock(return_value=self.DEFAULT_VARIANCE)
@@ -238,7 +238,7 @@ def test_ratio__generate_alt_p_values(self, size, sample_size, alternative, mock_norm, mock_variance):
             self.DEFAULT_DENOMINATOR_VARIANCE,
             self.DEFAULT_COVARIANCE,
             self.DEFAULT_MDE,
-            self.DEFAULT_ALTERNATIVE,
+            alternative,
         )
 
         p = metric._generate_alt_p_values(size, sample_size, RANDOM_STATE)
