From 8333bbe9bef7d02b1964b188bc5636cc1eea5ef7 Mon Sep 17 00:00:00 2001
From: Marty Oehme
Date: Fri, 16 Feb 2024 10:03:10 +0100
Subject: [PATCH] refactor(code): Rename validity module

From strength of findings to the more general validity module, which can
then in turn contain the 'add_to_findings' function which unsurprisingly
adds validities to findings. Makes more sense to me.
---
 scoping_review.qmd                                 | 12 ++++++------
 src/model/{strength_of_findings.py => validity.py} |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)
 rename src/model/{strength_of_findings.py => validity.py} (94%)

diff --git a/scoping_review.qmd b/scoping_review.qmd
index 9621874..e49e2ac 100644
--- a/scoping_review.qmd
+++ b/scoping_review.qmd
@@ -634,10 +634,10 @@ g = sns.PairGrid(validities[["internal_validity", "external_validity", "identifi
 
 ```{python}
 #| label: tbl-findings-institutional
-from src.model import strength_of_findings as findings
+from src.model import validity
 
 findings_institutional = pd.read_csv("02-data/supplementary/findings-institutional.csv")
-fd_df = findings.add_validities(findings_institutional, by_intervention)
+fd_df = validity.add_to_findings(findings_institutional, by_intervention)
 md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 
@@ -865,10 +865,10 @@ One limitation of the study is the modelling assumption that workers will have t
 
 ```{python}
 #| label: tbl-findings-structural
-from src.model import strength_of_findings as findings
+from src.model import validity
 
 findings_structural = pd.read_csv("02-data/supplementary/findings-structural.csv")
-fd_df = findings.add_validities(findings_structural, by_intervention)
+fd_df = validity.add_to_findings(findings_structural, by_intervention)
 md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 
@@ -1100,10 +1100,10 @@ Though the intervention clearly aims at strengthening some aspect of individual
 
 ```{python}
 #| label: tbl-findings-agency
-from src.model import strength_of_findings as findings
+from src.model import validity
 
 findings_agency = pd.read_csv("02-data/supplementary/findings-agency.csv")
-fd_df = findings.add_validities(findings_agency, by_intervention)
+fd_df = validity.add_to_findings(findings_agency, by_intervention)
 md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 
diff --git a/src/model/strength_of_findings.py b/src/model/validity.py
similarity index 94%
rename from src/model/strength_of_findings.py
rename to src/model/validity.py
index f078cd6..946e05f 100644
--- a/src/model/strength_of_findings.py
+++ b/src/model/validity.py
@@ -28,7 +28,7 @@ def _combined_validities(
     return r"\-"
 
 
-def add_validities(
+def add_to_findings(
     findings_df: DataFrame, studies_by_intervention: DataFrame
 ) -> DataFrame:
     valid_subset = (
@@ -36,7 +36,7 @@ def add_validities(
             ["internal_validity", "external_validity", "citation"]
         ]
         .fillna(1.0)
-        .drop_duplicates(subset=["citation"])
+        .drop_duplicates(subset=["citation"])  # type: ignore
         .sort_values("internal_validity")