diff --git a/scoping_review.qmd b/scoping_review.qmd
index c2a3609..b79d593 100644
--- a/scoping_review.qmd
+++ b/scoping_review.qmd
@@ -609,7 +609,7 @@ def strength_for(val):
 findings_institutional = pd.read_csv("02-data/supplementary/findings-institutional.csv")
 fd_df = validity.add_to_findings(findings_institutional, by_intervention, study_strength_bins)
 
-md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
 ```
 
 Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence which describe the combined validities of the evidence base for the respective finding.
@@ -840,12 +840,14 @@ One limitation of the study is the modelling assumption that workers will have t
 from src.model import validity
 
 findings_structural = pd.read_csv("02-data/supplementary/findings-structural.csv")
-fd_df = validity.add_to_findings(findings_structural, by_intervention)
+fd_df = validity.add_to_findings(findings_structural, by_intervention, study_strength_bins)
 
-md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
 ```
 
-Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence which describe the combined validities of the evidence base for the respective finding. Validities are binned to a weak (-) evidence base up to a validity rank of 2.9, evidential (+) between 3.0 and 5.9 and strong evidence base (++) above 6.0.
+Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence, which describe the combined validities of the evidence base for the respective finding.
+Validities are segmented into a weak (-) evidence base below a validity ranking of `{python} strength_for(r"\+")`,
+an evidential (+) base from `{python} strength_for(r"\+")` to below `{python} strength_for(r"\++")`, and a strong (++) evidence base at `{python} strength_for(r"\++")` and above.
 
 Summary of main findings for structural policies
 
@@ -1075,12 +1077,14 @@ Though the intervention clearly aims at strengthening some aspect of individual
 from src.model import validity
 
 findings_agency = pd.read_csv("02-data/supplementary/findings-agency.csv")
-fd_df = validity.add_to_findings(findings_agency, by_intervention)
+fd_df = validity.add_to_findings(findings_agency, by_intervention, study_strength_bins)
 
-md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
 ```
 
-Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence which describe the combined validities of the evidence base for the respective finding. Validities are binned to a weak (-) evidence base up to a validity rank of 2.9, evidential (+) between 3.0 and 5.9 and strong evidence base (++) above 6.0.
+Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence, which describe the combined validities of the evidence base for the respective finding.
+Validities are segmented into a weak (-) evidence base below a validity ranking of `{python} strength_for(r"\+")`,
+an evidential (+) base from `{python} strength_for(r"\+")` to below `{python} strength_for(r"\++")`, and a strong (++) evidence base at `{python} strength_for(r"\++")` and above.
 
 Summary of main findings for agency-based policies
 
@@ -1225,6 +1229,20 @@ def regions_for_inequality(df, inequality:str):
     return sns.countplot(df_temp, x="region", order=df_temp["region"].value_counts().index)
 ```
 
+```{python}
+#| label: fig-validity
+from src.model import validity
+
+validities = validity.calculate(by_intervention)
+validities["identifier"] = validities["author"].str.replace(r',.*$', '', regex=True) + " (" + validities["year"].astype(str) + ")"
+
+g = sns.PairGrid(validities[["internal_validity", "external_validity", "identifier"]].drop_duplicates(subset="identifier"),
+                 x_vars=["internal_validity", "external_validity"], y_vars=["identifier"]
+                 )
+
+# Alternative: scatter plot of external vs. internal validity, coloured by intervention
+#sns.scatterplot(data=validities, x='external_validity', y='internal_validity', hue='intervention')
+```
 
 Policy interventions undertaken either with the explicit aim of reducing one or multiple inequalities, or analysed under the lens of such an aim implicitly, appear in a wide array of variations to their approach and primary targeted inequality, as was highlighted in the previous section. To make further sense of the studies shining a light on such approaches, it makes sense to divide their attention not just by primary approach, but by individual or overlapping inequalities being targeted, as well as the region of their operation.
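
The reworded notes replace the old hard-coded cut-offs with inline `{python} strength_for(...)` expressions, but `strength_for` itself only appears as hunk context (around line 609) and its body is not part of this diff. The following is a minimal sketch of what such a helper could look like, assuming `study_strength_bins` maps the strength symbols to the lower bounds documented in the removed note; the dictionary layout and exact values are assumptions, not code taken from the repository:

```python
# Hypothetical sketch only: the real strength_for() / study_strength_bins are
# defined earlier in scoping_review.qmd and are not visible in this diff.
# Cut-offs mirror the removed note (weak below 3.0, evidential 3.0-5.9, strong 6.0+).
study_strength_bins = {
    r"\+": 3.0,   # lower bound of the evidential (+) evidence base
    r"\++": 6.0,  # lower bound of the strong (++) evidence base
}

def strength_for(val: str) -> float:
    """Return the lower bound of the validity bin keyed by the given symbol pattern."""
    return study_strength_bins[val]

# Mirrors the inline expressions used in the notes:
# strength_for(r"\+")  -> 3.0
# strength_for(r"\++") -> 6.0
```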
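
The new `fig-validity` chunk constructs a `seaborn.PairGrid` with one row per study but does not yet map a plotting function onto it, so as written it renders an empty grid. One way the chunk could be completed into a horizontal dot plot is sketched below; the choice of `stripplot` and the styling parameters are assumptions, and `validities` is the data frame built in that chunk:

```python
# Sketch of a possible completion for the fig-validity chunk; not part of the diff.
# Assumes `validities` as constructed there via validity.calculate(by_intervention).
import seaborn as sns

g = sns.PairGrid(
    validities[["internal_validity", "external_validity", "identifier"]].drop_duplicates(subset="identifier"),
    x_vars=["internal_validity", "external_validity"],
    y_vars=["identifier"],
)
# Draw one dot per study for each validity dimension.
g.map(sns.stripplot, orient="h", jitter=False, size=6)
g.set(xlabel="validity rank", ylabel="")
```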