diff --git a/01-codechunks/_prep-data.py b/01-codechunks/_prep-data.py
index bfb1c02..0ad0d61 100644
--- a/01-codechunks/_prep-data.py
+++ b/01-codechunks/_prep-data.py
@@ -2,7 +2,7 @@
 from pathlib import Path
 import os
 import re ## standard imports
-from IPython.core.display import Markdown as md
+from IPython.display import display, Markdown, HTML
 import numpy as np
 import pandas as pd
 from matplotlib import pyplot as plt
diff --git a/article.qmd b/article.qmd
index 5035f70..933f43d 100644
--- a/article.qmd
+++ b/article.qmd
@@ -90,7 +90,7 @@ with a focus on the narrowing criteria specified in @tbl-inclusion-criteria.
 ```{python}
 inclusion_criteria = pd.read_csv("02-data/supplementary/inclusion-criteria.tsv", sep="\t")
-md(tabulate(inclusion_criteria, showindex=False, headers="keys", tablefmt="grid"))
+Markdown(tabulate(inclusion_criteria, showindex=False, headers="keys", tablefmt="grid"))
 ```
 Source: Author's elaboration
@@ -211,7 +211,7 @@ def strength_for(val):
 findings_institutional = pd.read_csv("02-data/supplementary/findings-institutional.csv")
-outp = md(
+outp = Markdown(
     tabulate(
         validity.add_to_findings(
             findings_institutional, df_by_intervention, study_strength_bins
@@ -680,7 +680,7 @@ Another reason could be the actual implementation of different policy programmes
 ```{python}
 terms_wow = pd.read_csv("02-data/supplementary/terms_wow.csv")
-md(tabulate(terms_wow.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+Markdown(tabulate(terms_wow.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 World of work term cluster
@@ -693,7 +693,7 @@ World of work term cluster
 terms_policy = pd.read_csv("02-data/supplementary/terms_policy.csv")
 # different headers to include 'social norms'
 headers = ["General", "Institutional", "Structural", "Agency & social norms"]
-md(tabulate(terms_policy.fillna(""), showindex=False, headers=headers, tablefmt="grid"))
+Markdown(tabulate(terms_policy.fillna(""), showindex=False, headers=headers, tablefmt="grid"))
 ```
 Policy intervention term cluster
@@ -704,7 +704,7 @@
 ```{python}
 terms_inequality = pd.read_csv("02-data/supplementary/terms_inequality.csv")
-md(tabulate(terms_inequality.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+Markdown(tabulate(terms_inequality.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 Inequality term cluster
diff --git a/scoping_review.qmd b/scoping_review.qmd
index 46ece2f..4850557 100644
--- a/scoping_review.qmd
+++ b/scoping_review.qmd
@@ -290,7 +290,7 @@ with the search query requiring a term from the general column and one other col
 #| label: tbl-wow-terms
 #| tbl-cap: World of work term cluster
 terms_wow = pd.read_csv("02-data/supplementary/terms_wow.csv")
-md(tabulate(terms_wow.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+Markdown(tabulate(terms_wow.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 The world of work cluster, like the inequality and policy intervention clusters below, is made up of a general signifier (such as "work", "inequality" or "intervention") which has to be labelled in a study to form part of the sample,
@@ -308,7 +308,7 @@ For the database query, a single term from the general category is required to b
 terms_policy = pd.read_csv("02-data/supplementary/terms_policy.csv")
 # different headers to include 'social norms'
 headers = ["General", "Institutional", "Structural", "Agency & social norms"]
-md(tabulate(terms_policy.fillna(""), showindex=False, headers=headers, tablefmt="grid"))
+Markdown(tabulate(terms_policy.fillna(""), showindex=False, headers=headers, tablefmt="grid"))
 ```
 Lastly, the inequality cluster is once again made up of a general term describing inequality which has to form part of the query results, as well as at least one term describing a specific vertical or horizontal inequality,
 as seen in @tbl-inequality-terms.
@@ -318,7 +318,7 @@
 #| label: tbl-inequality-terms
 #| tbl-cap: Inequality term cluster
 terms_inequality = pd.read_csv("02-data/supplementary/terms_inequality.csv")
-md(tabulate(terms_inequality.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
+Markdown(tabulate(terms_inequality.fillna(""), showindex=False, headers="keys", tablefmt="grid"))
 ```
 A general as well as category-specific term from each cluster will be required, using a intersection merge (Boolean 'AND'),
@@ -342,7 +342,7 @@ with a focus on the narrowing criteria specified in @tbl-inclusion-criteria.
 #| label: inclusion-criteria
 inclusion_criteria = pd.read_csv("02-data/supplementary/inclusion-criteria.tsv", sep="\t")
-md(tabulate(inclusion_criteria, showindex=False, headers="keys", tablefmt="grid"))
+Markdown(tabulate(inclusion_criteria, showindex=False, headers="keys", tablefmt="grid"))
 ```
 Source: Author's elaboration
@@ -543,7 +543,7 @@ def strength_for(val):
 findings_institutional = pd.read_csv("02-data/supplementary/findings-institutional.csv")
 fd_df = validity.add_to_findings(findings_institutional, by_intervention, study_strength_bins)
-md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
+Markdown(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
 ```
 Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence which describe the combined validities of the evidence base for the respective finding.
@@ -775,7 +775,7 @@ from src.model import validity
 findings_structural = pd.read_csv("02-data/supplementary/findings-structural.csv")
 fd_df = validity.add_to_findings(findings_structural, by_intervention, study_strength_bins)
-md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
+Markdown(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
 ```
 Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence which describe the combined validities of the evidence base for the respective finding.
@@ -1012,7 +1012,7 @@ from src.model import validity
 findings_agency = pd.read_csv("02-data/supplementary/findings-agency.csv")
 fd_df = validity.add_to_findings(findings_agency, by_intervention, study_strength_bins)
-md(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
+Markdown(tabulate(fd_df[["area of policy", "internal_validity", "external_validity", "findings", "channels"]].fillna(""), showindex=False, headers=["area of policy", "internal strength", "external strength", "main findings", "channels"], tablefmt="grid"))
 ```
 Note: Each main finding is presented with an internal strength of evidence and an external strength of evidence which describe the combined validities of the evidence base for the respective finding.
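
For reference, the pattern these hunks converge on looks like the following. This is a minimal, self-contained sketch rather than code from the repository: the toy DataFrame and variable names are illustrative, and only the `IPython.display` import, the `tabulate(..., tablefmt="grid")` call shape, and the `Markdown(...)` wrapper mirror the diff.

```python
# Minimal sketch of the post-change pattern (illustrative data, not repository data):
# render a tabulate "grid" table via IPython.display.Markdown instead of the old
# `from IPython.core.display import Markdown as md` alias removed in _prep-data.py.
import pandas as pd
from tabulate import tabulate
from IPython.display import display, Markdown

# Stand-in for the CSV/TSV files read from 02-data/supplementary/ in the real chunks.
toy = pd.DataFrame({"General": ["term a", "term b"], "Institutional": ["term c", ""]})

# headers="keys" uses the DataFrame's column names as table headers; showindex=False
# hides the index column, matching the calls throughout the diff.
table_text = tabulate(toy.fillna(""), showindex=False, headers="keys", tablefmt="grid")

# In a Quarto {python} chunk or Jupyter cell, the Markdown object renders in place;
# display() makes that explicit when the call is not the cell's last expression.
display(Markdown(table_text))
```

Leaving `Markdown(table_text)` as the chunk's final expression renders the same way; `display()` only matters when the call sits mid-cell.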