X-Git-Url: https://code.communitydata.science/covid19.git/blobdiff_plain/4f8a698c62f878e248dc1c4d4bb8e048a79fb661..282588772e99e7df51523928c364247e9ad5a54b:/wikipedia_views/analysis/pageview_example.R

diff --git a/wikipedia_views/analysis/pageview_example.R b/wikipedia_views/analysis/pageview_example.R
index 8a7aba3..fb5359a 100644
--- a/wikipedia_views/analysis/pageview_example.R
+++ b/wikipedia_views/analysis/pageview_example.R
@@ -4,13 +4,12 @@
 ### Minimal example analysis file using pageview data
 
 library(tidyverse)
-library(ggplot2)
 library(scales)
 
-### Import and cleanup data
+### Import and cleanup one datafile from the observatory
 DataURL <-
-    url("https://github.com/CommunityDataScienceCollective/COVID-19_Digital_Observatory/raw/master/wikipedia_views/data/dailyviews2020032600.tsv")
+    url("https://covid19.communitydata.science/datasets/wikipedia/digobs_covid19-wikipedia-enwiki_dailyviews-20200401.tsv")
 
 views <- read.table(DataURL, sep="\t", header=TRUE, stringsAsFactors=FALSE)
 
@@ -30,12 +29,14 @@ views <-
 ### (see https://www.tidyverse.org for more info)
 views <- views[,c("article", "project", "timestamp", "views")]
 
-views$timestamp <- factor(views$timestamp)
+views$timestamp <- fct_explicit_na(views$timestamp)
+
 ### Sorts and groups at the same time
 views.by.proj.date <- arrange(group_by(views, project, timestamp), desc(views))
+
 ### Export just the top 10 by pageviews
 write.table(head(views.by.proj.date, 10), file="output/top10_views_by_project_date.csv", sep=",",
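
For context, below is a minimal standalone sketch of what the revised snippet does end to end, assembled from the lines visible in this diff. The column names, the new dataset URL, the use of fct_explicit_na(), and the output path all come from the hunks above; anything beyond that (closing off the write.table() call, which is truncated at the end of the excerpt, and the existence of an output/ directory) is an assumption rather than part of the commit.

## Sketch assembled from the hunks above; assumes the TSV at the new URL has
## article, project, timestamp, and views columns and that an output/
## directory already exists (neither is shown in full in this diff).
library(tidyverse)  # loads forcats, which provides fct_explicit_na()

DataURL <-
    url("https://covid19.communitydata.science/datasets/wikipedia/digobs_covid19-wikipedia-enwiki_dailyviews-20200401.tsv")

views <- read.table(DataURL, sep="\t", header=TRUE, stringsAsFactors=FALSE)

## keep only the columns used downstream
views <- views[, c("article", "project", "timestamp", "views")]

## convert timestamp to a factor, giving any missing values an explicit level
views$timestamp <- fct_explicit_na(views$timestamp)

## group by project and date, sorted by descending pageviews
views.by.proj.date <- arrange(group_by(views, project, timestamp), desc(views))

## export just the top 10 rows by pageviews; the original call continues past
## the end of the excerpt, so any further arguments are omitted here
write.table(head(views.by.proj.date, 10),
            file="output/top10_views_by_project_date.csv", sep=",")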