X-Git-Url: https://code.communitydata.science/covid19.git/blobdiff_plain/ff96d52cb92966dd0c487e6452aeab70773cf3e6..576d882c04b4cc784cd5a6296cb3c3e5aa596d39:/wikipedia/example_analysis/pageview_example.R

diff --git a/wikipedia/example_analysis/pageview_example.R b/wikipedia/example_analysis/pageview_example.R
new file mode 100644
index 0000000..fb5359a
--- /dev/null
+++ b/wikipedia/example_analysis/pageview_example.R
@@ -0,0 +1,52 @@
+### COVID-19 Digital Observatory
+### 2020-03-28
+###
+### Minimal example analysis file using pageview data
+
+library(tidyverse)
+library(scales)
+
+### Import and cleanup one datafile from the observatory
+
+DataURL <-
+    url("https://covid19.communitydata.science/datasets/wikipedia/digobs_covid19-wikipedia-enwiki_dailyviews-20200401.tsv")
+
+views <-
+    read.table(DataURL, sep="\t", header=TRUE, stringsAsFactors=FALSE)
+
+### Alternatively, uncomment and run if working locally with full git
+### tree
+###
+### Identify data source directory and file
+## DataDir <- ("../data/")
+## DataFile <- ("dailyviews2020032600.tsv")
+
+## views <- read.table(paste(DataDir, DataFile, sep=""),
+##                     sep="\t", header=TRUE,
+##                     stringsAsFactors=FALSE)
+
+### Cleanup and do the grouping with functions from the Tidyverse
+### (see https://www.tidyverse.org for more info)
+
+views <- views[,c("article", "project", "timestamp", "views")]
+views$timestamp <- fct_explicit_na(views$timestamp)
+
+
+### Sorts and groups at the same time
+views.by.proj.date <- arrange(group_by(views, project, timestamp),
+                              desc(views))
+
+
+### Export just the top 10 by pageviews
+write.table(head(views.by.proj.date, 10),
+            file="output/top10_views_by_project_date.csv", sep=",",
+            row.names=FALSE)
+
+### A simple visualization
+p <- ggplot(data=views.by.proj.date, aes(views))
+
+## Density plot with log-transformed axis
+p + geom_density() + scale_x_log10(labels=comma)
+
+
+
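The script above only prints the log-scaled density plot, and both write.table() and any figure export assume the output/ directory already exists. The lines below are a minimal sketch of how the figure could be saved alongside the CSV export; the output/views_density.png filename, the views.density variable name, and the plot dimensions are illustrative assumptions, not part of the original script.

### Sketch: make sure the output/ directory used above exists, then save
### the log-scaled density plot next to the CSV export. The filename,
### variable name, and dimensions here are illustrative assumptions.
dir.create("output", showWarnings=FALSE)
views.density <- p + geom_density() + scale_x_log10(labels=comma)
ggsave(filename="output/views_density.png", plot=views.density,
       width=6, height=4)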