code.communitydata.science - covid19.git/commitdiff
revisions to reflect updated example filename and clean up comments in R code
author aaronshaw <aaron.d.shaw@gmail.com>
Thu, 2 Apr 2020 18:38:32 +0000 (13:38 -0500)
committer aaronshaw <aaron.d.shaw@gmail.com>
Thu, 2 Apr 2020 18:38:32 +0000 (13:38 -0500)
wikipedia/example_analysis/output/top10_views_by_project_date.csv
wikipedia/example_analysis/pageview_example.R

index ce7eb5ef135eba3ee5405ef3b1f4b401dc3ed277..f4975846e2924afaba80879e8c877c940aae1cc6 100644
@@ -1,11 +1,11 @@
 "article","project","timestamp","views"
-"2019–20_coronavirus_pandemic","en.wikipedia","2020033100",831879
-"2020_coronavirus_pandemic_in_India","en.wikipedia","2020033100",323123
-"2019–20_coronavirus_pandemic_by_country_and_territory","en.wikipedia","2020033100",315572
-"2020_coronavirus_pandemic_in_the_United_States","en.wikipedia","2020033100",290535
-"Coronavirus_disease_2019","en.wikipedia","2020033100",211391
-"2020_coronavirus_pandemic_in_Italy","en.wikipedia","2020033100",209908
-"Coronavirus","en.wikipedia","2020033100",188921
-"USNS_Comfort_(T-AH-20)","en.wikipedia","2020033100",150422
-"USNS_Comfort_(T-AH-20)","en.wikipedia","2020033100",150422
-"WrestleMania_36","en.wikipedia","2020033100",137637
+"Charles,_Prince_of_Wales","en.wikipedia","2020010100",32880
+"Tom_Hanks","en.wikipedia","2020010100",23586
+"Boris_Johnson","en.wikipedia","2020010100",12974
+"Eurovision_Song_Contest_2020","en.wikipedia","2020010100",7901
+"Mike_Pence","en.wikipedia","2020010100",4088
+"Olga_Kurylenko","en.wikipedia","2020010100",3653
+"WrestleMania_36","en.wikipedia","2020010100",3484
+"World_Health_Organization","en.wikipedia","2020010100",3002
+"Severe_acute_respiratory_syndrome","en.wikipedia","2020010100",2037
+"Centers_for_Disease_Control_and_Prevention","en.wikipedia","2020010100",909
index fb5359aa64c856bec978b1a5bcfc9712de82c907..8d40f00719ad29054c3f3cc841ab2f2c300f16fd 100644
@@ -9,34 +9,22 @@ library(scales)
 ### Import and cleanup one datafile from the observatory
 
 DataURL <-
-    url("https://covid19.communitydata.science/datasets/wikipedia/digobs_covid19-wikipedia-enwiki_dailyviews-20200401.tsv")
+    url("https://covid19.communitydata.science/datasets/wikipedia/digobs_covid19-wikipedia-enwiki_dailyviews-20200101.tsv")
 
 views <-
     read.table(DataURL, sep="\t", header=TRUE, stringsAsFactors=FALSE) 
 
-### Alternatively, uncomment and run if working locally with full git
-### tree
-###
-### Identify data source directory and file
-## DataDir <- ("../data/")
-## DataFile <- ("dailyviews2020032600.tsv")
-
-## related.searches.top <- read.table(paste(DataDir,DataFile, sep=""),
-##                                   sep="\t", header=TRUE,
-##                                   stringsAsFactors=FALSE)
-
 ### Cleanup and do the grouping with functions from the Tidyverse
 ### (see https://www.tidyverse.org for more info)
 
 views <- views[,c("article", "project", "timestamp", "views")]
-views$timestamp <- fct_explicit_na(views$timestamp)
+views$timestamp <- fct_explicit_na(as.character(views$timestamp))
 
 
 ### Sorts and groups at the same time
 views.by.proj.date <- arrange(group_by(views, project, timestamp),
                         desc(views))
 
-
 ### Export just the top 10 by pageviews
 write.table(head(views.by.proj.date, 10),
             file="output/top10_views_by_project_date.csv", sep=",",

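The substantive change to pageview_example.R wraps the timestamp column in as.character() before handing it to fct_explicit_na(). A minimal sketch of why that conversion is needed, assuming the forcats package is installed and using a made-up timestamp vector rather than data from the repository: fct_explicit_na() expects a factor (or character vector), while read.table() parses the all-digit timestamps as numbers.

library(forcats)

## read.table() parses all-digit timestamps such as 2020010100 as numeric,
## and fct_explicit_na() expects a factor or character vector, so convert first.
timestamps <- c(2020010100, 2020010100, NA)

## missing values become an explicit "(Missing)" factor level
fct_explicit_na(as.character(timestamps))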