]> code.communitydata.science - covid19.git/blobdiff - transliterations/src/wikidata_transliterations.py
added initial version of revision-scraper
[covid19.git] / transliterations / src / wikidata_transliterations.py
index d878354876bc10fd229fc1bd0959c2518f3fe2e1..1ac956c749f0a774a3605faa2ac92db2d33a5ff2 100644 (file)
@@ -2,6 +2,7 @@ from wikidata_api_calls import run_sparql_query
 from itertools import chain, islice
 import csv
 from json import JSONDecodeError
+from os import path
 
 class LabelData:
     __slots__ = ['entityid','label','langcode','is_alt']
@@ -23,7 +24,7 @@ def GetAllLabels(in_csvs, outfile, topNs):
 
     def load_entity_ids(in_csv, topN=5):
         with open(in_csv,'r',newline='') as infile:
-            reader = csv.DictReader(infile)
+            reader = list(csv.DictReader(infile))
             for row in reader:
                 if int(row['search_position']) < topN:
                     yield row["entityid"]
@@ -84,6 +85,14 @@ def GetEntityLabels(entityids):
     return chain(*calls)
         
 
def find_new_output_file(output, i = 1):
    """Return a filename based on `output` that does not already exist.

    If `output` itself is free it is returned unchanged; otherwise the
    counter `i` is appended before the extension (`out.csv` -> `out_1.csv`,
    `out_2.csv`, ...) until an unused name is found.

    :param output: desired output path.
    :param i: first counter value to try (kept for backward compatibility).
    :return: a path string that does not currently exist on disk.
    """
    if not path.exists(output):
        return output

    # path.splitext keeps the leading dot in ext ("out.csv" -> (".csv")),
    # so the candidate is built as f"{name}_{i}{ext}" — adding another "."
    # here would yield "out_1..csv".
    name, ext = path.splitext(output)

    # Iterate from the original base name each time so repeated collisions
    # give out_1, out_2, ... rather than compounding suffixes (out_1_2, ...).
    candidate = f"{name}_{i}{ext}"
    while path.exists(candidate):
        i += 1
        candidate = f"{name}_{i}{ext}"
    return candidate
+
 if __name__ == "__main__":
     import argparse
     parser = argparse.ArgumentParser("Use wikidata to find transliterations of terms")
@@ -93,4 +102,6 @@ if __name__ == "__main__":
 
     args = parser.parse_args()
 
-    GetAllLabels(args.inputs, args.output, topNs=args.topN)
+    output = find_new_output_file(args.output)
+
+    GetAllLabels(args.inputs, output, topNs=args.topN)

Community Data Science Collective || Want to submit a patch?