Mirror of https://github.com/ilri/csv-metadata-quality.git (synced 2024-11-28 16:48:20 +01:00)

Compare commits


No commits in common. "032a1db3926611ccf7ad73c7374eafe72214c5f6" and "344993370cf6b4b8e3ccb1b73f6b372186517c43" have entirely different histories.

5 changed files with 4 additions and 131 deletions

CHANGELOG.md (View File)

@@ -17,8 +17,6 @@ issues with ftfy
 ### Added
 - Ability to drop invalid AGROVOC values with `-d` when checking AGROVOC values
   with `-a <field.name>`
-- Ability to add missing UN M.49 regions when both country and region columns
-  are present. Enable with `-u` (unsafe fixes) for now.
 ## [0.5.0] - 2021-12-08
 ### Added
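For context on the surviving `-d` flag: a minimal invocation sketch combining it with AGROVOC validation, assuming the `csv-metadata-quality` console entry point and the `-i`/`-o` input and output options documented in the README (the file names here are hypothetical):

```console
$ csv-metadata-quality -i items.csv -o items-cleaned.csv -a dcterms.subject -d
```

Adding `-u` on the older commit would also have enabled the unsafe fixes, including the UN M.49 region fix that this compare removes.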

README.md (View File)

@@ -28,7 +28,6 @@ If you use the DSpace CSV metadata quality checker please cite:
 - Remove unnecessary Unicode like [non-breaking spaces](https://en.wikipedia.org/wiki/Non-breaking_space), [replacement characters](https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character), etc
 - Check for "suspicious" characters that indicate encoding or copy/paste issues, for example "foreˆt" should be "forêt"
 - Check for "mojibake" characters (and attempt to fix with `--unsafe-fixes`)
-- Check for countries with missing regions (and attempt to fix with `--unsafe-fixes`)
 - Remove duplicate metadata values
 - Check for duplicate items, using the title, type, and date issued as an indicator
@@ -71,7 +70,7 @@ While it is *theoretically* possible for a single `|` character to be used legitimately
 This will also remove unnecessary trailing multi-value separators, for example `Kenya||Tanzania||`.

 ## Unsafe Fixes
-You can enable several "unsafe" fixes with the `--unsafe-fixes` option. Currently this will remove newlines, perform Unicode normalization, attempt to fix "mojibake" characters, and add missing UN M.49 regions.
+You can enable several "unsafe" fixes with the `--unsafe-fixes` option. Currently this will remove newlines, perform Unicode normalization, and attempt to fix "mojibake" characters.

 ### Newlines
 This is considered "unsafe" because some systems give special importance to vertical space and render it properly. DSpace does not support rendering newlines in its XMLUI and has, at times, suffered from parsing errors that cause the import process to fail if an input file had newlines. The `--unsafe-fixes` option strips Unix line feeds (U+000A).
@@ -92,9 +91,6 @@ Read more about [Unicode normalization](https://withblue.ink/2019/03/11/why-you-
 Pay special attention to the output of the script as well as the resulting file to make sure no new issues have been introduced. The ideal way to solve these issues is to avoid them in the first place. See [this guide about opening CSVs in UTF-8 format in Excel](https://www.itg.ias.edu/content/how-import-csv-file-uses-utf-8-character-encoding-0).

-### Countries With Missing Regions
-When an input file has both country and region columns we can check to see if the ISO 3166 country names have matching UN M.49 regions and add them when they are missing.
-
 ## AGROVOC Validation
 You can enable validation of metadata values in certain fields against the AGROVOC REST API with the `--agrovoc-fields` option. For example, in addition to agricultural subjects, many countries and regions are also present in AGROVOC. Enable this validation by specifying a comma-separated list of fields:
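The sentence above ends expecting an example invocation, which the hunk does not show; a plausible sketch under the same `-i`/`-o` conventions (the paths are illustrative):

```console
$ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -a dcterms.subject,cg.coverage.country
```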

csv_metadata_quality/app.py (View File)

@@ -205,23 +205,14 @@ def run(argv):
         # Check: title in citation
         check.title_in_citation(df_transposed[column])

-        if args.unsafe_fixes:
-            # Fix: countries match regions
-            df_transposed[column] = fix.countries_match_regions(df_transposed[column])
-        else:
-            # Check: countries match regions
-            check.countries_match_regions(df_transposed[column])
+        # Check: countries match regions
+        check.countries_match_regions(df_transposed[column])

         if args.experimental_checks:
             experimental.correct_language(df_transposed[column])

-    # Transpose the DataFrame back before writing. This is probably wasteful to
-    # do every time since we technically only need to do it if we've done the
-    # countries/regions fix above, but I can't think of another way for now.
-    df_transposed_back = df_transposed.T
-
     # Write
-    df_transposed_back.to_csv(args.output_file, index=False)
+    df.to_csv(args.output_file, index=False)

     # Close the input and output files before exiting
     args.input_file.close()
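The removed hunk leans on a transpose trick: pandas iterates a DataFrame column by column, so transposing first turns each item (row) into a column whose Series carries all of that item's fields, which is the shape a row-wise fix like `fix.countries_match_regions` needs. A minimal self-contained sketch of the round trip (column names mirror the repository's test data, but the values and output path are invented for illustration):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "dc.title": ["First item", "Second item"],
        "cg.coverage.country": ["Kenya", "Tanzania"],
        "cg.coverage.region": ["", ""],
    }
)

# Transpose so each *item* becomes a column: df_transposed[column] is a
# Series holding every field of one item.
df_transposed = df.T

for column in df_transposed.columns:
    item = df_transposed[column]
    print(item["dc.title"], "->", item["cg.coverage.country"])

# Transpose back before writing so the CSV keeps its original orientation.
df_transposed_back = df_transposed.T
df_transposed_back.to_csv("/tmp/output.csv", index=False)
```

This explains the comment deleted above: once no fix mutates the transposed frame, writing the original `df` directly makes the transpose-back step unnecessary.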

csv_metadata_quality/fix.py (View File)

@@ -3,7 +3,6 @@
 import re
 from unicodedata import normalize

-import country_converter as coco
 import pandas as pd
 from colorama import Fore
 from ftfy import TextFixerConfig, fix_text
@@ -290,83 +289,3 @@ def mojibake(field, field_name):
         return fix_text(field, config)
     else:
         return field
-
-
-def countries_match_regions(row):
-    """Check for the scenario where an item has country coverage metadata, but
-    does not have the corresponding region metadata. For example, an item that
-    has country coverage "Kenya" should also have region "Eastern Africa"
-    according to the UN M.49 classification scheme.
-
-    See: https://unstats.un.org/unsd/methodology/m49/
-
-    Return fixed row.
-    """
-    # Initialize some variables at function scope so that we can set them in
-    # the loop scope below and still be able to access them afterwards.
-    country_column_name = ""
-    region_column_name = ""
-    title_column_name = ""
-
-    # Iterate over the labels of the current row's values to find the names
-    # of the country, region, and title columns.
-    for label in row.axes[0]:
-        # Find the name of the country column
-        match = re.match(r"^.*?country.*$", label)
-        if match is not None:
-            country_column_name = label
-
-        # Find the name of the region column
-        match = re.match(r"^.*?region.*$", label)
-        if match is not None:
-            region_column_name = label
-
-        # Find the name of the title column
-        match = re.match(r"^(dc|dcterms)\.title.*$", label)
-        if match is not None:
-            title_column_name = label
-
-    # Make sure we found the country and region columns
-    if country_column_name != "" and region_column_name != "":
-        # If we don't have any countries then we should return early before
-        # suggesting regions.
-        if row[country_column_name] is not None:
-            countries = row[country_column_name].split("||")
-        else:
-            return row
-
-        if row[region_column_name] is not None:
-            regions = row[region_column_name].split("||")
-        else:
-            regions = list()
-
-        # An empty list to keep track of the regions that are missing across
-        # all of the item's countries
-        missing_regions = list()
-
-        for country in countries:
-            # Look up the UN M.49 regions for this country code. CoCo seems to
-            # only list the direct region, ie Western Africa, rather than all
-            # the parent regions ("Sub-Saharan Africa", "Africa", "World")
-            un_region = coco.convert(names=country, to="UNRegion")
-
-            if un_region not in regions:
-                if un_region not in missing_regions:
-                    missing_regions.append(un_region)
-
-        if len(missing_regions) > 0:
-            for missing_region in missing_regions:
-                print(
-                    f"{Fore.YELLOW}Adding missing region ({missing_region}): {Fore.RESET}{row[title_column_name]}"
-                )
-
-            # Add the missing regions back to the row, using the multi-value
-            # separator only when the row already has regions.
-            if row[region_column_name] is not None and row[region_column_name] != "":
-                row[region_column_name] = (
-                    row[region_column_name] + "||" + "||".join(missing_regions)
-                )
-            else:
-                row[region_column_name] = "||".join(missing_regions)
-
-    return row
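The heart of the removed fixer is the `coco.convert` lookup shown above. A standalone sketch of that call, using the Kenya example from the docstring (the Tanzania line is an added illustration; its expected region under UN M.49 is the same):

```python
import country_converter as coco

# CoCo resolves a country name to its direct UN M.49 region only, not the
# whole parent chain ("Sub-Saharan Africa", "Africa", "World").
print(coco.convert(names="Kenya", to="UNRegion"))     # Eastern Africa
print(coco.convert(names="Tanzania", to="UNRegion"))  # Eastern Africa
```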

tests/test_fix.py (View File)

@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-3.0-only

-import pandas as pd
-
 import csv_metadata_quality.fix as fix
@@ -122,32 +120,3 @@ def test_fix_mojibake():
     field_name = "dcterms.isPartOf"

     assert fix.mojibake(field, field_name) == "CIAT Publicaçao"
-
-
-def test_fix_country_not_matching_region():
-    """Test an item with regions not matching its country list."""
-    title = "Testing an item with no matching region."
-    country = "Kenya"
-    region = ""
-    missing_region = "Eastern Africa"
-
-    # Emulate a column in a transposed dataframe (which is just a series)
-    d = {
-        "dc.title": title,
-        "cg.coverage.country": country,
-        "cg.coverage.region": region,
-    }
-    series = pd.Series(data=d)
-
-    result = fix.countries_match_regions(series)
-
-    # Emulate the correct series we are expecting
-    d_correct = {
-        "dc.title": title,
-        "cg.coverage.country": country,
-        "cg.coverage.region": missing_region,
-    }
-    series_correct = pd.Series(data=d_correct)
-
-    pd.testing.assert_series_equal(result, series_correct)
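To exercise the removed test in isolation on a checkout of the older commit, a pytest node-id invocation like the following should work (assuming pytest as the runner, which the plain assert style of the test suggests):

```console
$ pytest tests/test_fix.py::test_fix_country_not_matching_region
```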