# SPDX-License-Identifier: GPL-3.0-only

import logging
import re
from unicodedata import normalize

import country_converter as coco
import pandas as pd
from colorama import Fore
from ftfy import TextFixerConfig, fix_text

from csv_metadata_quality.util import is_mojibake, is_nfc
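
# Note: these fix functions are intended to be applied element-wise to a pandas
# Series by the calling code. An illustrative call (assuming a DataFrame "df"
# with a "dc.title" column) would be:
#
#   df["dc.title"] = df["dc.title"].apply(whitespace, field_name="dc.title")
#
# Each function first checks for missing (NaN) values with pd.isna() and
# returns early in that case.

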
def whitespace(field, field_name):
    """Fix whitespace issues.

    Return string with leading, trailing, and consecutive whitespace trimmed.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Initialize an empty list to hold the cleaned values
    values = []

    # Try to split multi-value field on "||" separator
    for value in field.split("||"):
        # Strip leading and trailing whitespace
        value = value.strip()

        # Replace runs of two or more whitespace characters with a single space
        pattern = re.compile(r"\s{2,}")
        match = re.findall(pattern, value)

        if match:
            print(
                f"{Fore.GREEN}Removing excessive whitespace ({field_name}): {Fore.RESET}{value}"
            )
            value = re.sub(pattern, " ", value)

        # Save cleaned value
        values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = "||".join(values)

    return new_field


def separators(field, field_name):
    """Fix for invalid and unnecessary multi-value separators, for example:

        value|value
        value|||value
        value||value||

    Prints the field with the invalid multi-value separator.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Initialize an empty list to hold the cleaned values
    values = []

    # Try to split multi-value field on "||" separator
    for value in field.split("||"):
        # Check if the value is blank and skip it
        if value == "":
            print(
                f"{Fore.GREEN}Fixing unnecessary multi-value separator ({field_name}): {Fore.RESET}{field}"
            )

            continue

        # After splitting, see if there are any remaining "|" characters
        pattern = re.compile(r"\|")
        match = re.findall(pattern, value)

        if match:
            print(
                f"{Fore.GREEN}Fixing invalid multi-value separator ({field_name}): {Fore.RESET}{value}"
            )

            value = re.sub(pattern, "||", value)

        # Save cleaned value
        values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = "||".join(values)

    return new_field


def unnecessary_unicode(field):
    """Remove and replace unnecessary Unicode characters.

    Removes unnecessary Unicode characters like:
        - Zero-width space (U+200B)
        - Replacement character (U+FFFD)

    Replaces unnecessary Unicode characters like:
        - Soft hyphen (U+00AD) → hyphen
        - No-break space (U+00A0) → space
        - Thin space (U+2009) → space

    Return string with characters removed or replaced.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check for zero-width space characters (U+200B)
    pattern = re.compile(r"\u200B")
    match = re.findall(pattern, field)

    if match:
        print(f"{Fore.GREEN}Removing unnecessary Unicode (U+200B): {Fore.RESET}{field}")
        field = re.sub(pattern, "", field)

    # Check for replacement characters (U+FFFD)
    pattern = re.compile(r"\uFFFD")
    match = re.findall(pattern, field)

    if match:
        print(f"{Fore.GREEN}Removing unnecessary Unicode (U+FFFD): {Fore.RESET}{field}")
        field = re.sub(pattern, "", field)

    # Check for no-break spaces (U+00A0)
    pattern = re.compile(r"\u00A0")
    match = re.findall(pattern, field)

    if match:
        print(
            f"{Fore.GREEN}Replacing unnecessary Unicode (U+00A0): {Fore.RESET}{field}"
        )
        field = re.sub(pattern, " ", field)

    # Check for soft hyphens (U+00AD), sometimes preceded by a normal hyphen
    pattern = re.compile(r"\u002D*?\u00AD")
    match = re.findall(pattern, field)

    if match:
        print(
            f"{Fore.GREEN}Replacing unnecessary Unicode (U+00AD): {Fore.RESET}{field}"
        )
        field = re.sub(pattern, "-", field)

    # Check for thin spaces (U+2009)
    pattern = re.compile(r"\u2009")
    match = re.findall(pattern, field)

    if match:
        print(
            f"{Fore.GREEN}Replacing unnecessary Unicode (U+2009): {Fore.RESET}{field}"
        )
        field = re.sub(pattern, " ", field)

    return field


def duplicates(field, field_name):
    """Remove duplicate metadata values."""

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    values = field.split("||")

    # Initialize an empty list to hold the de-duplicated values
    new_values = []

    # Iterate over all values
    for value in values:
        # Check if each value exists in our list of values already
        if value not in new_values:
            new_values.append(value)
        else:
            print(
                f"{Fore.GREEN}Removing duplicate value ({field_name}): {Fore.RESET}{value}"
            )

    # Create a new field consisting of all values joined with "||"
    new_field = "||".join(new_values)

    return new_field


def newlines(field, field_name):
    """Fix newlines.

    Single metadata values should not span multiple lines because this is not
    rendered properly in DSpace's XMLUI and even causes issues during import.

    Implementation note: this currently only detects Unix line feeds (0x0a).
    This is essentially when a user presses "Enter" to move to the next line.
    Other newlines like the Windows carriage return are already handled with
    the string stripping performed in the whitespace fixes.

    Confusingly, in Vim '\n' matches a line feed when searching, but you must
    use '\r' to *insert* a line feed, ie in a search and replace expression.

    Return string with newlines removed.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check for Unix line feed (LF)
    match = re.findall(r"\n", field)

    if match:
        print(f"{Fore.GREEN}Removing newline ({field_name}): {Fore.RESET}{field}")
        field = field.replace("\n", "")

    return field


def comma_space(field, field_name):
    """Fix occurrences of commas missing a trailing space, for example:

        Orth,Alan S.

    This is a very common mistake in author and citation fields.

    Return string with a space added.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check for comma followed by a word character
    match = re.findall(r",\w", field)

    if match:
        print(
            f"{Fore.GREEN}Adding space after comma ({field_name}): {Fore.RESET}{field}"
        )
        field = re.sub(r",(\w)", r", \1", field)

    return field


def normalize_unicode(field, field_name):
    """Fix occurrences of decomposed Unicode characters by normalizing them
    with NFC to their canonical forms, for example:

        Ouédraogo, Mathieu → Ouédraogo, Mathieu

    Return normalized string.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check if the current string is using normalized Unicode (NFC)
    if not is_nfc(field):
        print(f"{Fore.GREEN}Normalizing Unicode ({field_name}): {Fore.RESET}{field}")
        field = normalize("NFC", field)

    return field


def mojibake(field, field_name):
    """Attempt to fix mojibake (text that was encoded in one encoding and
    decoded in another, perhaps multiple times). See util.py.

    Return fixed string.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return field

    # We don't want ftfy to change “smart quotes” to "ASCII quotes"
    config = TextFixerConfig(uncurl_quotes=False)

    if is_mojibake(field):
        print(f"{Fore.GREEN}Fixing encoding issue ({field_name}): {Fore.RESET}{field}")

        return fix_text(field, config)
    else:
        return field


def countries_match_regions(row, exclude):
    """Check for the scenario where an item has country coverage metadata, but
    does not have the corresponding region metadata. For example, an item that
    has country coverage "Kenya" should also have region "Eastern Africa"
    according to the UN M.49 classification scheme.

    See: https://unstats.un.org/unsd/methodology/m49/

    Return the fixed row.
    """

    # Initialize some variables at function scope so that we can set them in
    # the loop scope below and still be able to access them afterwards.
    country_column_name = ""
    region_column_name = ""
    title_column_name = ""

    # Instantiate a CountryConverter() object here. According to the docs it is
    # more performant to do that as opposed to calling coco.convert() directly
    # because we don't need to re-load the country data with each iteration.
    cc = coco.CountryConverter()

    # Set logging to ERROR so country_converter's convert() doesn't print the
    # "not found in regex" warning message to the screen.
    logging.basicConfig(level=logging.ERROR)

    # Iterate over the labels of the current row's values to get the names of
    # the country, region, and title columns.
    for label in row.axes[0]:
        # Find the name of the country column
        match = re.match(r"^.*?country.*$", label)
        if match is not None:
            country_column_name = label

        # Find the name of the region column, but make sure it's not subregion!
        match = re.match(r"^.*?region.*$", label)
        if match is not None and "sub" not in label:
            region_column_name = label

        # Find the name of the title column
        match = re.match(r"^(dc|dcterms)\.title.*$", label)
        if match is not None:
            title_column_name = label

    # Make sure the user has not asked to exclude any metadata fields. If so, we
    # should return immediately.
    column_names = [country_column_name, region_column_name, title_column_name]
    if any(field in column_names for field in exclude):
        return row

    # Make sure we found the country and region columns
    if country_column_name != "" and region_column_name != "":
        # If we don't have any countries then we should return early before
        # suggesting regions.
        if row[country_column_name] is not None:
            countries = row[country_column_name].split("||")
        else:
            return row

        if row[region_column_name] is not None:
            regions = row[region_column_name].split("||")
        else:
            regions = []

        # An empty list for the missing regions so we can keep track across all
        # countries
        missing_regions = []

        for country in countries:
            # Look up the UN M.49 regions for this country code. CoCo seems to
            # only list the direct region, ie Western Africa, rather than all
            # the parent regions ("Sub-Saharan Africa", "Africa", "World")
            un_region = cc.convert(names=country, to="UNRegion")

            # Add the new un_region to regions if it is not "not found" and if
            # it doesn't already exist in regions.
            if un_region != "not found" and un_region not in regions:
                if un_region not in missing_regions:
                    try:
                        print(
                            f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}"
                        )
                    except KeyError:
                        # If there is no title column in the CSV we will print
                        # the fix without the title instead of crashing.
                        print(
                            f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}<title field not present>"
                        )

                    missing_regions.append(un_region)

        if len(missing_regions) > 0:
            # Add the missing regions back to the row, paying attention to whether
            # or not the row's region column is None (aka null) or just an empty
            # string (length would be 0).
            if row[region_column_name] is not None and len(row[region_column_name]) > 0:
                row[region_column_name] = (
                    row[region_column_name] + "||" + "||".join(missing_regions)
                )
            else:
                row[region_column_name] = "||".join(missing_regions)

    return row


def normalize_dois(field):
    """Normalize DOIs.

    DOIs are meant to be globally unique identifiers. They are case insensitive,
    but in order to compare them robustly they should be normalized to a common
    format:

        - strip leading and trailing whitespace
        - lowercase all ASCII characters
        - convert all variations to https://doi.org/10.xxxx/xxxx URI format

    Return string with normalized DOI.

    See: https://www.crossref.org/documentation/member-setup/constructing-your-dois/
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    values = field.split("||")

    # Initialize an empty list to hold the normalized values
    new_values = []

    # Iterate over all values (most items will only have one DOI)
    for value in values:
        # Strip leading and trailing whitespace
        new_value = value.strip()

        new_value = new_value.lower()

        # Convert to HTTPS
        pattern = re.compile(r"^http://")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "https://", new_value)

        # Convert dx.doi.org to doi.org
        pattern = re.compile(r"dx\.doi\.org")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "doi.org", new_value)

        # Replace values like doi: 10.11648/j.jps.20140201.14
        pattern = re.compile(r"^doi: 10\.")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "https://doi.org/10.", new_value)

        # Replace values like 10.3390/foods12010115
        pattern = re.compile(r"^10\.")
        match = re.findall(pattern, new_value)

        if match:
            new_value = re.sub(pattern, "https://doi.org/10.", new_value)

        if new_value != value:
            print(f"{Fore.GREEN}Normalized DOI: {Fore.RESET}{value}")

        new_values.append(new_value)

    new_field = "||".join(new_values)

    return new_field