import re

import pandas as pd


def whitespace(field):
    """Fix whitespace issues.

    Return string with leading, trailing, and consecutive whitespace trimmed.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Initialize an empty list to hold the cleaned values
    values = list()

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
        # Strip leading and trailing whitespace
        value = value.strip()

        # Replace runs of two or more whitespace characters with one space
        pattern = re.compile(r'\s{2,}')
        match = re.findall(pattern, value)

        if match:
            print(f'Excessive whitespace: {value}')
            value = re.sub(pattern, ' ', value)

        # Save cleaned value
        values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = '||'.join(values)

    return new_field
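

# Usage sketch for whitespace() (illustrative values only): these fix functions
# operate on a single cell, so they can be applied column-wise with pandas,
# e.g. on a hypothetical "dc.title" column of a DataFrame "df":
#
#   df['dc.title'] = df['dc.title'].apply(whitespace)
#
#   whitespace(' Alan  Orth ||  Felix  Shaw ')  # -> 'Alan Orth||Felix Shaw'

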
def separators(field):
    """Fix for invalid multi-value separators (i.e. "|")."""

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Initialize an empty list to hold the cleaned values
    values = list()

    # Try to split multi-value field on "||" separator
    for value in field.split('||'):
        # After splitting, see if there are any remaining "|" characters
        pattern = re.compile(r'\|')
        match = re.findall(pattern, value)

        if match:
            print(f'Fixing invalid multi-value separator: {value}')
            value = re.sub(pattern, '||', value)

        # Save cleaned value
        values.append(value)

    # Create a new field consisting of all values joined with "||"
    new_field = '||'.join(values)

    return new_field
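

# Usage sketch for separators() (illustrative value): a stray single "|"
# between values is normalized to the "||" separator, e.g.:
#
#   separators('Kenya|Tanzania')  # -> 'Kenya||Tanzania'

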
def unnecessary_unicode(field):
    """Remove unnecessary Unicode characters.

    Removes unnecessary Unicode characters like:

        - Zero-width space (U+200B)
        - Replacement character (U+FFFD)
        - No-break space (U+00A0)

    Return string with characters removed.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check for zero-width space characters (U+200B)
    pattern = re.compile(r'\u200B')
    match = re.findall(pattern, field)

    if match:
        print(f'Removing unnecessary Unicode (U+200B): {field}')
        field = re.sub(pattern, '', field)

    # Check for replacement characters (U+FFFD)
    pattern = re.compile(r'\uFFFD')
    match = re.findall(pattern, field)

    if match:
        print(f'Removing unnecessary Unicode (U+FFFD): {field}')
        field = re.sub(pattern, '', field)

    # Check for no-break spaces (U+00A0)
    pattern = re.compile(r'\u00A0')
    match = re.findall(pattern, field)

    if match:
        print(f'Removing unnecessary Unicode (U+00A0): {field}')
        field = re.sub(pattern, '', field)

    return field
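

# Usage sketch for unnecessary_unicode() (illustrative value): the unwanted
# characters are deleted outright rather than replaced with a space, e.g. a
# zero-width space is simply dropped:
#
#   unnecessary_unicode('Alan\u200bOrth')  # -> 'AlanOrth'

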
def duplicates(field):
    """Remove duplicate metadata values."""

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Try to split multi-value field on "||" separator
    values = field.split('||')

    # Initialize an empty list to hold the de-duplicated values
    new_values = list()

    # Iterate over all values
    for value in values:
        # Check if each value exists in our list of values already
        if value not in new_values:
            new_values.append(value)
        else:
            print(f'Dropping duplicate value: {value}')

    # Create a new field consisting of all values joined with "||"
    new_field = '||'.join(new_values)

    return new_field
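

# Usage sketch for duplicates() (illustrative value): only the first occurrence
# of each value is kept and the original order is preserved, e.g.:
#
#   duplicates('Kenya||Tanzania||Kenya')  # -> 'Kenya||Tanzania'

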
def newlines(field):
    """Fix newlines.

    Single metadata values should not span multiple lines because this is not
    rendered properly in DSpace's XMLUI and even causes issues during import.

    Implementation note: this currently only detects Unix line feeds (0x0a).
    This is essentially when a user presses "Enter" to move to the next line.
    Other newlines like the Windows carriage return are already handled with
    the string stripping performed in the whitespace fixes.

    Confusingly, in Vim '\n' matches a line feed when searching, but you must
    use '\r' to *insert* a line feed, i.e. in a search and replace expression.

    Return string with newlines removed.
    """

    # Skip fields with missing values
    if pd.isna(field):
        return

    # Check for Unix line feed (LF)
    match = re.findall(r'\n', field)

    if match:
        print(f'Removing newline: {field}')
        field = field.replace('\n', '')

    return field
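

# Usage sketch for newlines() (illustrative value): the line feed is removed
# without inserting a replacement space, e.g.:
#
#   newlines('Improving\nlivelihoods')  # -> 'Improvinglivelihoods'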