# indexer.py
#
# Copyright 2018 Alan Orth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---
#
# Connects to a DSpace Solr statistics core and ingests item views and downloads
# into a PostgreSQL database for use by other applications (like an API).
#
# This script is written for Python 3.6+ (it uses f-strings) and requires a few
# modules that you can install with pip (I recommend using a Python virtual
# environment):
#
#   $ pip install psycopg2-binary requests
#
# See: https://wiki.duraspace.org/display/DSPACE/Solr

import re
import sys

import psycopg2.extras
import requests

from .config import SOLR_SERVER
from .database import DatabaseManager
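
# Note: the .config and .database modules are not shown here. As a minimal
# sketch (anything beyond SOLR_SERVER is an assumption about your deployment),
# config.py could be as simple as reading the Solr base URL from the
# environment:
#
#   import os
#
#   SOLR_SERVER = os.environ.get("SOLR_SERVER", "http://localhost:8080/solr")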


# Enumerate the cores in Solr to determine if statistics have been sharded into
# yearly shards by DSpace's stats-util or not (for example: statistics-2018).
def get_statistics_shards():
    # Initialize an empty list for statistics core years
    statistics_core_years = []

    # URL for Solr status to check active cores
    solr_query_params = {"action": "STATUS", "wt": "json"}
    solr_url = SOLR_SERVER + "/admin/cores"
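    # (requests serializes these parameters into the query string, so this
    # fetches {SOLR_SERVER}/admin/cores?action=STATUS&wt=json)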
    res = requests.get(solr_url, params=solr_query_params)

    if res.status_code == requests.codes.ok:
        data = res.json()
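
        # For illustration, the relevant part of the STATUS response (with
        # wt=json) maps core names to their details, roughly:
        #
        #   {"status": {"statistics": {...}, "statistics-2018": {...}}}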

        # Pattern to match yearly shard cores, for example: statistics-2018
        pattern = re.compile("^statistics-[0-9]{4}$")

        # Iterate over active cores from Solr's STATUS response (cores are the
        # keys of the status object, so iterating yields the core names).
        for core in data["status"]:
            if not pattern.match(core):
                continue

            # Append current core to list
            statistics_core_years.append(core)

    # Initialize a string to hold our shards (may end up being empty if the Solr
    # core has not been processed by stats-util).
    shards = str()

    if len(statistics_core_years) > 0:
        # Begin building a string of shards starting with the default one
        shards = f"{SOLR_SERVER}/statistics"

        for core in statistics_core_years:
            # Create a comma-separated list of shards to pass to our Solr query
            #
            # See: https://wiki.apache.org/solr/DistributedSearch
            shards += f",{SOLR_SERVER}/{core}"
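
    # As an illustration (hypothetical local Solr base URL), the assembled
    # string looks like:
    #
    #   http://localhost:8080/solr/statistics,http://localhost:8080/solr/statistics-2018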

    # Return the string of shards, which may actually be empty. Solr doesn't
    # seem to mind if the shards query parameter is empty and I haven't seen
    # any negative performance impact so this should be fine.
    return shards


def index_views():
    # get total number of distinct facets for items with a minimum of 1 view,
    # otherwise Solr returns all kinds of weird ids that are actually not in
    # the database. Also, stats are expensive, but we need stats.calcdistinct
    # so we can get the countDistinct summary.
    #
    # see: https://lucene.apache.org/solr/guide/6_6/the-stats-component.html
    solr_query_params = {
        "q": "type:2",  # type 2 is an item in DSpace's Solr statistics schema
        "fq": "isBot:false AND statistics_type:view",
        "facet": "true",
        "facet.field": "id",
        "facet.mincount": 1,
        "facet.limit": 1,
        "facet.offset": 0,
        "stats": "true",
        "stats.field": "id",
        "stats.calcdistinct": "true",
        "shards": shards,
        "rows": 0,
        "wt": "json",
    }

    solr_url = SOLR_SERVER + "/statistics/select"

    res = requests.get(solr_url, params=solr_query_params)
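
    # When there are results, the stats block of the response looks roughly
    # like this (illustrative values):
    #
    #   {"stats": {"stats_fields": {"id": {"countDistinct": 1234, ...}}}}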

    try:
        # get total number of distinct facets (countDistinct)
        results_totalNumFacets = res.json()["stats"]["stats_fields"]["id"][
            "countDistinct"
        ]
    except TypeError:
        print("No item views to index, exiting.")

        sys.exit(0)

    # divide results into "pages" (cast to int to effectively round down)
    results_per_page = 100
    results_num_pages = int(results_totalNumFacets / results_per_page)
    results_current_page = 0
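
    # e.g. 1234 distinct facets with 100 per page gives int(12.34) = 12, and
    # the loop below runs pages 0 through 12 inclusive (13 pages in total)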

    with DatabaseManager() as db:
        with db.cursor() as cursor:
            # create an empty list to store values for batch insertion
            data = []

            while results_current_page <= results_num_pages:
                # "pages" are zero based, but one based is more human readable
                print(
                    f"Indexing item views (page {results_current_page + 1} of {results_num_pages + 1})"
                )

                solr_query_params = {
                    "q": "type:2",
                    "fq": "isBot:false AND statistics_type:view",
                    "facet": "true",
                    "facet.field": "id",
                    "facet.mincount": 1,
                    "facet.limit": results_per_page,
                    "facet.offset": results_current_page * results_per_page,
                    "shards": shards,
                    "rows": 0,
                    "wt": "json",
                    "json.nl": "map",  # return facets as a dict instead of a flat list
                }

                solr_url = SOLR_SERVER + "/statistics/select"

                res = requests.get(solr_url, params=solr_query_params)

                # Solr returns facets as a dict of dicts (see json.nl parameter)
                views = res.json()["facet_counts"]["facet_fields"]
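                # with json.nl=map the facets look like, e.g. (made-up UUIDs):
                #
                #   {"id": {"09172bd0-...": 42, "1db6b547-...": 7}}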

                # iterate over the 'id' dict and get the item ids and views
                for item_id, item_views in views["id"].items():
                    data.append((item_id, item_views))

                # do a batch insert of values from the current "page" of results
                sql = "INSERT INTO items(id, views) VALUES %s ON CONFLICT(id) DO UPDATE SET views=excluded.views"
                psycopg2.extras.execute_values(cursor, sql, data, template="(%s, %s)")
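                # execute_values renders each (id, views) tuple with the
                # template and substitutes the joined result for the single
                # VALUES %s placeholder, sending one multi-row upsert, roughly
                # (made-up values):
                #
                #   INSERT INTO items(id, views)
                #   VALUES ('09172bd0-...', 42), ('1db6b547-...', 7)
                #   ON CONFLICT(id) DO UPDATE SET views=excluded.views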
                db.commit()

                # clear all items from the list so we can populate it with the next batch
                data.clear()

                results_current_page += 1


def index_downloads():
    # get the total number of distinct facets for items with at least 1 download
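    # (downloads are recorded on bitstream events, so we facet on owningItem
    # rather than on id in order to attribute them to the item that owns the
    # bitstream)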
    solr_query_params = {
        "q": "type:0",  # type 0 is a bitstream in DSpace's Solr statistics schema
        "fq": "isBot:false AND statistics_type:view AND bundleName:ORIGINAL",
        "facet": "true",
        "facet.field": "owningItem",
        "facet.mincount": 1,
        "facet.limit": 1,
        "facet.offset": 0,
        "stats": "true",
        "stats.field": "owningItem",
        "stats.calcdistinct": "true",
        "shards": shards,
        "rows": 0,
        "wt": "json",
    }

    solr_url = SOLR_SERVER + "/statistics/select"

    res = requests.get(solr_url, params=solr_query_params)

    try:
        # get total number of distinct facets (countDistinct)
        results_totalNumFacets = res.json()["stats"]["stats_fields"]["owningItem"][
            "countDistinct"
        ]
    except TypeError:
        print("No item downloads to index, exiting.")

        sys.exit(0)

    # divide results into "pages" (cast to int to effectively round down)
    results_per_page = 100
    results_num_pages = int(results_totalNumFacets / results_per_page)
    results_current_page = 0

    with DatabaseManager() as db:
        with db.cursor() as cursor:
            # create an empty list to store values for batch insertion
            data = []

            while results_current_page <= results_num_pages:
                # "pages" are zero based, but one based is more human readable
                print(
                    f"Indexing item downloads (page {results_current_page + 1} of {results_num_pages + 1})"
                )

                solr_query_params = {
                    "q": "type:0",
                    "fq": "isBot:false AND statistics_type:view AND bundleName:ORIGINAL",
                    "facet": "true",
                    "facet.field": "owningItem",
                    "facet.mincount": 1,
                    "facet.limit": results_per_page,
                    "facet.offset": results_current_page * results_per_page,
                    "shards": shards,
                    "rows": 0,
                    "wt": "json",
                    "json.nl": "map",  # return facets as a dict instead of a flat list
                }

                solr_url = SOLR_SERVER + "/statistics/select"

                res = requests.get(solr_url, params=solr_query_params)

                # Solr returns facets as a dict of dicts (see json.nl parameter)
                downloads = res.json()["facet_counts"]["facet_fields"]

                # iterate over the 'owningItem' dict and get the item ids and downloads
                for item_id, item_downloads in downloads["owningItem"].items():
                    data.append((item_id, item_downloads))

                # do a batch insert of values from the current "page" of results
                sql = "INSERT INTO items(id, downloads) VALUES %s ON CONFLICT(id) DO UPDATE SET downloads=excluded.downloads"
                psycopg2.extras.execute_values(cursor, sql, data, template="(%s, %s)")
                db.commit()

                # clear all items from the list so we can populate it with the next batch
                data.clear()

                results_current_page += 1


with DatabaseManager() as db:
    with db.cursor() as cursor:
        # create table to store item views and downloads
        cursor.execute(
            """CREATE TABLE IF NOT EXISTS items
            (id UUID PRIMARY KEY, views INT DEFAULT 0, downloads INT DEFAULT 0)"""
        )

    # commit the table creation before closing the database connection
    db.commit()
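
# A consumer like the accompanying API can then read the combined statistics
# with a simple query, for example (hypothetical UUID):
#
#   SELECT views, downloads FROM items WHERE id = '09172bd0-...';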

shards = get_statistics_shards()

index_views()
index_downloads()
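
# Because this module uses relative imports it is meant to be run with
# Python's -m flag from the project root, for example (assuming the package
# directory is named dspace_statistics_api):
#
#   $ python -m dspace_statistics_api.indexer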

# vim: set sw=4 ts=4 expandtab: