mirror of https://github.com/ilri/dspace-statistics-api.git synced 2025-05-09 06:36:01 +02:00

Refactor project structure

This follows guidance from several well-known Python best practices
guides. Basically, the idea is to create a package for the application
that is composed of several reusable modules.

See: https://docs.python-guide.org/writing/structure/
See: https://realpython.com/python-application-layouts/
2018-10-26 18:14:27 +02:00
parent 754663f062
commit c027f01b48
7 changed files with 10 additions and 10 deletions
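
The resulting package layout, inferred from the file diffs below (the __init__.py is assumed, since a Python package conventionally has one):

    dspace_statistics_api/
        __init__.py
        app.py        # Falcon application and HTTP resources
        config.py     # environment-based configuration
        database.py   # PostgreSQL connection helper
        indexer.py    # Solr-to-PostgreSQL statistics indexer
        solr.py       # Solr connection helper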

dspace_statistics_api/app.py

@@ -0,0 +1,73 @@
from .database import database_connection

import falcon

db = database_connection()
db.set_session(readonly=True)


class AllItemsResource:
    def on_get(self, req, resp):
        """Handles GET requests"""
        # Return HTTPBadRequest if the limit or page parameters are present but invalid
        limit = req.get_param_as_int("limit", min=0, max=100) or 100
        page = req.get_param_as_int("page", min=0) or 0
        offset = limit * page

        cursor = db.cursor()

        # get total number of items so we can estimate the pages
        cursor.execute('SELECT COUNT(id) FROM items')
        pages = round(cursor.fetchone()[0] / limit)

        # get statistics, ordered by id, and use limit and offset to page through results
        cursor.execute('SELECT id, views, downloads FROM items ORDER BY id ASC LIMIT {} OFFSET {}'.format(limit, offset))

        # create a list to hold dicts of item stats
        statistics = list()

        # iterate over results and build statistics object
        for item in cursor:
            statistics.append({ 'id': item['id'], 'views': item['views'], 'downloads': item['downloads'] })

        cursor.close()

        message = {
            'currentPage': page,
            'totalPages': pages,
            'limit': limit,
            'statistics': statistics
        }

        resp.media = message


class ItemResource:
    def on_get(self, req, resp, item_id):
        """Handles GET requests"""

        cursor = db.cursor()
        cursor.execute('SELECT views, downloads FROM items WHERE id={}'.format(item_id))
        if cursor.rowcount == 0:
            raise falcon.HTTPNotFound(
                title='Item not found',
                description='The item with id "{}" was not found.'.format(item_id)
            )
        else:
            results = cursor.fetchone()

            statistics = {
                'id': item_id,
                'views': results['views'],
                'downloads': results['downloads']
            }

            resp.media = statistics

        cursor.close()


def on_exit(api):
    print("Shutting down DB")


api = falcon.API()
api.add_route('/items', AllItemsResource())
api.add_route('/item/{item_id:int}', ItemResource())

# vim: set sw=4 ts=4 expandtab:
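
The api object above is a plain WSGI callable, so for local testing it can be served with nothing but the standard library. A minimal sketch, not part of this commit (host and port are arbitrary):

    # serve the Falcon application with wsgiref; a real deployment would use
    # a production WSGI server such as Gunicorn instead
    from wsgiref.simple_server import make_server

    from dspace_statistics_api.app import api

    httpd = make_server('localhost', 8000, api)
    print('Listening on http://localhost:8000 (try /items or /item/1)')
    httpd.serve_forever()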

dspace_statistics_api/config.py

@@ -0,0 +1,11 @@
import os

# Check if Solr connection information was provided in the environment
SOLR_SERVER = os.environ.get('SOLR_SERVER', 'http://localhost:8080/solr')

DATABASE_NAME = os.environ.get('DATABASE_NAME', 'dspacestatistics')
DATABASE_USER = os.environ.get('DATABASE_USER', 'dspacestatistics')
DATABASE_PASS = os.environ.get('DATABASE_PASS', 'dspacestatistics')
DATABASE_HOST = os.environ.get('DATABASE_HOST', 'localhost')

# vim: set sw=4 ts=4 expandtab:
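
A small sketch of overriding these defaults, not part of the commit and with hypothetical values: the environment variables have to be set before the module is first imported, because the os.environ.get() lookups run at import time.

    import os

    # hypothetical values; anything left unset falls back to the defaults above
    os.environ['DATABASE_HOST'] = 'db.example.org'
    os.environ['DATABASE_PASS'] = 'changeme'

    from dspace_statistics_api import config

    print(config.DATABASE_HOST)  # db.example.org
    print(config.SOLR_SERVER)    # http://localhost:8080/solr (default)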

dspace_statistics_api/database.py

@@ -0,0 +1,12 @@
from .config import DATABASE_NAME
from .config import DATABASE_USER
from .config import DATABASE_PASS
from .config import DATABASE_HOST

import psycopg2, psycopg2.extras


def database_connection():
    connection = psycopg2.connect("dbname={} user={} password={} host='{}'".format(DATABASE_NAME, DATABASE_USER, DATABASE_PASS, DATABASE_HOST), cursor_factory=psycopg2.extras.DictCursor)

    return connection

# vim: set sw=4 ts=4 expandtab:
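
A quick sketch, not part of the commit, of why cursor_factory=psycopg2.extras.DictCursor matters: rows come back as dict-like objects, so the API resources and the indexer can read columns by name rather than by position.

    from dspace_statistics_api.database import database_connection

    db = database_connection()
    cursor = db.cursor()
    cursor.execute('SELECT id, views, downloads FROM items LIMIT 1')
    row = cursor.fetchone()
    if row is not None:
        # DictCursor rows support lookup by column name
        print(row['id'], row['views'], row['downloads'])
    cursor.close()
    db.close()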

dspace_statistics_api/indexer.py (executable file)

@@ -0,0 +1,173 @@
#!/usr/bin/env python
#
# indexer.py
#
# Copyright 2018 Alan Orth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---
#
# Connects to a DSpace Solr statistics core and ingests item views and downloads
# into a PostgreSQL database for use by other applications (like an API).
#
# This script is written for Python 3.5+ and requires several modules that you
# can install with pip (I recommend using a Python virtual environment):
#
# $ pip install SolrClient psycopg2-binary
#
# See: https://solrclient.readthedocs.io/en/latest/SolrClient.html
# See: https://wiki.duraspace.org/display/DSPACE/Solr
from .database import database_connection
import json
import psycopg2.extras
from .solr import solr_connection
def index_views():
    # get total number of distinct facets for items with a minimum of 1 view,
    # otherwise Solr returns all kinds of weird ids that are actually not in
    # the database. Also, stats are expensive, but we need stats.calcdistinct
    # so we can get the countDistinct summary.
    #
    # see: https://lucene.apache.org/solr/guide/6_6/the-stats-component.html
    res = solr.query('statistics', {
        'q':'type:2',
        'fq':'isBot:false AND statistics_type:view',
        'facet':True,
        'facet.field':'id',
        'facet.mincount':1,
        'facet.limit':1,
        'facet.offset':0,
        'stats':True,
        'stats.field':'id',
        'stats.calcdistinct':True
    }, rows=0)

    # get total number of distinct facets (countDistinct)
    results_totalNumFacets = json.loads(res.get_json())['stats']['stats_fields']['id']['countDistinct']

    # divide results into "pages" (cast to int to effectively round down)
    results_per_page = 100
    results_num_pages = int(results_totalNumFacets / results_per_page)
    results_current_page = 0

    cursor = db.cursor()

    # create an empty list to store values for batch insertion
    data = []

    while results_current_page <= results_num_pages:
        print('Indexing item views (page {} of {})'.format(results_current_page, results_num_pages))

        res = solr.query('statistics', {
            'q':'type:2',
            'fq':'isBot:false AND statistics_type:view',
            'facet':True,
            'facet.field':'id',
            'facet.mincount':1,
            'facet.limit':results_per_page,
            'facet.offset':results_current_page * results_per_page
        }, rows=0)

        # SolrClient's get_facets() returns a dict of dicts
        views = res.get_facets()

        # in this case iterate over the 'id' dict and get the item ids and views
        for item_id, item_views in views['id'].items():
            data.append((item_id, item_views))

        # do a batch insert of values from the current "page" of results
        sql = 'INSERT INTO items(id, views) VALUES %s ON CONFLICT(id) DO UPDATE SET views=excluded.views'
        psycopg2.extras.execute_values(cursor, sql, data, template='(%s, %s)')
        db.commit()

        # clear all items from the list so we can populate it with the next batch
        data.clear()

        results_current_page += 1

    cursor.close()
def index_downloads():
    # get the total number of distinct facets for items with at least 1 download
    res = solr.query('statistics', {
        'q':'type:0',
        'fq':'isBot:false AND statistics_type:view AND bundleName:ORIGINAL',
        'facet':True,
        'facet.field':'owningItem',
        'facet.mincount':1,
        'facet.limit':1,
        'facet.offset':0,
        'stats':True,
        'stats.field':'owningItem',
        'stats.calcdistinct':True
    }, rows=0)

    # get total number of distinct facets (countDistinct)
    results_totalNumFacets = json.loads(res.get_json())['stats']['stats_fields']['owningItem']['countDistinct']

    # divide results into "pages" (cast to int to effectively round down)
    results_per_page = 100
    results_num_pages = int(results_totalNumFacets / results_per_page)
    results_current_page = 0

    cursor = db.cursor()

    # create an empty list to store values for batch insertion
    data = []

    while results_current_page <= results_num_pages:
        print('Indexing item downloads (page {} of {})'.format(results_current_page, results_num_pages))

        res = solr.query('statistics', {
            'q':'type:0',
            'fq':'isBot:false AND statistics_type:view AND bundleName:ORIGINAL',
            'facet':True,
            'facet.field':'owningItem',
            'facet.mincount':1,
            'facet.limit':results_per_page,
            'facet.offset':results_current_page * results_per_page
        }, rows=0)

        # SolrClient's get_facets() returns a dict of dicts
        downloads = res.get_facets()

        # in this case iterate over the 'owningItem' dict and get the item ids and downloads
        for item_id, item_downloads in downloads['owningItem'].items():
            data.append((item_id, item_downloads))

        # do a batch insert of values from the current "page" of results
        sql = 'INSERT INTO items(id, downloads) VALUES %s ON CONFLICT(id) DO UPDATE SET downloads=excluded.downloads'
        psycopg2.extras.execute_values(cursor, sql, data, template='(%s, %s)')
        db.commit()

        # clear all items from the list so we can populate it with the next batch
        data.clear()

        results_current_page += 1

    cursor.close()
db = database_connection()
solr = solr_connection()

# create table to store item views and downloads
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS items
                  (id INT PRIMARY KEY, views INT DEFAULT 0, downloads INT DEFAULT 0)''')

index_views()
index_downloads()

db.close()

# vim: set sw=4 ts=4 expandtab:
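
The two indexing functions rely on PostgreSQL's upsert behaviour: views and downloads are written in separate passes, and ON CONFLICT(id) DO UPDATE only touches the column named in the SET clause, so the second pass does not clobber the first. A standalone sketch with hypothetical values, not part of the commit:

    import psycopg2.extras

    from dspace_statistics_api.database import database_connection

    db = database_connection()
    cursor = db.cursor()

    # first pass: record views for two hypothetical items
    psycopg2.extras.execute_values(
        cursor,
        'INSERT INTO items(id, views) VALUES %s ON CONFLICT(id) DO UPDATE SET views=excluded.views',
        [(1, 100), (2, 5)],
        template='(%s, %s)'
    )

    # second pass: record downloads for item 1; its views value stays at 100
    psycopg2.extras.execute_values(
        cursor,
        'INSERT INTO items(id, downloads) VALUES %s ON CONFLICT(id) DO UPDATE SET downloads=excluded.downloads',
        [(1, 30)],
        template='(%s, %s)'
    )

    db.commit()
    cursor.close()
    db.close()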

dspace_statistics_api/solr.py

@@ -0,0 +1,9 @@
from .config import SOLR_SERVER

from SolrClient import SolrClient


def solr_connection():
    connection = SolrClient(SOLR_SERVER)

    return connection

# vim: set sw=4 ts=4 expandtab: