Commit 82c85a74 authored by anonymous

adds a label with text NEW to new datasets

parent d1840222
@@ -266,3 +266,30 @@ def odsh_is_slave():
    if c is None or (c != 'True' and c != 'False'):
        return -1
    return 1 if c == 'True' else 0

def is_within_last_month(date, date_ref=None):
    '''
    date is a datetime.date object containing the date to be checked
    date_ref is a datetime.date object containing the reference date;
    if date_ref is not specified, today's date is used
    this method is needed by the method OdshPlugin.before_view in plugin.py
    '''
    if not date_ref:
        date_ref = datetime.date.today()
    year_ref, month_ref, day_ref = date_ref.year, date_ref.month, date_ref.day
    try:
        if month_ref > 1:
            one_month_ago = datetime.date(year_ref, month_ref - 1, day_ref)
        else:
            one_month_ago = datetime.date(year_ref - 1, 12, day_ref)
    except ValueError:
        # this happens if the month before month_ref has fewer days than
        # month_ref; fall back to the last day of the previous month
        one_month_ago = datetime.date(year_ref, month_ref, 1) - datetime.timedelta(days=1)
    return date > one_month_ago
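
# A quick sanity check of the month-boundary behaviour (illustrative sketch;
# assumes this module imports datetime at the top of the file):
#
#   >>> is_within_last_month(datetime.date(2019, 3, 15),
#   ...                      date_ref=datetime.date(2019, 3, 31))
#   True
#   >>> is_within_last_month(datetime.date(2019, 2, 28),
#   ...                      date_ref=datetime.date(2019, 3, 31))
#   False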
@@ -420,3 +420,6 @@ msgstr "Neuen Datensatz vorschlagen"
msgid "Suggest New Data Request"
msgstr "Datensatz vorschlagen"
msgid "NEW"
msgstr "NEU"
\ No newline at end of file
@@ -526,3 +526,32 @@ class OdshPlugin(plugins.SingletonPlugin, DefaultTranslation, DefaultDatasetForm
        return dict_pkg

    # IPackageController

    def before_view(self, pkg_dict):
        '''
        add a key 'is_new' to pkg_dict
        the value for this key is True if the dataset has been modified
        within the last month
        the value is used in the snippet package_item.html
        '''
        is_new = self._is_package_new(pkg_dict)
        pkg_dict.update({'is_new': is_new})
        return pkg_dict

    def _is_package_new(self, pkg_dict):
        date_last_modified = self._get_date_from_string(pkg_dict['metadata_modified'])
        is_new = odsh_helpers.date_checker.is_within_last_month(date_last_modified)
        return is_new

    def _get_date_from_string(self, date_time_str):
        # todo: update this function if used in a different context
        date_time_format = '%Y-%m-%dT%H:%M:%S.%f'  # e.g. u'2019-06-12T11:56:25.059563'
        try:
            date_time = datetime.datetime.strptime(date_time_str, date_time_format)
            date = date_time.date()
        except ValueError:
            # if the date cannot be parsed from the string, fall back to 1.1.2000
            date = datetime.date(2000, 1, 1)
        return date
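
    # Illustrative flow (values are examples): a pkg_dict arriving in
    # before_view with
    #   pkg_dict['metadata_modified'] == u'2019-06-12T11:56:25.059563'
    # leaves with pkg_dict['is_new'] == True whenever that timestamp falls
    # within the last month; package_item.html then renders the NEW label.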
@@ -1980,3 +1980,11 @@ p.package-info-categorie
{
padding-left: 4px;
}
.new-dataset-label
{
    background-color: #d4004b !important;
    padding: 3px 3px 1px 3px;
    font-size: 14px;
    margin-right: 4px;
}
\ No newline at end of file
@@ -43,6 +43,9 @@ Example:
<p>{{org}}</p>
<h3 class="dataset-heading">
  {% block heading_private %}
    {% if package.is_new %}
      <span class="label new-dataset-label">{{ _('NEW') }}</span>
    {% endif %}
    {% if package.private %}
      <span class="dataset-private label label-inverse">
        <i class="fa fa-lock"></i>
......
# This Python file uses the following encoding: utf-8
import logging
import csv
import re
import urllib2
import json
from itertools import count

from dateutil.parser import parse

import ckan.plugins.toolkit as toolkit
import ckan.model as model
from ckan.lib.navl.dictization_functions import Missing
from pylons import config

_ = toolkit._
log = logging.getLogger(__name__)

def _extract_value(data, field):
    key = None
    for k in data.keys():
        if data[k] == field:
            key = k
            break
    if key is None:
        return None
    return data[(key[0], key[1], 'value')]
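
# CKAN hands validators a "flattened" data dict keyed by tuples, so an extra
# named 'groups' is stored as two sibling entries (values here are made up):
#
#   data = {
#       ('extras', 0, 'key'): 'groups',
#       ('extras', 0, 'value'): 'soci,educ',
#   }
#
# _extract_value(data, 'groups') finds the tuple key whose entry equals
# 'groups' and returns the matching 'value' entry, here 'soci,educ'.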

def validate_extra_groups(data, requireAtLeastOne, errors):
    value = _extract_value(data, 'groups')
    if value is not None:
        # the extra key 'groups' was found, so the dataset came from
        # manual editing via the web frontend
        if not value:
            if requireAtLeastOne:
                errors['groups'] = 'at least one group needed'
            data[('groups', 0, 'id')] = ''
            return

        groups = [g.strip() for g in value.split(',') if g.strip()]
        for k in data.keys():
            if len(k) == 3 and k[0] == 'groups':
                data[k] = ''
        if len(groups) == 0:
            if requireAtLeastOne:
                errors['groups'] = 'at least one group needed'
            return

        for num, group in zip(range(len(groups)), groups):
            data[('groups', num, 'id')] = group
    else:
        # no extra field 'groups'; the dataset might come from a harvest process
        if not data.get(('groups', 0, 'id'), False) and \
                not data.get(('groups', 0, 'name'), False):
            errors['groups'] = 'at least one group needed'
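
# Sketch of the web-frontend path with made-up values: for
#   data = {('extras', 0, 'key'): 'groups', ('extras', 0, 'value'): 'soci, educ'}
# the call validate_extra_groups(data, True, errors) blanks any stale
# ('groups', n, ...) entries and then writes
#   data[('groups', 0, 'id')] = 'soci'
#   data[('groups', 1, 'id')] = 'educ'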

def validate_extras(key, data, errors, context):
    extra_errors = {}
    isStaNord = ('id',) in data and data[('id',)][:7] == 'StaNord'
    validate_extra_groups(data, True, extra_errors)
    validate_extra_date_new(key, 'issued', data, isStaNord, extra_errors)
    validate_extra_date_new(key, 'temporal_start', data, isStaNord, extra_errors)
    validate_extra_date_new(key, 'temporal_end', data, True, extra_errors)
    if extra_errors:
        raise toolkit.Invalid(extra_errors)

def _set_value(data, field, value):
    key = None
    for k in data.keys():
        if data[k] == field:
            key = k
            break
    if key is None:
        return None
    data[(key[0], key[1], 'value')] = value

def validate_extra_date_new(key, field, data, optional, errors):
    value = _extract_value(data, field)
    if not value:
        if not optional:
            errors[field] = 'empty'
        return
    if re.match(r'\d\d\d\d-\d\d-\d\d', value):
        try:
            # normalise the value to an ISO-formatted date string
            dt = parse(value)
            _set_value(data, field, dt.isoformat())
            return
        except ValueError:
            pass
    errors[field] = 'not a valid date'
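
# For example (made-up input): the value '2019-06-12' matches the pattern,
# dateutil's parse() turns it into datetime(2019, 6, 12, 0, 0), and the extra
# is rewritten in ISO form as '2019-06-12T00:00:00'; a value like '12.06.2019'
# fails the regex and is reported as 'not a valid date'.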

def validate_licenseAttributionByText(key, data, errors, context):
    register = model.Package.get_license_register()
    isByLicense = False
    for k in data:
        if len(k) > 0 and k[0] == 'license_id' and data[k] and \
                not isinstance(data[k], Missing) and \
                'Namensnennung' in register[data[k]].title:
            isByLicense = True
            break
    hasAttribution = False
    for k in data:
        if data[k] == 'licenseAttributionByText':
            value_key = (k[0], k[1], 'value')
            # test membership before the lookup so a missing 'value' entry
            # cannot raise a KeyError
            if value_key not in data or isinstance(data[value_key], Missing):
                data.pop(value_key, None)
                del data[(k[0], k[1], 'key')]
            else:
                hasAttribution = data[value_key] != ''
            break
    if not hasAttribution:
        current_indexes = [k[1] for k in data.keys()
                           if len(k) > 1 and k[0] == 'extras']
        new_index = max(current_indexes) + 1 if current_indexes else 0
        data[('extras', new_index, 'key')] = 'licenseAttributionByText'
        data[('extras', new_index, 'value')] = ''
    if isByLicense and not hasAttribution:
        raise toolkit.Invalid(
            'licenseAttributionByText: empty not allowed')
    if not isByLicense and hasAttribution:
        raise toolkit.Invalid(
            'licenseAttributionByText: text not allowed for this license')

def known_spatial_uri(key, data, errors, context):
    value = _extract_value(data, 'spatial_uri')
    if not value:
        # some harvesters might import a polygon directly
        poly = _extract_value(data, 'spatial')

        has_old_uri = False
        pkg = context.get('package', None)
        if pkg:
            old_uri = pkg.extras.get('spatial_uri', None)
            has_old_uri = old_uri is not None and len(old_uri) > 0
            if not poly:
                poly = pkg.extras.get('spatial', None)
        if not poly or has_old_uri:
            raise toolkit.Invalid('spatial_uri: empty not allowed')
        new_index = next_extra_index(data)
        data[('extras', new_index + 1, 'key')] = 'spatial'
        data[('extras', new_index + 1, 'value')] = poly
        return

    mapping_file = config.get('ckanext.odsh.spatial.mapping')
    try:
        mapping_file = urllib2.urlopen(mapping_file)
    except Exception:
        raise Exception("Could not load spatial mapping file!")

    not_found = True
    spatial_text = str()
    spatial = str()
    cr = csv.reader(mapping_file, delimiter="\t")
    for row in cr:
        if row[0].encode('UTF-8') == value:
            not_found = False
            spatial_text = row[1]
            loaded = json.loads(row[2])
            spatial = json.dumps(loaded['geometry'])
            break
    if not_found:
        raise toolkit.Invalid('spatial_uri: uri unknown')

    new_index = next_extra_index(data)
    data[('extras', new_index, 'key')] = 'spatial_text'
    data[('extras', new_index, 'value')] = spatial_text
    data[('extras', new_index + 1, 'key')] = 'spatial'
    data[('extras', new_index + 1, 'value')] = spatial

def next_extra_index(data):
    current_indexes = [k[1] for k in data.keys()
                       if len(k) > 1 and k[0] == 'extras']
    return max(current_indexes) + 1 if current_indexes else 0

def tag_name_validator(value, context):
    tagname_match = re.compile(r'[\w \-.\:\(\)\´\`]*$', re.UNICODE)
    if not tagname_match.match(value):
        raise toolkit.Invalid(_('Tag "%s" must be alphanumeric '
                                'characters or symbols: -_.:()') % (value))
    return value

def tag_string_convert(key, data, errors, context):
    '''Takes a list of tags that is a comma-separated string (in data[key])
    and parses tag names. These are added to the data dict, enumerated. They
    are also validated.'''
    if isinstance(data[key], basestring):
        tags = [tag.strip()
                for tag in data[key].split(',')
                if tag.strip()]
    else:
        tags = data[key]
    current_index = max([int(k[1]) for k in data.keys()
                         if len(k) == 3 and k[0] == 'tags'] + [-1])
    for num, tag in zip(count(current_index + 1), tags):
        data[('tags', num, 'name')] = tag
    for tag in tags:
        toolkit.get_validator('tag_length_validator')(tag, context)
        tag_name_validator(tag, context)
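
# Sketch with made-up input: for key = ('tag_string',) and
#   data = {('tag_string',): 'umwelt, verkehr'}
# the string is split, stripped and enumerated into
#   data[('tags', 0, 'name')] = 'umwelt'
#   data[('tags', 1, 'name')] = 'verkehr'
# and each tag is then run through tag_length_validator and tag_name_validator.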

def get_validators():
    return {
        'known_spatial_uri': known_spatial_uri,
        'odsh_tag_name_validator': tag_name_validator,
        'odsh_validate_extras': validate_extras,
        'validate_licenseAttributionByText': validate_licenseAttributionByText,
    }
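
# These names are picked up by CKAN through the IValidators plugin interface;
# a minimal sketch of the wiring (assuming the plugin class in plugin.py
# implements IValidators and this module is importable as ckanext.odsh.validation):
#
#   import ckan.plugins as plugins
#   from ckanext.odsh import validation
#
#   class OdshPlugin(plugins.SingletonPlugin):
#       plugins.implements(plugins.IValidators)
#
#       def get_validators(self):
#           return validation.get_validators()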