diff --git a/ckanext/odsh/helpers.py b/ckanext/odsh/helpers.py
index 1fe0abf4fb514c785b90e1f1461659691d15d463..4acbb1902493fdb026ab908bb670cc49d8d62932 100644
--- a/ckanext/odsh/helpers.py
+++ b/ckanext/odsh/helpers.py
@@ -266,3 +266,30 @@ def odsh_is_slave():
     if c is None or (c != 'True' and c != 'False'):
         return -1 
     return 1 if c == 'True' else 0
+
+
+def is_within_last_month(date, date_ref=None):
+    '''
+    date is a datetime.date object containing the date to be checked
+    date_ref is a datetime.date object containing the reference date
+    if date_ref is not specified, the date of today is used
+    this method is needed by the method OdshPlugin.before_view in plugin.py
+    '''
+    
+    if not date_ref:
+        date_ref = datetime.date.today()
+    
+    [year_ref, month_ref, day_ref] = [date_ref.year, date_ref.month, date_ref.day]
+
+    try:
+        if month_ref > 1:
+            one_month_ago = datetime.date(year_ref, month_ref-1, day_ref)
+        else:
+            one_month_ago = datetime.date(year_ref-1, 12, day_ref)
+    except ValueError:
+        # this happens if month before month_ref has less days than month_ref
+        one_month_ago = datetime.date(year_ref, month_ref, 1) - datetime.timedelta(days=1)
+    
+    if date > one_month_ago:
+        return True
+    return False
diff --git a/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.mo b/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.mo
index ab0a6177c3cc3782e3ad5f9f36bd1a9e3e1f5af1..293456c5b0934cc0dd8b703e467bce532a80f134 100644
Binary files a/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.mo and b/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.mo differ
diff --git a/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.po b/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.po
index b09176f7ebfd5b11c0e4dd8f0f2e3861d4c9a458..7357bf8832d1fb0a5b70fdd2e4ff08f3f0994787 100644
--- a/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.po
+++ b/ckanext/odsh/i18n/de/LC_MESSAGES/ckanext-odsh.po
@@ -419,4 +419,7 @@ msgid "Create New Data Request"
 msgstr "Neuen Datensatz vorschlagen"
 
 msgid "Suggest New Data Request"
-msgstr "Datensatz vorschlagen"
\ No newline at end of file
+msgstr "Datensatz vorschlagen"
+
+msgid "NEW"
+msgstr "NEU"
\ No newline at end of file
diff --git a/ckanext/odsh/plugin.py b/ckanext/odsh/plugin.py
index b8f211afbfa74dfb885195c9d6022e1d6bec1b1c..c27478a9d3873f05b543aebb40017069ef7104c3 100644
--- a/ckanext/odsh/plugin.py
+++ b/ckanext/odsh/plugin.py
@@ -525,4 +525,33 @@ class OdshPlugin(plugins.SingletonPlugin, DefaultTranslation, DefaultDatasetForm
         self.map_qa_score(dict_pkg)
 
         return dict_pkg
+    
+
+    # IPackageController
+
+    def before_view(self, pkg_dict):
+        '''
+        add a key 'is_new' to pkg_dict
+        the value for this key is True if the dataset has been modified within the last month
+        the value is used in the snippet package_item.html
+        '''
+        is_new = self._is_package_new(pkg_dict)
+        pkg_dict.update({'is_new':is_new})
+        return pkg_dict
+    
+    def _is_package_new(self, pkg_dict):
+        date_last_modified = self._get_date_from_string(pkg_dict['metadata_modified'])
+        is_new = odsh_helpers.is_within_last_month(date_last_modified)
+        return is_new
+    
+    def _get_date_from_string(self, date_time_str):
+        # todo: update this function if used in different context
+        date_time_format = '%Y-%m-%dT%H:%M:%S.%f' #e.g. u'2019-06-12T11:56:25.059563'
+        try:
+            date_time = datetime.datetime.strptime(date_time_str, date_time_format)
+        except ValueError:
+            # if date cannot be converted from string fall back to 1.1.2000
+            return datetime.date(2000, 1, 1)
+        date = date_time.date()
+        return date
 
diff --git a/ckanext/odsh/public/odsh.css b/ckanext/odsh/public/odsh.css
index 893d2a6448f901338b316cee8c5b20b39af43432..e5b8e4ee2bf5ee1a0c7feaa1658df1653c76f0ef 100644
--- a/ckanext/odsh/public/odsh.css
+++ b/ckanext/odsh/public/odsh.css
@@ -1979,4 +1979,12 @@ p.package-info-categorie
 .datarequest-item-autor-name
 {
     padding-left: 4px;
+}
+
+.new-dataset-label
+{
+    background-color: #d4004b !important;
+    padding: 3px 3px 1px 3px;
+    font-size: 14px;
+    margin-right: 4px;
 }
\ No newline at end of file
diff --git a/ckanext/odsh/templates/snippets/package_item.html b/ckanext/odsh/templates/snippets/package_item.html
index ff1ef8d446aebbe60d021c198a70ff8586a75248..d1a84c3e442076b49baef7108d0a385d6f9e613c 100644
--- a/ckanext/odsh/templates/snippets/package_item.html
+++ b/ckanext/odsh/templates/snippets/package_item.html
@@ -43,6 +43,9 @@ Example:
     <p>{{org}}</p>
     <h3 class="dataset-heading">
       {% block heading_private %}
+      {% if package.is_new %}
+        <span class='label new-dataset-label'>{{ _('NEW') }}</span>
+      {% endif %}
       {% if package.private %}
       <span class="dataset-private label label-inverse">
         <i class="fa fa-lock"></i>
diff --git a/validation.py b/validation.py
new file mode 100644
index 0000000000000000000000000000000000000000..e50cbb4394ddbcc447a7fe87d200e738ba6019e6
--- /dev/null
+++ b/validation.py
@@ -0,0 +1,241 @@
+# This Python file uses the following encoding: utf-8
+import logging
+import csv
+import re
+import urllib2
+import json
+from itertools import count
+from dateutil.parser import parse
+
+import ckan.plugins.toolkit as toolkit
+import ckan.model as model
+from ckan.lib.navl.dictization_functions import Missing
+
+from pylons import config
+
+import pdb
+
+_ = toolkit._
+
+log = logging.getLogger(__name__)
+
+
+def _extract_value(data, field):
+    key = None
+    for k in data.keys():
+        if data[k] == field:
+            key = k
+            break
+    if key is None:
+        return None
+    return data[(key[0], key[1], 'value')]
+
+
+def validate_extra_groups(data, requireAtLeastOne, errors):
+    value = _extract_value(data, 'groups')
+    if value != None:
+        # 'value != None' means the extra key 'groups' was found,
+        # so the dataset came from manual editing via the web-frontend.
+        if not value:
+            if requireAtLeastOne:
+                errors['groups'] = 'at least one group needed'
+            data[('groups', 0, 'id')] = ''
+            return
+
+        groups = [g.strip() for g in value.split(',') if g.strip()]
+        for k in data.keys():
+            if len(k) == 3 and k[0] == 'groups':
+                data[k] = ''
+                # del data[k]
+        if len(groups) == 0:
+            if requireAtLeastOne:
+                errors['groups'] = 'at least one group needed'
+            return
+
+        for num, group in zip(range(len(groups)), groups):
+            data[('groups', num, 'id')] = group
+    else:  # no extra-field 'groups'
+        # dataset might come from a harvest process
+        if not data.get(('groups', 0, 'id'), False) and \
+           not data.get(('groups', 0, 'name'), False):
+            errors['groups'] = 'at least one group needed'
+
+
+def validate_extras(key, data, errors, context):
+    extra_errors = {}
+    isStaNord = ('id',) in data and data[('id',)][:7] == 'StaNord'
+
+    validate_extra_groups(data, True, extra_errors)
+    validate_extra_date_new(key, 'issued', data, isStaNord, extra_errors)
+    validate_extra_date_new(key, 'temporal_start',
+                            data, isStaNord, extra_errors)
+    validate_extra_date_new(key, 'temporal_end', data, True, extra_errors)
+
+    if len(extra_errors.values()):
+        raise toolkit.Invalid(extra_errors)
+
+
+def _set_value(data, field, value):
+    key = None
+    for k in data.keys():
+        if data[k] == field:
+            key = k
+            break
+    if key is None:
+        return None
+    data[(key[0], key[1], 'value')] = value
+
+
+def validate_extra_date_new(key, field, data, optional, errors):
+    value = _extract_value(data, field)
+
+    if not value:
+        if not optional:
+            errors[field] = 'empty'
+        return
+    else:
+        if re.match(r'\d\d\d\d-\d\d-\d\d', value):
+            try:
+                dt = parse(value)
+                _set_value(data, field, dt.isoformat())
+                return
+            except ValueError:
+                pass
+        errors[field] = 'not a valid date'
+
+
+def validate_licenseAttributionByText(key, data, errors, context):
+    register = model.Package.get_license_register()
+    isByLicense = False
+    for k in data:
+        if len(k) > 0 and k[0] == 'license_id' and data[k] and not isinstance(data[k], Missing) and \
+                'Namensnennung' in register[data[k]].title:
+            isByLicense = True
+            break
+    hasAttribution = False
+    for k in data:
+        if data[k] == 'licenseAttributionByText':
+            if (k[0], k[1], 'value') not in data or isinstance(data[(k[0], k[1], 'value')], Missing):
+                data.pop((k[0], k[1], 'value'), None)
+                data.pop((k[0], k[1], 'key'), None)
+                break
+            else:
+                value = data[(k[0], k[1], 'value')]
+                hasAttribution = value != ''
+                break
+    if not hasAttribution:
+        current_indexes = [k[1] for k in data.keys()
+                           if len(k) > 1 and k[0] == 'extras']
+
+        new_index = max(current_indexes) + 1 if current_indexes else 0
+        data[('extras', new_index, 'key')] = 'licenseAttributionByText'
+        data[('extras', new_index, 'value')] = ''
+
+    if isByLicense and not hasAttribution:
+        raise toolkit.Invalid(
+            'licenseAttributionByText: empty not allowed')
+
+    if not isByLicense and hasAttribution:
+        raise toolkit.Invalid(
+            'licenseAttributionByText: text not allowed for this license')
+
+
+def known_spatial_uri(key, data, errors, context):
+    value = _extract_value(data, 'spatial_uri')
+
+    if not value:
+        poly = None
+
+        # some harvesters might import a polygon directly...
+        # pdb.set_trace()
+        poly = _extract_value(data, 'spatial')
+
+        has_old_uri = False
+        pkg = context.get('package', None)
+        if pkg:
+            old_uri = pkg.extras.get('spatial_uri', None)
+            has_old_uri = old_uri != None and len(old_uri) > 0
+            if not poly:
+                poly = pkg.extras.get('spatial', None)
+        if not poly or has_old_uri:
+            raise toolkit.Invalid('spatial_uri: empty not allowed')
+        else:
+            if poly:
+                new_index = next_extra_index(data)
+                data[('extras', new_index+1, 'key')] = 'spatial'
+                data[('extras', new_index+1, 'value')] = poly
+            return
+
+    mapping_file = config.get('ckanext.odsh.spatial.mapping')
+    try:
+        mapping_file = urllib2.urlopen(mapping_file)
+    except Exception:
+        raise Exception("Could not load spatial mapping file!")
+
+    not_found = True
+    spatial_text = str()
+    spatial = str()
+    cr = csv.reader(mapping_file, delimiter="\t")
+    for row in cr:
+        if row[0].encode('UTF-8') == value:
+            not_found = False
+            spatial_text = row[1]
+            loaded = json.loads(row[2])
+            spatial = json.dumps(loaded['geometry'])
+            break
+    if not_found:
+        raise toolkit.Invalid(
+            'spatial_uri: uri unknown')
+
+    new_index = next_extra_index(data)
+
+    data[('extras', new_index, 'key')] = 'spatial_text'
+    data[('extras', new_index, 'value')] = spatial_text
+    data[('extras', new_index+1, 'key')] = 'spatial'
+    data[('extras', new_index+1, 'value')] = spatial
+
+
+def next_extra_index(data):
+    current_indexes = [k[1] for k in data.keys()
+                       if len(k) > 1 and k[0] == 'extras']
+
+    return max(current_indexes) + 1 if current_indexes else 0
+
+
+def tag_name_validator(value, context):
+    tagname_match = re.compile('[\w \-.\:\(\)\ยด\`]*$', re.UNICODE)
+    if not tagname_match.match(value):
+        raise toolkit.Invalid(_('Tag "%s" must be alphanumeric '
+                                'characters or symbols: -_.:()') % (value))
+    return value
+
+
+def tag_string_convert(key, data, errors, context):
+    '''Takes a list of tags that is a comma-separated string (in data[key])
+    and parses tag names. These are added to the data dict, enumerated. They
+    are also validated.'''
+    if isinstance(data[key], basestring):
+        tags = [tag.strip()
+                for tag in data[key].split(',')
+                if tag.strip()]
+    else:
+        tags = data[key]
+
+    current_index = max([int(k[1]) for k in data.keys()
+                         if len(k) == 3 and k[0] == 'tags'] + [-1])
+
+    for num, tag in zip(count(current_index+1), tags):
+        data[('tags', num, 'name')] = tag
+
+    for tag in tags:
+        toolkit.get_validator('tag_length_validator')(tag, context)
+        tag_name_validator(tag, context)
+
+
+def get_validators():
+    return {
+        'known_spatial_uri': known_spatial_uri,
+        'odsh_tag_name_validator': tag_name_validator,
+        'odsh_validate_extras': validate_extras,
+        'validate_licenseAttributionByText': validate_licenseAttributionByText
+    }