Skip to content

Commit

Permalink
Adding help_text_check application
Browse files Browse the repository at this point in the history
  • Loading branch information
john-westcott-iv committed Oct 29, 2024
1 parent d013e48 commit 4af406b
Show file tree
Hide file tree
Showing 9 changed files with 341 additions and 0 deletions.
Empty file.
8 changes: 8 additions & 0 deletions ansible_base/help_text_check/apps.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
from django.apps import AppConfig


class HelpTextCheckConfig(AppConfig):
    """AppConfig for the help_text_check application.

    Registers the app under the 'dab_help_text_check' label (the dab_ prefix
    matches the naming convention of the other DAB applications).
    """

    default_auto_field = 'django.db.models.BigAutoField'
    name = 'ansible_base.help_text_check'
    label = 'dab_help_text_check'
    verbose_name = 'Django Model Help Text Checker'
Empty file.
Empty file.
101 changes: 101 additions & 0 deletions ansible_base/help_text_check/management/commands/help_text_check.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
from sys import exit

from django.apps import apps
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Scan Django models and report any concrete fields lacking help_text.

    Exit codes:
        0   - every scanned field has help_text (or is ignored)
        1   - one or more fields are missing help_text
        255 - the --ignore-file could not be read
    """

    help = "Ensure models have help_text fields"
    # Mapping of "app.Model.field" -> reason the field is ignored. This is a
    # class attribute for backward compatibility, but handle() rebinds a fresh
    # instance dict per run so state never leaks between invocations.
    ignore_reasons = {}
    # Field names ignored on every model unless --skip-global-ignore is passed.
    global_ignore_fields = ['id']
    # Prefix used to indent per-field report lines.
    indentation = " "

    def add_arguments(self, parser):
        """Register the command's optional CLI arguments."""
        parser.add_argument(
            "--applications",
            type=str,
            help="Comma delimited list of the django application to check. If not specified all applications will be checked",
            required=False,
        )
        parser.add_argument("--ignore-file", type=str, help="The path to a file containing entries like: app.model.field to ignore", required=False)
        parser.add_argument("--skip-global-ignore", action="store_true", help="Don't ignore the global ignore fields", required=False)

    def get_models(self, applications):
        """Return the models of every installed app whose label contains one of
        the comma-separated entries in ``applications``.

        Entries that match no installed application produce a warning on stderr.
        Duplicate models are only included once.
        """
        installed_applications = apps.app_configs.keys()
        models = []
        for requested_application in applications.split(','):
            found_app = False
            for installed_application in installed_applications:
                # Substring match so e.g. "dab" selects every dab_* application.
                if requested_application in installed_application:
                    found_app = True
                    for model in apps.get_app_config(installed_application).get_models():
                        if model not in models:
                            models.append(model)
            if not found_app:
                self.stderr.write(self.style.WARNING(f"Specified application {requested_application} is not in INSTALLED_APPS"))
        return models

    def _load_ignore_reasons(self, ignore_file):
        """Populate self.ignore_reasons from ``ignore_file``.

        Each non-empty line is "app.model.field" optionally followed by
        "# reason". Exits the process with code 255 on any read failure.
        """
        try:
            with open(ignore_file, 'r') as f:
                for line in f:
                    # Split off an optional trailing comment. maxsplit=1 keeps a
                    # reason containing '#' intact (split('#', 2) used to drop
                    # the reason entirely for such lines).
                    elements = line.strip().split('#', 1)
                    entry = elements[0].strip()
                    if entry:
                        self.ignore_reasons[entry] = elements[1].strip() if len(elements) == 2 else 'Not specified'
        except FileNotFoundError:
            self.stderr.write(self.style.ERROR(f"Ignore file {ignore_file} does not exist"))
            exit(255)
        except PermissionError:
            self.stderr.write(self.style.ERROR(f"No permission to read {ignore_file}"))
            exit(255)
        except Exception as e:
            self.stderr.write(self.style.ERROR(f"Failed to read {ignore_file}: {e}"))
            exit(255)

    def handle(self, *args, **options):
        """Check the requested applications' models and print a per-model report."""
        # Rebind per-run: ignore_reasons is otherwise a shared class attribute
        # and would accumulate entries across invocations in one process.
        self.ignore_reasons = {}
        ignore_file = options.get('ignore_file', None)
        if ignore_file:
            self._load_ignore_reasons(ignore_file)

        if len(self.ignore_reasons) > 0:
            self.stdout.write(f"Ignoring {len(self.ignore_reasons)} field(s):")
            for field in self.ignore_reasons:
                self.stdout.write(f"{self.indentation}- {field}")
            # Use the command's stdout (not print) so the blank line is part of
            # the captured/redirected output like every other line.
            self.stdout.write("")

        applications = options.get('applications', None)
        if applications:
            models = self.get_models(applications)
        else:
            models = apps.get_models()

        scanned_models = 0
        return_code = 0
        results = {}
        for model in models:
            scanned_models += 1

            model_name = f"{model._meta.app_label}.{model.__name__}"
            results[model_name] = {}
            for field in model._meta.concrete_fields:
                field_name = f"{model_name}.{field.name}"

                help_text = getattr(field, 'help_text', '')
                if field_name in self.ignore_reasons:
                    message = self.style.WARNING(f"{self.indentation}{field.name}: {self.ignore_reasons[field_name]}")
                elif field.name in self.global_ignore_fields and not options.get('skip_global_ignore', False):
                    message = self.style.WARNING(f"{self.indentation}{field.name}: global ignore field")
                elif not help_text:
                    # A missing help_text is the failure condition (exit code 1).
                    return_code = 1
                    message = self.style.MIGRATE_HEADING(f"{self.indentation}{field.name}: ") + self.style.ERROR("missing help_text")
                else:
                    message = self.style.SUCCESS(f"{self.indentation}{field.name}") + f": {help_text}"

                results[model_name][field.name] = message
        self.stdout.write(f"Scanned: {scanned_models} model(s)")

        # Emit the report sorted by model and field name for stable output.
        for model_name in sorted(results.keys()):
            self.stdout.write(self.style.SQL_TABLE(model_name))
            for field_name in sorted(results[model_name].keys()):
                self.stdout.write(results[model_name][field_name])
            self.stdout.write("")

        exit(return_code)
2 changes: 2 additions & 0 deletions ansible_base/help_text_check/urls.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# This application exposes no API endpoints; the empty lists satisfy the
# URL-loading convention used for DAB applications.
api_version_urls = []
api_urls = []
79 changes: 79 additions & 0 deletions docs/apps/help_text_check.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
# Help Text Checker

A simple application to provide a management command which can inspect django models to see if all fields have help_text related to them.

## Settings

Add `ansible_base.help_text_check` to your installed apps:

```
INSTALLED_APPS = [
...
'ansible_base.help_text_check',
]
```

### Additional Settings

There are no additional settings required.

## URLS

This feature does not require any URLs.

## Using the management command

The management command can be run on its own as:

```
manage.py help_text_check
```

By default this will report on all models the ORM knows about.

### Restricting which applications are searched

If you would like to restrict which models will be queried you can do so on a per-application basis by passing in a comma separated value like:

```
manage.py help_text_check --applications=<application1>,<application2>,...
```

Note: each entry in the passed list is compared against the installed applications, and any installed application whose name contains that entry is added to the list of applications to check.

For example, DAB has a number of applications. These can all be tested with the following:

```
manage.py help_text_check --applications=dab
```

This is because the names of all applications in DAB start with `dab_`. If you only wanted to test a single application in DAB you can do that like:

```
manage.py help_text_check --applications=dab_authentication
```

### Ignoring specific fields

If there are specific fields you want to ignore on a model you can create an "ignore file" where each line in the file is in the syntax of:
```
application.model.field_name
```

Once the file is created you can pass that as the `--ignore-file` parameter like:
```
manage.py help_text_check --ignore-file=<path to file>
```

### Global ignore

The `id` field of all models is ignored by default.

If you want to report on the globally ignored fields you can pass in `--skip-global-ignore`

### Return codes

This script returns 3 possible return codes:

- `0` - everything is fine
- `1` - one or more fields are missing help_text
- `255` - the ignore file could not be read for some reason (see output)
1 change: 1 addition & 0 deletions test_app/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@
'django_extensions',
'debug_toolbar',
'ansible_base.activitystream',
'ansible_base.help_text_check',
]

MIDDLEWARE = [
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
from io import StringIO
from unittest import mock

import pytest
from django.core.management import call_command
from django.db.models import CharField, Model


@pytest.mark.parametrize(
    'exception_type,message',
    [
        (FileNotFoundError, "does not exist"),
        (PermissionError, "No permission to read"),
        (IndexError, "Failed to read"),
    ],
)
def test_exception_on_ignore_file_read(exception_type, message):
    """The command exits 255 with a descriptive error when the ignore file can't be read."""
    captured_out = StringIO()
    captured_err = StringIO()

    with mock.patch("builtins.open", mock.mock_open()) as mocked_open:
        mocked_open.side_effect = exception_type('Testing perm error')
        with pytest.raises(SystemExit) as exc_info:
            call_command('help_text_check', ignore_file='junk.dne', stdout=captured_out, stderr=captured_err)

    assert exc_info.value.code == 255
    assert message in captured_err.getvalue()


@pytest.mark.parametrize(
    "read_data,has_message",
    [
        ('', False),
        ('asdf', True),
    ],
)
def test_valid_exception_types(read_data, has_message):
    """An 'Ignoring N field(s)' banner appears only when the ignore file has entries."""
    captured_out = StringIO()
    captured_err = StringIO()

    patched_models = mock.patch('ansible_base.help_text_check.management.commands.help_text_check.apps.get_models', return_value=[])
    patched_open = mock.patch("builtins.open", mock.mock_open(read_data=read_data))
    with patched_models, patched_open:
        with pytest.raises(SystemExit) as exc_info:
            call_command('help_text_check', ignore_file='junk.dne', stdout=captured_out, stderr=captured_err)

    assert exc_info.value.code == 0
    command_output = captured_out.getvalue()
    if has_message:
        assert 'Ignoring 1 field(s)' in command_output
    else:
        assert 'Ignoring' not in command_output


def test_missing_application():
    """Requesting an unknown application warns on stderr but still exits cleanly."""
    captured_out = StringIO()
    captured_err = StringIO()

    with pytest.raises(SystemExit) as exc_info:
        call_command('help_text_check', applications='App3', stdout=captured_out, stderr=captured_err)

    assert exc_info.value.code == 0
    assert 'is not in INSTALLED_APPS' in captured_err.getvalue()


def get_app_config_mock(app_name):
    """Return a stand-in app config whose get_models() yields canned model names."""
    canned_models = {
        'App1': ['App1.model1', 'App1.model2', 'App1.model1'],
        'App2': ['App2.model1'],
    }

    class mock_app_config:
        def __init__(self, app_name):
            self.app_name = app_name

        def get_models(self):
            if self.app_name not in canned_models:
                raise Exception("This has to be called with either App1 or App2")
            return canned_models[self.app_name]

    return mock_app_config(app_name)


def test_app_limit():
    """get_models() only consults the requested applications and de-duplicates models."""
    from ansible_base.help_text_check.management.commands.help_text_check import Command

    command = Command()

    fake_app_configs = {'App1': [], 'App2': [], 'App3': []}
    with mock.patch.dict('ansible_base.help_text_check.management.commands.help_text_check.apps.app_configs', fake_app_configs):
        with mock.patch('ansible_base.help_text_check.management.commands.help_text_check.apps.get_app_config') as get_app_config:
            get_app_config.side_effect = get_app_config_mock
            discovered = command.get_models('App1,App2')
    assert discovered == ['App1.model1', 'App1.model2', 'App2.model1']


class GoodModel(Model):
    # Fixture model whose single field has help_text; the command should report
    # it as passing (exit code 0).
    class Meta:
        app_label = 'Testing'

    test_field = CharField(
        help_text='Testing help_text',
    )


class BadModel(Model):
    # Fixture model whose field lacks help_text; the command should flag it and
    # exit with code 1.
    class Meta:
        app_label = 'Testing'

    test_field = CharField()


def get_app_config_actual_models(app_name):
    """Return a fake app config that serves the real fixture model classes."""

    class mock_app_config:
        def __init__(self, app_name):
            self.app_name = app_name

        def get_models(self):
            # Note: dispatches on the closed-over app_name, mirroring the
            # original implementation.
            if app_name == 'good':
                return [GoodModel]
            if app_name == 'bad':
                return [BadModel]
            return [GoodModel, BadModel]

    return mock_app_config(app_name)


@pytest.mark.parametrize(
    'test_type',
    [
        "good",
        "bad",
    ],
)
def test_models(test_type):
    """End-to-end: models with help_text exit 0, models without it exit 1."""
    captured_out = StringIO()
    captured_err = StringIO()

    with mock.patch.dict('ansible_base.help_text_check.management.commands.help_text_check.apps.app_configs', {test_type: []}):
        with mock.patch('ansible_base.help_text_check.management.commands.help_text_check.apps.get_app_config') as get_app_config:
            get_app_config.side_effect = get_app_config_actual_models
            with pytest.raises(SystemExit) as exc_info:
                call_command('help_text_check', applications=test_type, stdout=captured_out, stderr=captured_err)

    command_output = captured_out.getvalue()
    if test_type == 'good':
        assert exc_info.value.code == 0
        assert 'Testing.GoodModel' in command_output
        assert 'Testing help_text' in command_output
    elif test_type == 'bad':
        assert exc_info.value.code == 1
        assert 'Testing.BadModel' in command_output
        assert 'test_field: missing help_text' in command_output
    else:
        assert False, "This test can only do good and bad models right now"

0 comments on commit 4af406b

Please sign in to comment.