# debian/uncrustify-trinity/uncrustify-trinity-0.73.0/tests/test_uncrustify/utilities.py
#
# Logic for listing and running tests.
#
# * @author  Ben Gardner        October 2009
# * @author  Guy Maurel         October 2015
# * @author  Matthew Woehlke    June 2018
#
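# A minimal usage sketch (assuming the enclosing test_uncrustify package is
# importable; the driver below and the 'tests/c.test' / 'c' values are
# hypothetical examples, not part of this module):
#
#   import argparse
#   import sys
#   from test_uncrustify import utilities
#
#   parser = argparse.ArgumentParser()
#   utilities.add_format_tests_arguments(parser)
#   args = utilities.parse_args(parser)
#
#   tests = utilities.read_format_tests('tests/c.test', 'c')
#   counts = utilities.run_tests(tests, args)
#   utilities.report(counts)
#   sys.exit(0 if sum(counts.values()) == counts['passing'] else 1)
#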

import argparse
import os
import subprocess
import sys

from .ansicolor import printc
from .config import config, all_tests, FAIL_ATTRS, PASS_ATTRS, SKIP_ATTRS
from .failure import (Failure, MismatchFailure, UnexpectedlyPassingFailure,
                      UnstableFailure)
from .test import FormatTest


# -----------------------------------------------------------------------------
def _add_common_arguments(parser):
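    """Add options shared by all test drivers: output verbosity, diff and
    debug-file generation, the uncrustify and git executables, and the
    result directory."""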
    parser.add_argument('-c', '--show-commands', action='store_true',
                        help='show commands')

    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show detailed test information')

    parser.add_argument('-d', '--diff', action='store_true',
                        help='show diff on failure')

    parser.add_argument('-x', '--xdiff', action='store_true',
                        help='show diff on expected failure')

    parser.add_argument('-g', '--debug', action='store_true',
                        help='generate debug files (.log, .unc)')

    parser.add_argument('-e', '--executable', type=str, required=True,
                        metavar='PATH',
                        help='uncrustify executable to test')

    parser.add_argument('--git', type=str, default=config.git_exe,
                        metavar='PATH',
                        help='git executable to use to generate diffs')

    parser.add_argument('--result-dir', type=str, default=os.getcwd(),
                        metavar='DIR',
                        help='location to which results will be written')


# -----------------------------------------------------------------------------
def add_test_arguments(parser):
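    """Add the options describing a single formatting test: its name,
    language, input, config and expected-output files, optional rerun
    files, and the expected-failure flag, plus the common options."""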
    _add_common_arguments(parser)

    parser.add_argument("name",                 type=str, metavar='NAME')
    parser.add_argument("--lang",               type=str, required=True)
    parser.add_argument("--input",              type=str, required=True)
    parser.add_argument("--config",             type=str, required=True)
    parser.add_argument("--expected",           type=str, required=True)
    parser.add_argument("--rerun-config",       type=str, metavar='INPUT')
    parser.add_argument("--rerun-expected",     type=str, metavar='CONFIG')
    parser.add_argument("--xfail",              action='store_true')


# -----------------------------------------------------------------------------
def add_source_tests_arguments(parser):
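    """Add the options for the source tests driver: the common options
    plus -p/--show-all."""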
    _add_common_arguments(parser)

    parser.add_argument('-p', '--show-all', action='store_true',
                        help='show passed/skipped tests')


# -----------------------------------------------------------------------------
def add_format_tests_arguments(parser):
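    """Add the options for the format tests driver: the common options,
    -p/--show-all, test selection, and the hidden options used when
    generating the CTest script."""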
    _add_common_arguments(parser)

    parser.add_argument('-p', '--show-all', action='store_true',
                        help='show passed/skipped tests')

    parser.add_argument('-r', '--select', metavar='CASE(S)', type=str,
                        help='select tests to be executed')

    parser.add_argument('tests', metavar='TEST', type=str, nargs='*',
                        default=all_tests,
                        help='test(s) to run (default all)')

    # Arguments for generating the CTest script; users should not use these
    # directly
    parser.add_argument("--write-ctest", type=str, help=argparse.SUPPRESS)
    parser.add_argument("--cmake-config", type=str, help=argparse.SUPPRESS)
    parser.add_argument("--python", type=str, help=argparse.SUPPRESS)


# -----------------------------------------------------------------------------
def parse_args(parser):
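    """Parse the command line, store the git and uncrustify executables in
    the global config, and verify that the uncrustify executable exists
    and responds to '--help'; exit with an error message otherwise."""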
    args = parser.parse_args()

    if args.git is not None:
        config.git_exe = args.git

    config.uncrustify_exe = args.executable
    if not os.path.exists(config.uncrustify_exe):
        msg = 'Specified uncrustify executable {!r} does not exist'.format(
            config.uncrustify_exe)
        printc("FAILED: ", msg, **FAIL_ATTRS)
        sys.exit(-1)

    # Do a sanity check on the executable
    try:
        with open(os.devnull, 'w') as bitbucket:
            subprocess.check_call([config.uncrustify_exe, '--help'],
                                  stdout=bitbucket)
    except Exception as exc:
        msg = ('Specified uncrustify executable {!r} ' +
               'does not appear to be usable: {!s}').format(
            config.uncrustify_exe, exc)
        printc("FAILED: ", msg, **FAIL_ATTRS)
        sys.exit(-1)

    return args


# -----------------------------------------------------------------------------
def run_tests(tests, args, selector=None):
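    """Run each test (optionally filtered by selector) and tally the
    outcomes, returning a dict with the counts of passing, failing,
    mismatching, unstable and unexpectedly passing tests."""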
    pass_count = 0
    fail_count = 0
    mismatch_count = 0
    unstable_count = 0
    unexpectedly_passing_count = 0

    for test in tests:
        if selector is not None and not selector.test(test.test_name):
            if args.show_all:
                printc("SKIPPED: ", test.test_name, **SKIP_ATTRS)
            continue

        try:
            test.run(args)
            if args.show_all:
                outcome = 'XFAILED' if test.test_xfail else 'PASSED'
                printc('{}: '.format(outcome), test.test_name, **PASS_ATTRS)
            pass_count += 1
        except UnstableFailure:
            unstable_count += 1
        except MismatchFailure:
            mismatch_count += 1
        except UnexpectedlyPassingFailure:
            unexpectedly_passing_count += 1
        except Failure:
            fail_count += 1

    return {
        'passing': pass_count,
        'failing': fail_count,
        'mismatch': mismatch_count,
        'unstable': unstable_count,
        'xpass': unexpectedly_passing_count
    }


# -----------------------------------------------------------------------------
def report(counts):
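    """Print a summary of the counts returned by run_tests, highlighting
    each category of failure."""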
    total = sum(counts.values())
    print('{passing} / {total} tests passed'.format(total=total, **counts))
    if counts['failing'] > 0:
        printc('{failing} tests failed to execute'.format(**counts),
               **FAIL_ATTRS)
    if counts['mismatch'] > 0:
        printc(
            '{mismatch} tests did not match the expected output'.format(
                **counts),
            **FAIL_ATTRS)
    if counts['unstable'] > 0:
        printc('{unstable} tests were unstable'.format(**counts),
               **FAIL_ATTRS)
    if counts['xpass'] > 0:
        printc('{xpass} tests passed but were expected to fail'.format(
            **counts), **FAIL_ATTRS)


# -----------------------------------------------------------------------------
def read_format_tests(filename, group):
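    """Read a test declaration file, skipping blank lines and '#' comments,
    and return the list of FormatTest objects built from its entries."""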
    tests = []

    print("Processing " + filename)
    with open(filename, 'rt') as f:
        for line_number, line in enumerate(f, 1):
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                continue

            test = FormatTest()
            test.build_from_declaration(line, group, line_number)
            tests.append(test)

    return tests


# -----------------------------------------------------------------------------
def fixup_ctest_path(path, config):
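    """If the directory containing path matches the build configuration
    name (case-insensitively), replace that directory component with the
    ${CTEST_CONFIGURATION_TYPE} placeholder for use in the generated
    CTest script."""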
    if config is None:
        return path

    dirname, basename = os.path.split(path)
    if os.path.basename(dirname).lower() == config.lower():
        dirname, junk = os.path.split(dirname)
        return os.path.join(dirname, '${CTEST_CONFIGURATION_TYPE}', basename)

    return path