"""
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Provides the configuration class, which holds all information related to
how this invocation of the test suite should be run.
"""
from __future__ import absolute_import
from __future__ import print_function
# System modules
import os
# Third-party modules
import unittest2
# LLDB Modules
import lldbsuite
# The test suite.
# The top-level test suite that individual test cases get added to.
suite = unittest2.TestSuite()
# The list of categories we said we care about
categories_list = None
# set to true if we are going to use categories for cherry-picking test cases
use_categories = False
# Categories we want to skip
skip_categories = ["darwin-log"]
# Categories we expect to fail
xfail_categories = []
# use this to track per-category failures
failures_per_category = {}
# The path to LLDB.framework is optional.
lldb_framework_path = None
# Test suite repeat count. Can be overwritten with '-# count'.
count = 1
# The 'arch' and 'compiler' can be specified via command line.
arch = None # Must be initialized after option parsing
compiler = None # Must be initialized after option parsing
# The overridden DWARF version.
# NOTE(review): presumably 0 means "no override requested" — confirm with the
# option-parsing code that consumes this.
dwarf_version = 0
# Any overridden settings.
# Always disable default dynamic types for testing purposes.
settings = [('target.prefer-dynamic-value', 'no-dynamic-values')]
# Path to the FileCheck testing tool. Not optional.
filecheck = None
# The arch might dictate some specific CFLAGS to be passed to the toolchain to build
# the inferior programs. The global variable cflags_extras provides a hook to do
# just that.
cflags_extras = ''
# The filters (testclass.testmethod) used to admit tests into our test suite.
filters = []
# The regular expression pattern to match against eligible filenames as
# our test cases.
regexp = None
# Sets of tests which are excluded at runtime
skip_tests = None
xfail_tests = None
# By default, recorded session info for errored/failed test are dumped into its
# own file under a session directory named after the timestamp of the test suite
# run. Use '-s session-dir-name' to specify a specific dir name.
sdir_name = None
# Format string selecting which fields go into recorded session file names.
# Valid options:
# f - test file name (without extension)
# n - test class name
# m - test method name
# a - architecture
# c - compiler path
# The default is to write all fields.
session_file_format = 'fnmac'
# Set this flag if there is any session info dumped during the test run.
sdir_has_content = False
# svn_info stores the output from 'svn info lldb.base.dir'.
svn_info = ''
# Default verbosity is 0.
verbose = 0
# By default, search from the script directory.
# We can't use sys.path[0] to determine the script directory
# because it doesn't work under a debugger
testdirs = [os.path.dirname(os.path.realpath(__file__))]
# Separator string used when printing banners/section breaks in output.
separator = '-' * 70
# Overall pass/fail state of the test-suite run.
failed = False
# LLDB Remote platform setting
lldb_platform_name = None
lldb_platform_url = None
lldb_platform_working_dir = None
# The base directory in which the tests are being built.
test_build_dir = None
# The clang module cache directory used by lldb.
lldb_module_cache_dir = None
# The clang module cache directory used by clang.
clang_module_cache_dir = None
# The only directory to scan for tests. If multiple test directories are
# specified, and an exclusive test subdirectory is specified, the latter option
# takes precedence.
exclusive_test_subdir = None
# Test results handling globals
results_filename = None
results_formatter_name = None
results_formatter_object = None
results_formatter_options = None
test_result = None
# Test rerun configuration vars
rerun_all_issues = False
# The names of all tests. Used to assert we don't have two tests with the
# same base name.
all_tests = set()
def shouldSkipBecauseOfCategories(test_categories):
    """Decide whether a test tagged with test_categories should be skipped.

    When category cherry-picking is active (use_categories), a test is
    skipped unless it shares at least one category with categories_list.
    Independently, a test is skipped if any of its categories appears in
    skip_categories.  Returns True to skip, False to run.
    """
    if use_categories:
        # An untagged test, or one whose tags don't intersect the requested
        # categories, is not part of the cherry-picked set.
        if not test_categories or not (
                categories_list & set(test_categories)):
            return True
    # Skip if the test carries any explicitly-skipped category.
    return any(skipped in test_categories for skipped in skip_categories)
def get_absolute_path_to_exclusive_test_subdir():
    """
    If an exclusive test subdirectory is specified, return its absolute path.
    Otherwise return None.

    When the configured subdirectory does not exist on disk, prints a
    diagnostic and returns None.
    """
    # Nothing configured: nothing to resolve.  (The original computed
    # realpath(__file__) before this guard and also re-checked
    # len(exclusive_test_subdir) > 0, which is always true past this point.)
    if not exclusive_test_subdir:
        return None
    test_directory = os.path.dirname(os.path.realpath(__file__))
    test_subdir = os.path.join(test_directory, exclusive_test_subdir)
    if os.path.isdir(test_subdir):
        return test_subdir
    print('specified test subdirectory {} is not a valid directory\n'
          .format(test_subdir))
    return None
def get_absolute_path_to_root_test_dir():
    """
    If an exclusive test subdirectory is specified, return its absolute path.
    Otherwise, return the absolute path of the root test directory.
    """
    exclusive_dir = get_absolute_path_to_exclusive_test_subdir()
    # Fall back to the directory containing this script when no exclusive
    # subdirectory is configured (or it was invalid).
    return exclusive_dir if exclusive_dir else os.path.dirname(
        os.path.realpath(__file__))
def get_filecheck_path():
    """
    Get the path to the FileCheck testing tool.
    """
    # Only hand back the configured path when something exists at it;
    # os.path.lexists (unlike os.path.exists) does not follow symlinks, so a
    # broken symlink would still pass this check.
    if filecheck and os.path.lexists(filecheck):
        return filecheck