 @dataclass
 class KunitParseRequest:
        raw_output: Optional[str]
-       build_dir: str
        json: Optional[str]
 
 @dataclass
 class KunitExecRequest(KunitParseRequest):
+       build_dir: str
        timeout: int
        alltests: bool
        filter_glob: str
                                test_glob = request.filter_glob.split('.', maxsplit=2)[1]
                                filter_globs = [g + '.'+ test_glob for g in filter_globs]
 
+       metadata = kunit_json.Metadata(build_dir=request.build_dir)
+
        test_counts = kunit_parser.TestCounts()
        exec_time = 0.0
        for i, filter_glob in enumerate(filter_globs):
                        filter_glob=filter_glob,
                        build_dir=request.build_dir)
 
-               _, test_result = parse_tests(request, run_result)
+               _, test_result = parse_tests(request, metadata, run_result)
                # run_kernel() doesn't block on the kernel exiting.
                # That only happens after we get the last line of output from `run_result`.
                # So exec_time here actually contains parsing + execution time, which is fine.
        else:
                return KunitStatus.TEST_FAILURE
 
-def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
+def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
        parse_start = time.time()
 
        test_result = kunit_parser.Test()
        if request.json:
                json_str = kunit_json.get_json_result(
                                        test=test_result,
-                                       def_config='kunit_defconfig',
-                                       build_dir=request.build_dir)
+                                       metadata=metadata)
                if request.json == 'stdout':
                        print(json_str)
                else:

                else:
                        with open(cli_args.file, 'r', errors='backslashreplace') as f:
                                kunit_output = f.read().splitlines()
+               # We know nothing about how the result was created!
+               metadata = kunit_json.Metadata()
                request = KunitParseRequest(raw_output=cli_args.raw_output,
-                                           build_dir='',
                                            json=cli_args.json)
-               result, _ = parse_tests(request, kunit_output)
+               result, _ = parse_tests(request, metadata, kunit_output)
                if result.status != KunitStatus.SUCCESS:
                        sys.exit(1)
        else:
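
A rough sketch, not part of the patch, of how the reshuffled request dataclasses are constructed after this change: build_dir now belongs to KunitExecRequest, so parse-only callers (the `parse` subcommand above) no longer pass a placeholder build directory. The positional form mirrors the updated unit tests further down; the concrete values are illustrative only.

    import kunit  # the kunit.py tool module, as imported by its unit tests

    # Parse-only request: just the raw-output mode and optional JSON destination.
    parse_request = kunit.KunitParseRequest(raw_output=None, json=None)

    # Exec request, positional, matching the field order used in the tests:
    # (raw_output, json, build_dir, timeout, alltests, filter_glob, ...)
    exec_request = kunit.KunitExecRequest(None, None, '.kunit', 300, False,
                                          'suite*', None, 'suite')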
 
 # Copyright (C) 2020, Google LLC.
 # Author: Heidi Fahim <heidifahim@google.com>
 
+from dataclasses import dataclass
 import json
 import os
 
 from kunit_parser import Test, TestStatus
 from typing import Any, Dict
 
+@dataclass
+class Metadata:
+       """Stores metadata about this run to include in get_json_result()."""
+       arch: str = 'UM'
+       def_config: str = 'kunit_defconfig'
+       build_dir: str = ''
+
 JsonObj = Dict[str, Any]
 
 _status_map: Dict[TestStatus, str] = {
        TestStatus.TEST_CRASHED: "ERROR",
 }
 
-def _get_group_json(test: Test, def_config: str, build_dir: str) -> JsonObj:
+def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj:
        sub_groups = []  # List[JsonObj]
        test_cases = []  # List[JsonObj]
 
        for subtest in test.subtests:
                if subtest.subtests:
-                       sub_group = _get_group_json(subtest, def_config,
-                               build_dir)
+                       sub_group = _get_group_json(subtest, common_fields)
                        sub_groups.append(sub_group)
                        continue
                status = _status_map.get(subtest.status, "FAIL")
 
        test_group = {
                "name": test.name,
-               "arch": "UM",
-               "defconfig": def_config,
-               "build_environment": build_dir,
                "sub_groups": sub_groups,
                "test_cases": test_cases,
+       }
+       test_group.update(common_fields)
+       return test_group
+
+def get_json_result(test: Test, metadata: Metadata) -> str:
+       common_fields = {
+               "arch": metadata.arch,
+               "defconfig": metadata.def_config,
+               "build_environment": metadata.build_dir,
                "lab_name": None,
                "kernel": None,
                "job": None,
                "git_branch": "kselftest",
        }
-       return test_group
 
-def get_json_result(test: Test, def_config: str, build_dir: str) -> str:
-       test_group = _get_group_json(test, def_config, build_dir)
+       test_group = _get_group_json(test, common_fields)
        test_group["name"] = "KUnit Test Group"
        return json.dumps(test_group, indent=4)
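
A minimal usage sketch, not part of the patch, of the new Metadata-driven API: parse a run, then serialize it with explicit metadata. The file name below is hypothetical; fields left unset on Metadata fall back to the dataclass defaults ('UM', 'kunit_defconfig', '').

    import kunit_json
    import kunit_parser

    # 'kunit_output.log' is a hypothetical file holding raw kernel log lines.
    with open('kunit_output.log', errors='backslashreplace') as kernel_output:
        test_result = kunit_parser.parse_run_tests(kernel_output)

    metadata = kunit_json.Metadata(build_dir='.kunit')
    print(kunit_json.get_json_result(test=test_result, metadata=metadata))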
 
                        test_result = kunit_parser.parse_run_tests(file)
                        json_obj = kunit_json.get_json_result(
                                test=test_result,
-                               def_config='kunit_defconfig',
-                               build_dir='.kunit')
+                               metadata=kunit_json.Metadata())
                return json.loads(json_obj)
 
        def test_failed_test_json(self):

                self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
 
                got = kunit._list_tests(self.linux_source_mock,
-                                    kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*', None, 'suite'))
+                                    kunit.KunitExecRequest(None, None, '.kunit', 300, False, 'suite*', None, 'suite'))
 
                self.assertEqual(got, want)
                # Should respect the user's filter glob when listing tests.
 
                # Should respect the user's filter glob when listing tests.
                mock_tests.assert_called_once_with(mock.ANY,
-                                    kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*.test*', None, 'suite'))
+                                    kunit.KunitExecRequest(None, None, '.kunit', 300, False, 'suite*.test*', None, 'suite'))
                self.linux_source_mock.run_kernel.assert_has_calls([
                        mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300),
                        mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300),
 
                # Should respect the user's filter glob when listing tests.
                mock_tests.assert_called_once_with(mock.ANY,
-                                    kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*', None, 'test'))
+                                    kunit.KunitExecRequest(None, None, '.kunit', 300, False, 'suite*', None, 'test'))
                self.linux_source_mock.run_kernel.assert_has_calls([
                        mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300),
                        mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300),