Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

132

133

134

135

136

137

138

139

140

141

142

143

144

145

146

147

148

149

150

151

152

153

154

155

156

157

158

159

160

161

162

163

164

165

166

167

168

169

170

171

172

173

174

175

176

177

178

179

180

181

182

183

184

185

186

187

188

189

190

191

192

193

194

195

196

197

198

199

200

201

202

203

204

205

206

207

208

209

210

211

212

213

214

215

216

217

218

219

220

221

222

223

224

225

226

227

228

229

230

231

232

233

234

235

236

237

238

239

240

241

242

243

244

245

246

247

248

249

250

251

252

253

254

255

256

257

258

259

260

261

262

263

264

265

266

267

268

269

270

271

272

273

274

275

276

277

278

279

280

281

282

283

284

285

286

287

288

289

290

291

292

293

294

295

296

297

298

299

300

301

302

303

304

305

306

307

308

309

310

311

312

313

314

315

316

317

318

319

320

321

322

323

324

325

326

327

328

329

330

331

332

333

334

335

336

337

338

339

340

341

342

343

344

345

346

347

348

349

350

351

352

353

354

355

356

357

358

359

360

361

362

363

364

365

366

367

368

369

370

371

372

373

374

375

376

377

378

379

380

381

382

383

384

385

386

387

388

389

390

391

392

393

394

395

396

397

398

399

400

401

402

403

404

405

406

407

408

409

410

411

412

413

414

415

416

417

418

419

420

421

422

423

424

425

426

427

428

429

430

431

432

433

434

435

436

437

438

439

440

441

442

443

444

445

446

447

448

449

450

451

452

453

454

455

456

457

458

459

460

461

462

463

464

465

466

467

468

469

470

471

472

473

474

475

476

477

478

479

480

481

482

483

484

485

486

487

488

489

490

491

492

493

494

495

496

497

498

499

500

501

502

503

504

505

506

507

508

509

510

511

512

513

514

515

516

517

518

519

520

521

522

523

524

525

526

527

528

529

530

531

532

533

534

535

536

537

538

539

540

541

542

543

544

545

546

547

548

549

550

551

552

553

554

555

556

557

558

559

560

561

562

563

564

565

566

567

568

569

570

571

572

573

574

575

576

577

578

579

580

581

582

583

584

585

586

587

588

589

590

591

592

593

594

595

596

597

598

599

600

601

602

603

604

605

606

607

608

609

610

611

612

613

614

615

616

#!/usr/bin/env python 

# -*- coding: utf-8 -*- 

""" 

A command-line program that runs all ObsPy tests. 

 

All tests in ObsPy are located in the tests directory of each specific 

module. The __init__.py of the tests directory itself as well as every test 

file located in the tests directory has a function called suite, which is 

executed using this script. Running the script with the verbose keyword exposes 

the names of all available test cases. 

 

:copyright: 

    The ObsPy Development Team (devs@obspy.org) 

:license: 

    GNU Lesser General Public License, Version 3 

    (http://www.gnu.org/copyleft/lesser.html) 

 

.. rubric:: Examples 

 

(1) Run all local tests (ignoring tests requiring a network connection) on 

    command line:: 

 

        $ obspy-runtests 

 

    or via Python interpreter 

 

    >>> import obspy.core 

    >>> obspy.core.runTests()  # DOCTEST: +SKIP 

 

(2) Run all tests on command line:: 

 

        $ obspy-runtests --all 

 

    or via Python interpreter 

 

    >>> import obspy.core 

    >>> obspy.core.runTests(all=True)  # DOCTEST: +SKIP 

 

(3) Verbose output:: 

 

        $ obspy-runtests -v 

 

    or 

 

    >>> import obspy.core 

    >>> obspy.core.runTests(verbosity=2)  # DOCTEST: +SKIP 

 

(4) Run tests of module :mod:`obspy.mseed`:: 

 

        $ obspy-runtests obspy.mseed.tests.suite 

 

    or as shortcut:: 

 

        $ obspy-runtests mseed 

 

(5) Run tests of multiple modules, e.g. :mod:`obspy.wav` and :mod:`obspy.sac`:: 

 

        $ obspy-runtests wav sac 

 

(6) Run a specific test case:: 

 

        $ obspy-runtests obspy.core.tests.test_stats.StatsTestCase.test_init 

 

    or 

 

    >>> import obspy.core 

    >>> tests = ['obspy.core.tests.test_stats.StatsTestCase.test_init'] 

    >>> obspy.core.runTests(verbosity=2, tests=tests)  # DOCTEST: +SKIP 

 

(7) Report test results to http://tests.obspy.org/:: 

 

        $ obspy-runtests -r 

 

(8) To get a full list of all options, use:: 

 

        $ obspy-runtests --help 

 

Of course you may combine most of the options here, e.g. in order to test 

all modules except the module obspy.sh and obspy.seishub, have a verbose output 

and report everything, you would run:: 

 

        $ obspy-runtests -r -v -x seishub -x sh --all 

""" 

 

from obspy.core.util import DEFAULT_MODULES, ALL_MODULES, NETWORK_MODULES 

from obspy.core.util.version import get_git_version 

from optparse import OptionParser, OptionGroup 

import copy 

import doctest 

import glob 

import numpy as np 

import operator 

import os 

import sys 

import time 

import unittest 

import warnings 

import platform 

 

 

DEPENDENCIES = ['numpy', 'scipy', 'matplotlib', 'lxml.etree', 'sqlalchemy', 

                'suds', 'mpl_toolkits.basemap'] 

 

PSTATS_HELP = """ 

Call "python -m pstats obspy.pstats" for an interactive profiling session. 

 

The following commands will produce the same output as shown above: 

  sort cumulative 

  stats obspy. 20 

 

Type "help" to see all available options. 

""" 

 

HOSTNAME = platform.node().split('.', 1)[0] 

 

 

#XXX: start of ugly monkey patch for Python 2.7 

# classes _TextTestRunner and _WritelnDecorator have been marked as depreciated 

class _WritelnDecorator(object): 

    """ 

    Used to decorate file-like objects with a handy 'writeln' method 

    """ 

    def __init__(self, stream): 

        self.stream = stream 

 

    def __getattr__(self, attr): 

        if attr in ('stream', '__getstate__'): 

            raise AttributeError(attr) 

        return getattr(self.stream, attr) 

 

    def writeln(self, arg=None): 

        if arg: 

            self.write(arg) 

        self.write('\n')  # text-mode streams translate to \r\n if needed 

 

unittest._WritelnDecorator = _WritelnDecorator 

#XXX: end of ugly monkey patch 

 

 

def _getSuites(verbosity=1, names=[]): 

    """ 

    The ObsPy test suite. 

    """ 

    # Construct the test suite from the given names. Modules 

    # need not be imported before in this case 

    suites = {} 

    ut = unittest.TestLoader() 

    for name in names: 

        suite = [] 

        if name in ALL_MODULES: 

            # Search for short cuts in tests 

            test = 'obspy.%s.tests.suite' % name 

        else: 

            # If no short cuts names variable = test variable 

            test = name 

        try: 

            suite.append(ut.loadTestsFromName(test, None)) 

        except Exception, e: 

            if verbosity: 

                print(e) 

                print("Cannot import test suite for module obspy.%s" % name) 

        else: 

            suites[name] = ut.suiteClass(suite) 

    return suites 

 

 

def _createReport(ttrs, timetaken, log, server, hostname): 

    # import additional libraries here to speed up normal tests 

    import httplib 

    import urllib 

    from urlparse import urlparse 

    from xml.sax.saxutils import escape 

    import codecs 

    from xml.etree import ElementTree as etree 

    timestamp = int(time.time()) 

    result = {'timestamp': timestamp} 

    result['timetaken'] = timetaken 

    if log: 

        try: 

            data = codecs.open(log, 'r', encoding='UTF-8').read() 

            result['install_log'] = escape(data) 

        except: 

            print("Cannot open log file %s" % log) 

    # get ObsPy module versions 

    result['obspy'] = {} 

    tests = 0 

    errors = 0 

    failures = 0 

    skipped = 0 

    try: 

        installed = get_git_version() 

    except: 

        installed = '' 

    result['obspy']['installed'] = installed 

    for module in sorted(ALL_MODULES): 

        result['obspy'][module] = {} 

        if module not in ttrs: 

            continue 

        result['obspy'][module]['installed'] = installed 

        # test results 

        ttr = ttrs[module] 

        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken'] 

        result['obspy'][module]['tested'] = True 

        result['obspy'][module]['tests'] = ttr.testsRun 

        # skipped is not supported for Python < 2.7 

        try: 

            skipped += len(ttr.skipped) 

            result['obspy'][module]['skipped'] = len(ttr.skipped) 

        except AttributeError: 

            skipped = '' 

            result['obspy'][module]['skipped'] = '' 

        tests += ttr.testsRun 

        # depending on module type either use failure (network related modules) 

        # or errors (all others) 

        result['obspy'][module]['errors'] = {} 

        result['obspy'][module]['failures'] = {} 

        if module in NETWORK_MODULES: 

            for _, text in ttr.errors: 

                result['obspy'][module]['failures']['f%s' % (failures)] = text 

                failures += 1 

            for _, text in ttr.failures: 

                result['obspy'][module]['failures']['f%s' % (failures)] = text 

                failures += 1 

        else: 

            for _, text in ttr.errors: 

                result['obspy'][module]['errors']['f%s' % (errors)] = text 

                errors += 1 

            for _, text in ttr.failures: 

                result['obspy'][module]['errors']['f%s' % (errors)] = text 

                errors += 1 

    # get dependencies 

    result['dependencies'] = {} 

    for module in DEPENDENCIES: 

        temp = module.split('.') 

        try: 

            mod = __import__(module, fromlist=temp[1:]) 

            if module == '_omnipy': 

                result['dependencies'][module] = mod.coreVersion() 

            else: 

                result['dependencies'][module] = mod.__version__ 

        except: 

            result['dependencies'][module] = '' 

    # get system / environment settings 

    result['platform'] = {} 

    for func in ['system', 'release', 'version', 'machine', 

                 'processor', 'python_version', 'python_implementation', 

                 'python_compiler', 'architecture']: 

        try: 

            temp = getattr(platform, func)() 

            if isinstance(temp, tuple): 

                temp = temp[0] 

            result['platform'][func] = temp 

        except: 

            result['platform'][func] = '' 

    # set node name to hostname if set 

    result['platform']['node'] = hostname 

    # post only the first part of the node name (only applies to MacOS X) 

    try: 

        result['platform']['node'] = result['platform']['node'].split('.')[0] 

    except: 

        pass 

    # test results 

    result['tests'] = tests 

    result['errors'] = errors 

    result['failures'] = failures 

    result['skipped'] = skipped 

 

    # generate XML document 

    def _dict2xml(doc, result): 

        for key, value in result.iteritems(): 

            key = key.split('(')[0].strip() 

            if isinstance(value, dict): 

                child = etree.SubElement(doc, key) 

                _dict2xml(child, value) 

            elif value is not None: 

                if isinstance(value, unicode): 

                    etree.SubElement(doc, key).text = value 

                elif isinstance(value, str): 

                    etree.SubElement(doc, key).text = unicode(value, 'utf-8') 

                else: 

                    etree.SubElement(doc, key).text = str(value) 

            else: 

                etree.SubElement(doc, key) 

    root = etree.Element("report") 

    _dict2xml(root, result) 

    xml_doc = etree.tostring(root) 

    print 

    # send result to report server 

    params = urllib.urlencode({ 

        'timestamp': timestamp, 

        'system': result['platform']['system'], 

        'python_version': result['platform']['python_version'], 

        'architecture': result['platform']['architecture'], 

        'tests': tests, 

        'failures': failures, 

        'errors': errors, 

        'modules': len(ttrs), 

        'xml': xml_doc 

    }) 

    headers = {"Content-type": "application/x-www-form-urlencoded", 

               "Accept": "text/plain"} 

    conn = httplib.HTTPConnection(server) 

    conn.request("POST", "/", params, headers) 

    # get the response 

    response = conn.getresponse() 

    # handle redirect 

    if response.status == 301: 

        o = urlparse(response.msg['location']) 

        conn = httplib.HTTPConnection(o.netloc) 

        conn.request("POST", o.path, params, headers) 

        # get the response 

        response = conn.getresponse() 

    # handle errors 

    if response.status == 200: 

        print("Test report has been sent to %s. Thank you!" % (server)) 

    else: 

        print("Error: Could not sent a test report to %s." % (server)) 

        print(response.reason) 

 

 

class _TextTestResult(unittest._TextTestResult): 

    """ 

    A custom test result class that can print formatted text results to a 

    stream. Used by TextTestRunner. 

    """ 

    timer = [] 

 

    def startTest(self, test): 

        self.start = time.time() 

        super(_TextTestResult, self).startTest(test) 

 

    def stopTest(self, test): 

        super(_TextTestResult, self).stopTest(test) 

        self.timer.append((test, time.time() - self.start)) 

 

 

class _TextTestRunner: 

    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1, 

                 timeit=False): 

        self.stream = unittest._WritelnDecorator(stream)  # @UndefinedVariable 

        self.descriptions = descriptions 

        self.verbosity = verbosity 

        self.timeit = timeit 

 

    def _makeResult(self): 

        return _TextTestResult(self.stream, self.descriptions, self.verbosity) 

 

    def run(self, suites): 

        """ 

        Run the given test case or test suite. 

        """ 

        results = {} 

        time_taken = 0 

        keys = sorted(suites.keys()) 

        for id in keys: 

            test = suites[id] 

            result = self._makeResult() 

            start = time.time() 

            test(result) 

            stop = time.time() 

            results[id] = result 

            total = stop - start 

            results[id].__dict__['timetaken'] = total 

            if self.timeit: 

                self.stream.writeln('') 

                self.stream.write("obspy.%s: " % (id)) 

                num = test.countTestCases() 

                try: 

                    avg = float(total) / num 

                except: 

                    avg = 0 

                msg = '%d tests in %.3fs (average of %.4fs per test)' 

                self.stream.writeln(msg % (num, total, avg)) 

                self.stream.writeln('') 

            time_taken += total 

        runs = 0 

        faileds = 0 

        erroreds = 0 

        wasSuccessful = True 

        if self.verbosity: 

            self.stream.writeln() 

        for result in results.values(): 

            failed, errored = map(len, (result.failures, result.errors)) 

            faileds += failed 

            erroreds += errored 

            if not result.wasSuccessful(): 

                wasSuccessful = False 

                result.printErrors() 

            runs += result.testsRun 

        if self.verbosity: 

            self.stream.writeln(unittest._TextTestResult.separator2) 

            self.stream.writeln("Ran %d test%s in %.3fs" % 

                                (runs, runs != 1 and "s" or "", time_taken)) 

            self.stream.writeln() 

        if not wasSuccessful: 

            self.stream.write("FAILED (") 

            if faileds: 

                self.stream.write("failures=%d" % faileds) 

            if erroreds: 

                if faileds: 

                    self.stream.write(", ") 

                self.stream.write("errors=%d" % erroreds) 

            self.stream.writeln(")") 

        elif self.verbosity: 

            self.stream.writeln("OK") 

        return results, time_taken, (faileds + erroreds) 

 

 

def runTests(verbosity=1, tests=[], report=False, log=None, 

             server="tests.obspy.org", all=False, timeit=False, 

             interactive=False, slowest=0, exclude=[], tutorial=False, 

             hostname=HOSTNAME): 

    """ 

    This function executes ObsPy test suites. 

 

    :type verbosity: int, optional 

    :param verbosity: Run tests in verbose mode (``0``=quiet, ``1``=normal, 

        ``2``=verbose, default is ``1``). 

    :type tests: list of strings, optional 

    :param tests: Test suites to run. If no suite is given all installed tests 

        suites will be started (default is a empty list). 

        Example ``['obspy.core.tests.suite']``. 

    :type report: boolean, optional 

    :param report: Submits a test report if enabled (default is ``False``). 

    :type log: string, optional 

    :param log: Filename of install log file to append to report. 

    :type server: string, optional 

    :param server: Report server URL (default is ``"tests.obspy.org"``). 

    """ 

    if all: 

        tests = copy.copy(ALL_MODULES) 

    elif not tests: 

        tests = copy.copy(DEFAULT_MODULES) 

    # remove any excluded module 

    if exclude: 

        for name in exclude: 

            try: 

                tests.remove(name) 

            except ValueError: 

                pass 

    # fetch tests suites 

    suites = _getSuites(verbosity, tests) 

    # add testsuite for all of the tutorial's rst files 

    if tutorial: 

        try: 

            # assume we are in the trunk 

            tut_path = os.path.dirname(__file__) 

            tut_path = os.path.join(tut_path, '..', '..', '..', '..', 'misc', 

                                    'docs', 'source', 'tutorial', '*.rst') 

            tut_suite = unittest.TestSuite() 

            for file in glob.glob(tut_path): 

                filesuite = doctest.DocFileSuite(file, module_relative=False) 

                tut_suite.addTest(filesuite) 

            suites['tutorial'] = tut_suite 

        except: 

            msg = "Could not add tutorial files to tests." 

            warnings.warn(msg) 

    # run test suites 

    ttr, total_time, errors = _TextTestRunner(verbosity=verbosity, 

                                              timeit=timeit).run(suites) 

    if slowest: 

        mydict = {} 

        # loop over modules 

        for mod in ttr.values(): 

            mydict.update(dict(mod.timer)) 

        sorted_tests = sorted(mydict.iteritems(), key=operator.itemgetter(1)) 

        sorted_tests = sorted_tests[::-1][:slowest] 

        sorted_tests = ["%0.3fs: %s" % (dt, desc) 

                        for (desc, dt) in sorted_tests] 

        print 

        print "Slowest Tests" 

        print "-------------" 

        print os.linesep.join(sorted_tests) 

        print 

        print 

    if interactive and not report: 

        msg = "Do you want to report this to tests.obspy.org? [n]: " 

        var = raw_input(msg).lower() 

        if var in ('y', 'yes', 'yoah', 'hell yeah!'): 

            report = True 

    if report: 

        _createReport(ttr, total_time, log, server, hostname) 

    if errors: 

        return errors 

 

 

def run(interactive=True): 

    try: 

        import matplotlib 

        matplotlib.use("AGG") 

    except ImportError: 

        msg = "unable to change backend to 'AGG' (to avoid windows popping up)" 

        warnings.warn(msg) 

    usage = "USAGE: %prog [options] module1 module2 ...\n\n" 

    parser = OptionParser(usage.strip()) 

    parser.add_option("-v", "--verbose", default=False, 

                      action="store_true", dest="verbose", 

                      help="verbose mode") 

    parser.add_option("-q", "--quiet", default=False, 

                      action="store_true", dest="quiet", 

                      help="quiet mode") 

    # filter options 

    filter = OptionGroup(parser, "Module Filter", "Providing no modules " + \ 

        "will test all ObsPy modules which don't require a " + \ 

        "active network connection.") 

    filter.add_option("--all", default=False, 

                      action="store_true", dest="all", 

                      help="test all modules (including network modules)") 

    filter.add_option("-x", "--exclude", 

                      action="append", type="str", dest="module", 

                      help="exclude given module from test") 

    filter.add_option("--tutorial", default=False, 

                      action="store_true", dest="tutorial", 

                      help="add doctests in tutorial") 

    parser.add_option_group(filter) 

    # timing / profile options 

    timing = OptionGroup(parser, "Timing/Profile Options") 

    timing.add_option("-t", "--timeit", default=False, 

                      action="store_true", dest="timeit", 

                      help="shows accumulated run times of each module") 

    timing.add_option("-s", "--slowest", default=0, 

                      type='int', dest="n", 

                      help="lists n slowest test cases") 

    timing.add_option("-p", "--profile", default=False, 

                      action="store_true", dest="profile", 

                      help="uses cProfile, saves the results to file " + \ 

                           "obspy.pstats and prints some profiling numbers") 

    parser.add_option_group(timing) 

    # reporting options 

    report = OptionGroup(parser, "Reporting Options") 

    report.add_option("-r", "--report", default=False, 

                      action="store_true", dest="report", 

                      help="automatically submit a test report") 

    report.add_option("-d", "--dontask", default=False, 

                      action="store_true", dest="dontask", 

                      help="don't explicitly ask for submitting a test report") 

    report.add_option("-u", "--server", default="tests.obspy.org", 

                      type="string", dest="server", 

                      help="report server (default is tests.obspy.org)") 

    report.add_option("-n", "--node", default=HOSTNAME, 

                      type="string", dest="hostname", 

                      help="nodename visible at the report server") 

    report.add_option("-l", "--log", default=None, 

                      type="string", dest="log", 

                      help="append log file to test report") 

    parser.add_option_group(report) 

    (options, _) = parser.parse_args() 

    # set correct verbosity level 

    if options.verbose: 

        verbosity = 2 

        # raise all numpy warnings 

        np.seterr(all='raise') 

        # raise user and deprecation warnings 

        warnings.simplefilter("error", UserWarning) 

    elif options.quiet: 

        verbosity = 0 

        # ignore user and deprecation warnings 

        warnings.simplefilter("ignore", DeprecationWarning) 

        warnings.simplefilter("ignore", UserWarning) 

        # don't ask to send a report 

        options.dontask = True 

    else: 

        verbosity = 1 

        # show all NumPy warnings 

        np.seterr(all='print') 

        # ignore user warnings 

        warnings.simplefilter("ignore", UserWarning) 

    # check for send report option or environmental settings 

    if options.report or 'OBSPY_REPORT' in os.environ.keys(): 

        report = True 

    else: 

        report = False 

    if 'OBSPY_REPORT_SERVER' in os.environ.keys(): 

        options.server = os.environ['OBSPY_REPORT_SERVER'] 

    # check interactivity settings 

    if interactive and options.dontask: 

        interactive = False 

    return runTests(verbosity, parser.largs, report, options.log, 

        options.server, options.all, options.timeit, interactive, options.n, 

        exclude=options.module, tutorial=options.tutorial, 

        hostname=options.hostname) 

 

 

def main(interactive=True): 

    """ 

    Entry point for setup.py. 

 

    Wrapper for a profiler if requested otherwise just call run() directly. 

    If profiling is enabled we disable interactivity as it would wait for user 

    input and influence the statistics. However the -r option still works. 

    """ 

    if '-p' in sys.argv or '--profile' in sys.argv: 

        try: 

            import cProfile as Profile 

        except ImportError: 

            import Profile 

        Profile.run('from obspy.core.scripts.runtests import run; run()', 

                    'obspy.pstats') 

        import pstats 

        stats = pstats.Stats('obspy.pstats') 

        print 

        print "Profiling:" 

        stats.sort_stats('cumulative').print_stats('obspy.', 20) 

        print PSTATS_HELP 

    else: 

        errors = run(interactive) 

        if errors: 

            sys.exit(1) 

 

 

if __name__ == "__main__": 

    # It is not possible to add the code of main directly to here. 

    # This script is automatically installed with name obspy-runtests by 

    # setup.py to the Scripts or bin directory of your Python distribution 

    # setup.py needs a function to which it's scripts can be linked. 

    run(interactive=False)