
[v3,1/4] scripts/simplebench: add simplebench.py

Message ID 20200228071914.11746-2-vsementsov@virtuozzo.com (mailing list archive)
State New, archived
Series benchmark util

Commit Message

Vladimir Sementsov-Ogievskiy Feb. 28, 2020, 7:19 a.m. UTC
Add simple benchmark table creator.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 scripts/simplebench/simplebench.py | 128 +++++++++++++++++++++++++++++
 1 file changed, 128 insertions(+)
 create mode 100644 scripts/simplebench/simplebench.py

Comments

Aleksandar Markovic Feb. 28, 2020, 1:03 p.m. UTC | #1
On Fri, Feb 28, 2020 at 8:19 AM Vladimir Sementsov-Ogievskiy
<vsementsov@virtuozzo.com> wrote:
>
> Add simple benchmark table creator.
>
> [...]
>
> +def ascii_one(result):
> +    """Return ASCII representation of bench_one() returned dict."""
> +    if 'average' in result:
> +        s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
> +        if 'n-failed' in result:
> +            s += '\n({} failed)'.format(result['n-failed'])
> +        return s
> +    else:
> +        return 'FAILED'

I think it would be visually clearer if "+-" was printed without any
space between it and the following number, using something
like this:

s = ' {:.2f} +-{:.2f}'.format(result['average'], result['delta'])

The resulting table would look like:

----------  -------------  -------------  -------------
            backup-1       backup-2       mirror
ssd -> ssd   0.43 +-0.00    4.48 +-0.06    4.38 +-0.02
ssd -> hdd   10.60 +-0.08   10.69 +-0.18   10.57 +-0.05
ssd -> nbd   33.81 +-0.37   10.67 +-0.17   10.07 +-0.07
----------  -------------  -------------  -------------

But, this is just cosmetics.

With or without the suggestion above:

Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
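
As a side note, a quick illustration of the difference between the two format
strings (plain Python, independent of the patch):

avg, delta = 4.48, 0.06
print('{:.2f} +- {:.2f}'.format(avg, delta))  # current patch: '4.48 +- 0.06'
print('{:.2f} +-{:.2f}'.format(avg, delta))   # suggestion:    '4.48 +-0.06'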

Vladimir Sementsov-Ogievskiy Feb. 28, 2020, 1:48 p.m. UTC | #2
28.02.2020 16:03, Aleksandar Markovic wrote:
> On Fri, Feb 28, 2020 at 8:19 AM Vladimir Sementsov-Ogievskiy
> <vsementsov@virtuozzo.com> wrote:
>>
>> [...]
>>
> 
> I think it would be visually clearer if "+-" was printed without any
> space between it and the following number, using something
> like this:
> 
> s = ' {:.2f} +-{:.2f}'.format(result['average'], result['delta'])
> 
> The resulting table would look like:
> 
> ----------  -------------  -------------  -------------
>              backup-1       backup-2       mirror
> ssd -> ssd   0.43 +-0.00    4.48 +-0.06    4.38 +-0.02
> ssd -> hdd   10.60 +-0.08   10.69 +-0.18   10.57 +-0.05
> ssd -> nbd   33.81 +-0.37   10.67 +-0.17   10.07 +-0.07
> ----------  -------------  -------------  -------------
> 
> But, this is just cosmetics.
> 
> With or without the suggestion above:
> 
> Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>

Thanks for reviewing!

Agree with this change, but I don't think it's worth resending the series
for this one space :)
Hope it can be applied with the pull request.

Aleksandar Markovic March 2, 2020, 9:05 p.m. UTC | #3
> >> [...]
>
> Thanks for reviewing!
>
> Agree with this change, but I don't think it's worth resending the series
> for this one space :)
> Hope it can be applied with the pull request.
>

I am an occasional Python programmer, and I felt comfortable
reviewing your series, but I am not a maintainer of this directory.
I believe Eduardo or Cleber or other more active Python
contributors would be a better choice for taking this series into
their pull request.

So I can't send this series to Peter. Cleber, Eduardo, please
see to it.

Yours,
Aleksandar

Aleksandar Markovic March 17, 2020, 2:40 p.m. UTC | #4
On Mon, Mar 2, 2020 at 10:05 PM Aleksandar Markovic
<aleksandar.m.mail@gmail.com> wrote:
>
> [...]
>

Eduardo, can you perhaps consider taking this series
into your pull request?

Thanks,
Aleksandar

Vladimir Sementsov-Ogievskiy March 17, 2020, 3:01 p.m. UTC | #5
17.03.2020 17:40, Aleksandar Markovic wrote:
> On Mon, Mar 2, 2020 at 10:05 PM Aleksandar Markovic
> <aleksandar.m.mail@gmail.com> wrote:
>>
>> [...]
>
> Eduardo, can you perhaps consider taking this series
> into your pull request?

I saw that he has taken the patches into python-next:
https://github.com/ehabkost/qemu/commits/python-next

Hopefully the pull request will come :)



Patch

diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
new file mode 100644
index 0000000000..59e7314ff6
--- /dev/null
+++ b/scripts/simplebench/simplebench.py
@@ -0,0 +1,128 @@ 
+#!/usr/bin/env python
+#
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
+    """Benchmark one test-case
+
+    test_func   -- benchmarking function with prototype
+                   test_func(env, case), which takes test_env and test_case
+                   arguments and returns {'seconds': int} (the benchmark
+                   result) on success, or {'error': str} on error. The
+                   returned dict may contain any other additional fields.
+    test_env    -- test environment: opaque first argument for test_func
+    test_case   -- test case: opaque second argument for test_func
+    count       -- how many times to call test_func, to calculate the average
+    initial_run -- do an initial run of test_func, not counted in the results
+
+    Returns dict with the following fields:
+        'runs':     list of test_func results
+        'average':  average seconds per run (exists only if at least one run
+                    succeeded)
+        'delta':    maximum delta between test_func result and the average
+                    (exists only if at least one run succeeded)
+        'n-failed': number of failed runs (exists only if at least one run
+                    failed)
+    """
+    if initial_run:
+        print('  #initial run:')
+        print('   ', test_func(test_env, test_case))
+
+    runs = []
+    for i in range(count):
+        print('  #run {}'.format(i+1))
+        res = test_func(test_env, test_case)
+        print('   ', res)
+        runs.append(res)
+
+    result = {'runs': runs}
+
+    succeeded = [r for r in runs if ('seconds' in r)]
+    if succeeded:
+        avg = sum(r['seconds'] for r in succeeded) / len(succeeded)
+        result['average'] = avg
+        result['delta'] = max(abs(r['seconds'] - avg) for r in succeeded)
+
+    if len(succeeded) < count:
+        result['n-failed'] = count - len(succeeded)
+
+    return result
+
+
+def ascii_one(result):
+    """Return ASCII representation of bench_one() returned dict."""
+    if 'average' in result:
+        s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
+        if 'n-failed' in result:
+            s += '\n({} failed)'.format(result['n-failed'])
+        return s
+    else:
+        return 'FAILED'
+
+
+def bench(test_func, test_envs, test_cases, *args, **vargs):
+    """Fill benchmark table
+
+    test_func -- benchmarking function, see bench_one for description
+    test_envs -- list of test environments, see bench_one
+    test_cases -- list of test cases, see bench_one
+    args, vargs -- additional arguments for bench_one
+
+    Returns dict with the following fields:
+        'envs':  test_envs
+        'cases': test_cases
+        'tab':   filled two-level dict, where tab[case_id][env_id] is the
+                 bench_one result for that case and environment (i.e., rows
+                 are test cases and columns are test environments)
+    """
+    tab = {}
+    results = {
+        'envs': test_envs,
+        'cases': test_cases,
+        'tab': tab
+    }
+    n = 1
+    n_tests = len(test_envs) * len(test_cases)
+    for env in test_envs:
+        for case in test_cases:
+            print('Testing {}/{}: {} :: {}'.format(n, n_tests,
+                                                   env['id'], case['id']))
+            if case['id'] not in tab:
+                tab[case['id']] = {}
+            tab[case['id']][env['id']] = bench_one(test_func, env, case,
+                                                   *args, **vargs)
+            n += 1
+
+    print('Done')
+    return results
+
+
+def ascii(results):
+    """Return ASCII representation of bench() returned dict."""
+    from tabulate import tabulate
+
+    tab = [[""] + [c['id'] for c in results['envs']]]
+    for case in results['cases']:
+        row = [case['id']]
+        for env in results['envs']:
+            row.append(ascii_one(results['tab'][case['id']][env['id']]))
+        tab.append(row)
+
+    return tabulate(tab)
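
A minimal usage sketch (illustrative only, not part of the patch): do_bench
and the 'scale'/'delay' fields are invented names, ascii() additionally
requires the tabulate module, and the script is assumed to be run from
scripts/simplebench/ so that simplebench imports:

import time

import simplebench

def do_bench(env, case):
    """Hypothetical test_func: pretend to benchmark by sleeping."""
    try:
        start = time.time()
        time.sleep(case['delay'] * env['scale'])
        return {'seconds': time.time() - start}
    except OSError as e:
        return {'error': str(e)}

test_envs = [{'id': 'scale-1', 'scale': 1}, {'id': 'scale-2', 'scale': 2}]
test_cases = [{'id': 'short', 'delay': 0.1}, {'id': 'long', 'delay': 0.3}]

results = simplebench.bench(do_bench, test_envs, test_cases, count=3)
print(simplebench.ascii(results))

As a worked example of the statistics: runs of 10.60, 10.69 and 10.57 seconds
give average (10.60 + 10.69 + 10.57) / 3 = 10.62 and delta
max(0.02, 0.07, 0.05) = 0.07, rendered by ascii_one() as '10.62 +- 0.07'.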