* [Fuego] [PATCH 1/3] svsematest: Add a new test of the rt-tests
@ 2018-01-25  1:39 Hoang Van Tuyen
  2018-01-27  0:29 ` Tim.Bird
  0 siblings, 1 reply; 2+ messages in thread
From: Hoang Van Tuyen @ 2018-01-25  1:39 UTC (permalink / raw)
  To: Bird, Timothy, fuego@lists.linuxfoundation.org

[-- Attachment #1: Type: text/plain, Size: 6464 bytes --]

svsematest starts two threads or forks two processes and measures the latency of SYSV semaphores.

Signed-off-by: Hoang Van Tuyen <tuyen.hoangvan@toshiba-tsdv.com>
---
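For context, with the default spec below this ends up running roughly the following on the target board (a sketch only: the parameters come from spec.json, and the flag descriptions follow the usual rt-tests conventions, so check ./svsematest --help on the target):

# run from $BOARD_TESTDIR/fuego.$TESTDIR on the board
./svsematest -a -t -p99 -i100 -d25 -l10000
#   -a        spread the sender/receiver pairs across all processors (affinity)
#   -t        use threads instead of forked processes
#   -p99      real-time priority of the measurement threads
#   -i100     base interval between wake-ups, in microseconds
#   -d25      distance added to each successive pair's interval, in microseconds
#   -l10000   number of measurement loops before exiting
# fuego_test.sh then pipes the output through 'tail' to keep only the final
# per-pair summary lines.
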
  .../tests/Benchmark.svsematest/chart_config.json   |  5 +++++
  engine/tests/Benchmark.svsematest/criteria.json    | 26 ++++++++++++++++++++++
  engine/tests/Benchmark.svsematest/fuego_test.sh    | 25 +++++++++++++++++++++
  engine/tests/Benchmark.svsematest/parser.py        | 23 +++++++++++++++++++
  engine/tests/Benchmark.svsematest/reference.json   | 26 ++++++++++++++++++++++
  engine/tests/Benchmark.svsematest/spec.json        | 14 ++++++++++++
  6 files changed, 119 insertions(+)
  create mode 100644 engine/tests/Benchmark.svsematest/chart_config.json
  create mode 100644 engine/tests/Benchmark.svsematest/criteria.json
  create mode 100755 engine/tests/Benchmark.svsematest/fuego_test.sh
  create mode 100755 engine/tests/Benchmark.svsematest/parser.py
  create mode 100644 engine/tests/Benchmark.svsematest/reference.json
  create mode 100644 engine/tests/Benchmark.svsematest/spec.json

diff --git a/engine/tests/Benchmark.svsematest/chart_config.json b/engine/tests/Benchmark.svsematest/chart_config.json
new file mode 100644
index 0000000..cdaf6a2
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/chart_config.json
@@ -0,0 +1,5 @@
+{
+    "chart_type": "measure_plot",
+    "measures": ["default.latencies.max_latency",
+        "default.latencies.avg_latency"]
+}
diff --git a/engine/tests/Benchmark.svsematest/criteria.json b/engine/tests/Benchmark.svsematest/criteria.json
new file mode 100644
index 0000000..a023558
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/criteria.json
@@ -0,0 +1,26 @@
+{
+    "schema_version":"1.0",
+    "criteria":[
+        {
+            "tguid":"default.latencies.max_latency",
+            "reference":{
+                "value":100,
+                "operator":"le"
+            }
+        },
+        {
+            "tguid":"default.latencies.min_latency",
+            "reference":{
+                "value":100,
+                "operator":"le"
+            }
+        },
+        {
+            "tguid":"default.latencies.avg_latency",
+            "reference":{
+                "value":100,
+                "operator":"le"
+            }
+        }
+    ]
+}
diff --git a/engine/tests/Benchmark.svsematest/fuego_test.sh b/engine/tests/Benchmark.svsematest/fuego_test.sh
new file mode 100755
index 0000000..9e90535
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/fuego_test.sh
@@ -0,0 +1,25 @@
+tarball=../rt-tests/rt-tests-v1.1.1.tar.gz
+
+NEED_ROOT=1
+
+function test_pre_check {
+    assert_define BENCHMARK_SVSEMATEST_PARAMS
+}
+
+function test_build {
+    patch -p1 -N -s < $TEST_HOME/../rt-tests/0001-Add-scheduling-policies-for-old-kernels.patch
+    make NUMA=0 svsematest
+}
+
+function test_deploy {
+    put svsematest  $BOARD_TESTDIR/fuego.$TESTDIR/
+}
+
+function test_run {
+    # svsematest does not support an option for printing only a summary on exit,
+    # so we take some lines from the end of the command's output.
+    # The number of lines to take depends on the CPU count of the target machine.
+    target_cpu_number=$(cmd "nproc")
+    getting_line_number=$(( expr $target_cpu_number + $target_cpu_number ))
+    report "cd $BOARD_TESTDIR/fuego.$TESTDIR; ./svsematest $BENCHMARK_SVSEMATEST_PARAMS | tail -$getting_line_number"
+}
diff --git a/engine/tests/Benchmark.svsematest/parser.py b/engine/tests/Benchmark.svsematest/parser.py
new file mode 100755
index 0000000..edc77ff
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/parser.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+import os, re, sys
+sys.path.insert(0, os.environ['FUEGO_CORE'] + '/engine/scripts/parser')
+import common as plib
+
+regex_string = ".*, Min\s+(\d+).*, Avg\s+(\d+), Max\s+(\d+)"
+measurements = {}
+matches = plib.parse_log(regex_string)
+
+if matches:
+    min_latencies = []
+    avg_latencies = []
+    max_latencies = []
+    for thread in matches:
+        min_latencies.append(float(thread[0]))
+        avg_latencies.append(float(thread[1]))
+        max_latencies.append(float(thread[2]))
+    measurements['default.latencies'] = [
+        {"name": "max_latency", "measure" : max(max_latencies)},
+        {"name": "min_latency", "measure" : min(min_latencies)},
+        {"name": "avg_latency", "measure" : sum(avg_latencies)/len(avg_latencies)}]
+
+sys.exit(plib.process(measurements))
diff --git a/engine/tests/Benchmark.svsematest/reference.json b/engine/tests/Benchmark.svsematest/reference.json
new file mode 100644
index 0000000..415a8dd
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/reference.json
@@ -0,0 +1,26 @@
+{
+    "test_sets":[
+        {
+            "name":"default",
+            "test_cases":[
+                {
+                    "name":"latencies",
+                    "measurements":[
+                        {
+                            "name":"max_latency",
+                            "unit":"us"
+                        },
+                        {
+                            "name":"min_latency",
+                            "unit":"us"
+                        },
+                        {
+                            "name":"avg_latency",
+                            "unit":"us"
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
diff --git a/engine/tests/Benchmark.svsematest/spec.json b/engine/tests/Benchmark.svsematest/spec.json
new file mode 100644
index 0000000..1a9a767
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/spec.json
@@ -0,0 +1,14 @@
+{
+    "testName": "Benchmark.svsematest",
+    "specs": {
+        "default": {
+            "PARAMS": "-a -t -p99 -i100 -d25 -l10000"
+        },
+        "latest": {
+            "PER_JOB_BUILD": "true",
+            "gitrepo": "https://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git",
+            "gitref": "unstable/devel/v1.1.1",
+            "PARAMS": "-a -t -p99 -i100 -d25 -l10000"
+        }
+    }
+}
-- 
2.1.4



-- 
================================================================
Hoang Van Tuyen (Mr.)
TOSHIBA SOFTWARE DEVELOPMENT (VIETNAM) CO., LTD.
16th Floor, VIT Building, 519 Kim Ma Str., Ba Dinh Dist., Hanoi, Vietnam
Tel: 84-4-22208801 (Company) - Ext.251
Fax: 84-4-22208802 (Company)
Email: tuyen.hoangvan@toshiba-tsdv.com
================================================================


[-- Attachment #2: 0001-svsematest-Add-a-new-test-of-the-rt-tests.patch --]
[-- Type: text/plain, Size: 6220 bytes --]

From a6cc8b4f1bd6bbafec7944735cc18674428acecb Mon Sep 17 00:00:00 2001
From: Hoang Van Tuyen <tuyen.hoangvan@toshiba-tsdv.com>
Date: Tue, 16 Jan 2018 15:48:17 +0700
Subject: [PATCH 1/3] svsematest: Add a new test of the rt-tests

svsematest starts two threads or forks two processes and measures the latency of SYSV semaphores.

Signed-off-by: Hoang Van Tuyen <tuyen.hoangvan@toshiba-tsdv.com>
---
 .../tests/Benchmark.svsematest/chart_config.json   |  5 +++++
 engine/tests/Benchmark.svsematest/criteria.json    | 26 ++++++++++++++++++++++
 engine/tests/Benchmark.svsematest/fuego_test.sh    | 25 +++++++++++++++++++++
 engine/tests/Benchmark.svsematest/parser.py        | 23 +++++++++++++++++++
 engine/tests/Benchmark.svsematest/reference.json   | 26 ++++++++++++++++++++++
 engine/tests/Benchmark.svsematest/spec.json        | 14 ++++++++++++
 6 files changed, 119 insertions(+)
 create mode 100644 engine/tests/Benchmark.svsematest/chart_config.json
 create mode 100644 engine/tests/Benchmark.svsematest/criteria.json
 create mode 100755 engine/tests/Benchmark.svsematest/fuego_test.sh
 create mode 100755 engine/tests/Benchmark.svsematest/parser.py
 create mode 100644 engine/tests/Benchmark.svsematest/reference.json
 create mode 100644 engine/tests/Benchmark.svsematest/spec.json

diff --git a/engine/tests/Benchmark.svsematest/chart_config.json b/engine/tests/Benchmark.svsematest/chart_config.json
new file mode 100644
index 0000000..cdaf6a2
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/chart_config.json
@@ -0,0 +1,5 @@
+{
+	"chart_type": "measure_plot",
+	"measures": ["default.latencies.max_latency",
+        "default.latencies.avg_latency"]
+}
diff --git a/engine/tests/Benchmark.svsematest/criteria.json b/engine/tests/Benchmark.svsematest/criteria.json
new file mode 100644
index 0000000..a023558
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/criteria.json
@@ -0,0 +1,26 @@
+{
+    "schema_version":"1.0",
+    "criteria":[
+        {
+            "tguid":"default.latencies.max_latency",
+            "reference":{
+                "value":100,
+                "operator":"le"
+            }
+        },
+        {
+            "tguid":"default.latencies.min_latency",
+            "reference":{
+                "value":100,
+                "operator":"le"
+            }
+        },
+        {
+            "tguid":"default.latencies.avg_latency",
+            "reference":{
+                "value":100,
+                "operator":"le"
+            }
+        }
+    ]
+}
diff --git a/engine/tests/Benchmark.svsematest/fuego_test.sh b/engine/tests/Benchmark.svsematest/fuego_test.sh
new file mode 100755
index 0000000..9e90535
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/fuego_test.sh
@@ -0,0 +1,25 @@
+tarball=../rt-tests/rt-tests-v1.1.1.tar.gz
+
+NEED_ROOT=1
+
+function test_pre_check {
+    assert_define BENCHMARK_SVSEMATEST_PARAMS
+}
+
+function test_build {
+    patch -p1 -N -s < $TEST_HOME/../rt-tests/0001-Add-scheduling-policies-for-old-kernels.patch
+    make NUMA=0 svsematest
+}
+
+function test_deploy {
+    put svsematest  $BOARD_TESTDIR/fuego.$TESTDIR/
+}
+
+function test_run {
+    # svsematest does not support an option for printing only a summary on exit,
+    # so we take some lines from the end of the command's output.
+    # The number of lines to take depends on the CPU count of the target machine.
+    target_cpu_number=$(cmd "nproc")
+    getting_line_number=$(( expr $target_cpu_number + $target_cpu_number ))
+    report "cd $BOARD_TESTDIR/fuego.$TESTDIR; ./svsematest $BENCHMARK_SVSEMATEST_PARAMS | tail -$getting_line_number"
+}
diff --git a/engine/tests/Benchmark.svsematest/parser.py b/engine/tests/Benchmark.svsematest/parser.py
new file mode 100755
index 0000000..edc77ff
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/parser.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+import os, re, sys
+sys.path.insert(0, os.environ['FUEGO_CORE'] + '/engine/scripts/parser')
+import common as plib
+
+regex_string = ".*, Min\s+(\d+).*, Avg\s+(\d+), Max\s+(\d+)"
+measurements = {}
+matches = plib.parse_log(regex_string)
+
+if matches:
+	min_latencies = []
+	avg_latencies = []
+	max_latencies = []
+	for thread in matches:
+		min_latencies.append(float(thread[0]))
+		avg_latencies.append(float(thread[1]))
+		max_latencies.append(float(thread[2]))
+	measurements['default.latencies'] = [
+		{"name": "max_latency", "measure" : max(max_latencies)},
+		{"name": "min_latency", "measure" : min(min_latencies)},
+		{"name": "avg_latency", "measure" : sum(avg_latencies)/len(avg_latencies)}]
+
+sys.exit(plib.process(measurements))
diff --git a/engine/tests/Benchmark.svsematest/reference.json b/engine/tests/Benchmark.svsematest/reference.json
new file mode 100644
index 0000000..415a8dd
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/reference.json
@@ -0,0 +1,26 @@
+{
+    "test_sets":[
+        {
+            "name":"default",
+            "test_cases":[
+                {
+                    "name":"latencies",
+                    "measurements":[
+                        {
+                            "name":"max_latency",
+                            "unit":"us"
+                        },
+                        {
+                            "name":"min_latency",
+                            "unit":"us"
+                        },
+                        {
+                            "name":"avg_latency",
+                            "unit":"us"
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
diff --git a/engine/tests/Benchmark.svsematest/spec.json b/engine/tests/Benchmark.svsematest/spec.json
new file mode 100644
index 0000000..1a9a767
--- /dev/null
+++ b/engine/tests/Benchmark.svsematest/spec.json
@@ -0,0 +1,14 @@
+{
+    "testName": "Benchmark.svsematest",
+    "specs": {
+        "default": {
+            "PARAMS": "-a -t -p99 -i100 -d25 -l10000"
+        },
+        "latest": {
+            "PER_JOB_BUILD": "true",
+            "gitrepo": "https://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git",
+            "gitref": "unstable/devel/v1.1.1",
+            "PARAMS": "-a -t -p99 -i100 -d25 -l10000"
+        }
+    }
+}
-- 
2.1.4




* Re: [Fuego] [PATCH 1/3] svsematest: Add a new test of the rt-tests
  2018-01-25  1:39 [Fuego] [PATCH 1/3] svsematest: Add a new test of the rt-tests Hoang Van Tuyen
@ 2018-01-27  0:29 ` Tim.Bird
  0 siblings, 0 replies; 2+ messages in thread
From: Tim.Bird @ 2018-01-27  0:29 UTC (permalink / raw)
  To: tuyen.hoangvan, fuego

> -----Original Message-----
> From: Hoang Van Tuyen 
> svsematest starts two threads or forks two processes and measures
> the latency of SYSV semaphores.
> 
> Signed-off-by: Hoang Van Tuyen <tuyen.hoangvan@toshiba-tsdv.com>
> ---
>   .../tests/Benchmark.svsematest/chart_config.json   |  5 +++++
>   engine/tests/Benchmark.svsematest/criteria.json    | 26 ++++++++++++++++++++++
>   engine/tests/Benchmark.svsematest/fuego_test.sh    | 25 +++++++++++++++++++++
>   engine/tests/Benchmark.svsematest/parser.py        | 23 +++++++++++++++++++
>   engine/tests/Benchmark.svsematest/reference.json   | 26 ++++++++++++++++++++++
>   engine/tests/Benchmark.svsematest/spec.json        | 14 ++++++++++++
>   6 files changed, 119 insertions(+)
>   create mode 100644 engine/tests/Benchmark.svsematest/chart_config.json
>   create mode 100644 engine/tests/Benchmark.svsematest/criteria.json
>   create mode 100755 engine/tests/Benchmark.svsematest/fuego_test.sh
>   create mode 100755 engine/tests/Benchmark.svsematest/parser.py
>   create mode 100644 engine/tests/Benchmark.svsematest/reference.json
>   create mode 100644 engine/tests/Benchmark.svsematest/spec.json
> 
> diff --git a/engine/tests/Benchmark.svsematest/chart_config.json b/engine/tests/Benchmark.svsematest/chart_config.json
> new file mode 100644
> index 0000000..cdaf6a2
> --- /dev/null
> +++ b/engine/tests/Benchmark.svsematest/chart_config.json
> @@ -0,0 +1,5 @@
> +{
> +    "chart_type": "measure_plot",
> +    "measures": ["default.latencies.max_latency",
> +        "default.latencies.avg_latency"]
> +}
> diff --git a/engine/tests/Benchmark.svsematest/criteria.json b/engine/tests/Benchmark.svsematest/criteria.json
> new file mode 100644
> index 0000000..a023558
> --- /dev/null
> +++ b/engine/tests/Benchmark.svsematest/criteria.json
> @@ -0,0 +1,26 @@
> +{
> +    "schema_version":"1.0",
> +    "criteria":[
> +        {
> +            "tguid":"default.latencies.max_latency",
> +            "reference":{
> +                "value":100,
> +                "operator":"le"
> +            }
> +        },
> +        {
> +            "tguid":"default.latencies.min_latency",
> +            "reference":{
> +                "value":100,
> +                "operator":"le"
> +            }
> +        },
> +        {
> +            "tguid":"default.latencies.avg_latency",
> +            "reference":{
> +                "value":100,
> +                "operator":"le"
> +            }
> +        }
> +    ]
> +}
> diff --git a/engine/tests/Benchmark.svsematest/fuego_test.sh b/engine/tests/Benchmark.svsematest/fuego_test.sh
> new file mode 100755
> index 0000000..9e90535
> --- /dev/null
> +++ b/engine/tests/Benchmark.svsematest/fuego_test.sh
> @@ -0,0 +1,25 @@
> +tarball=../rt-tests/rt-tests-v1.1.1.tar.gz
> +
> +NEED_ROOT=1
> +
> +function test_pre_check {
> +    assert_define BENCHMARK_SVSEMATEST_PARAMS
> +}
> +
> +function test_build {
> +    patch -p1 -N -s < $TEST_HOME/../rt-tests/0001-Add-scheduling-policies-for-old-kernels.patch
> +    make NUMA=0 svsematest
> +}
> +
> +function test_deploy {
> +    put svsematest  $BOARD_TESTDIR/fuego.$TESTDIR/
> +}
> +
> +function test_run {
> +    # svsematest does not support an option for printing only a summary on exit,
> +    # so we take some lines from the end of the command's output.
> +    # The number of lines to take depends on the CPU count of the target machine.
> +    target_cpu_number=$(cmd "nproc")
> +    getting_line_number=$(( expr $target_cpu_number + $target_cpu_number ))

$(( )) is processed by bash to do arithmetic expansion.
There's no need to use 'expr' inside it.

I replaced it with:
getting_line_number=$(( $target_cpu_number +  $target_cpu_number ))
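
For example, a minimal bash sketch of the two forms (arbitrary value, just to show the difference):

n=4
echo $(expr $n + $n)   # old style: forks the external 'expr' command, prints 8
echo $(( n + n ))      # arithmetic expansion: same result, no external command
echo $(( n * 2 ))      # equivalent and arguably clearer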

> +    report "cd $BOARD_TESTDIR/fuego.$TESTDIR; ./svsematest $BENCHMARK_SVSEMATEST_PARAMS | tail -$getting_line_number"
> +}
> diff --git a/engine/tests/Benchmark.svsematest/parser.py b/engine/tests/Benchmark.svsematest/parser.py
> new file mode 100755
> index 0000000..edc77ff
> --- /dev/null
> +++ b/engine/tests/Benchmark.svsematest/parser.py
> @@ -0,0 +1,23 @@
> +#!/usr/bin/python
> +import os, re, sys
> +sys.path.insert(0, os.environ['FUEGO_CORE'] + '/engine/scripts/parser')
> +import common as plib
> +
> +regex_string = ".*, Min\s+(\d+).*, Avg\s+(\d+), Max\s+(\d+)"
> +measurements = {}
> +matches = plib.parse_log(regex_string)
> +
> +if matches:
> +    min_latencies = []
> +    avg_latencies = []
> +    max_latencies = []
> +    for thread in matches:
> +        min_latencies.append(float(thread[0]))
> +        avg_latencies.append(float(thread[1]))
> +        max_latencies.append(float(thread[2]))
> +    measurements['default.latencies'] = [
> +        {"name": "max_latency", "measure" : max(max_latencies)},
> +        {"name": "min_latency", "measure" : min(min_latencies)},
> +        {"name": "avg_latency", "measure" : sum(avg_latencies)/len(avg_latencies)}]
> +
> +sys.exit(plib.process(measurements))
> diff --git a/engine/tests/Benchmark.svsematest/reference.json b/engine/tests/Benchmark.svsematest/reference.json
> new file mode 100644
> index 0000000..415a8dd
> --- /dev/null
> +++ b/engine/tests/Benchmark.svsematest/reference.json
> @@ -0,0 +1,26 @@
> +{
> +    "test_sets":[
> +        {
> +            "name":"default",
> +            "test_cases":[
> +                {
> +                    "name":"latencies",
> +                    "measurements":[
> +                        {
> +                            "name":"max_latency",
> +                            "unit":"us"
> +                        },
> +                        {
> +                            "name":"min_latency",
> +                            "unit":"us"
> +                        },
> +                        {
> +                            "name":"avg_latency",
> +                            "unit":"us"
> +                        }
> +                    ]
> +                }
> +            ]
> +        }
> +    ]
> +}
> diff --git a/engine/tests/Benchmark.svsematest/spec.json b/engine/tests/Benchmark.svsematest/spec.json
> new file mode 100644
> index 0000000..1a9a767
> --- /dev/null
> +++ b/engine/tests/Benchmark.svsematest/spec.json
> @@ -0,0 +1,14 @@
> +{
> +    "testName": "Benchmark.svsematest",
> +    "specs": {
> +        "default": {
> +            "PARAMS": "-a -t -p99 -i100 -d25 -l10000"
> +        },
> +        "latest": {
> +            "PER_JOB_BUILD": "true",
> +            "gitrepo": "https://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git",
> +            "gitref": "unstable/devel/v1.1.1",
> +            "PARAMS": "-a -t -p99 -i100 -d25 -l10000"
> +        }
> +    }
> +}
> --
> 2.1.4
Looks good.  Applied.
 -- Tim

