.. This work is licensed under a Creative Commons Attribution 4.0 International
.. License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Yin Kanglin and others.
.. 14_ykl@tongji.edu.cn

*************************************
Yardstick Test Case Description TC057
*************************************

+-----------------------------------------------------------------------------+
|OpenStack Controller Cluster Management Service High Availability            |
+==============+==============================================================+
|test case id  | OPNFV_YARDSTICK_TC057: OpenStack Controller Cluster          |
|              | Management Service High Availability                         |
+--------------+--------------------------------------------------------------+
|test purpose  | This test case verifies the quorum configuration of the      |
|              | cluster manager (pacemaker) on controller nodes. When a      |
|              | controller node which holds all active application           |
|              | resources fails to communicate with the other cluster nodes  |
|              | (via corosync), the test case checks whether the standby     |
|              | application resources take the place of those active         |
|              | application resources, which should be regarded as down by   |
|              | the cluster manager.                                         |
+--------------+--------------------------------------------------------------+
|test method   | This test case kills the processes of the cluster messaging  |
|              | service (corosync) on a selected controller node (the node   |
|              | that holds the active application resources), then checks    |
|              | whether the active application resources are switched to     |
|              | other controller nodes and whether the OpenStack commands    |
|              | still work.                                                  |
+--------------+--------------------------------------------------------------+
|attackers     | In this test case, an attacker called "kill-process" is      |
|              | needed. This attacker includes three parameters:             |
|              | 1) fault_type: which is used for finding the attacker's      |
|              | scripts. It should always be set to "kill-process" in this   |
|              | test case.                                                   |
|              | 2) process_name: which is the process name of the cluster    |
|              | messaging service. If multiple processes use the same name   |
|              | on the host, all of them are killed by this attacker.        |
|              | 3) host: which is the name of the controller node being      |
|              | attacked.                                                    |
|              |                                                              |
|              | In this case, process_name should be set to "corosync",      |
|              | for example:                                                 |
|              | -fault_type: "kill-process"                                  |
|              | -process_name: "corosync"                                    |
|              | -host: node1                                                 |
+--------------+--------------------------------------------------------------+
|monitors      | In this test case, one kind of monitor is needed:            |
|              | 1. the "openstack-cmd" monitor constantly requests a         |
|              |    specific OpenStack command, which needs two parameters:   |
|              | 1) monitor_type: which is used for finding the monitor class |
|              | and related scripts. It should always be set to              |
|              | "openstack-cmd" for this monitor.                            |
|              | 2) command_name: which is the command name used for the      |
|              | request.                                                     |
|              |                                                              |
|              | In this case, the command_name of each monitor should be a   |
|              | command for a service that is managed by the cluster         |
|              | manager. (Since rabbitmq and haproxy are managed by          |
|              | pacemaker, most OpenStack services can be used to check      |
|              | high availability in this case.)                             |
|              |                                                              |
|              | (e.g.)                                                       |
|              | monitor1:                                                    |
|              | -monitor_type: "openstack-cmd"                               |
|              | -command_name: "nova image-list"                             |
|              | monitor2:                                                    |
|              | -monitor_type: "openstack-cmd"                               |
|              | -command_name: "neutron router-list"                         |
|              | monitor3:                                                    |
|              | -monitor_type: "openstack-cmd"                               |
|              | -command_name: "heat stack-list"                             |
|              | monitor4:                                                    |
|              | -monitor_type: "openstack-cmd"                               |
|              | -command_name: "cinder list"                                 |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|checkers      | In this test case, a checker is needed; the checker checks   |
|              | the status of application resources in pacemaker and has     |
|              | five parameters:                                             |
|              | 1) checker_type: which is used for finding the result        |
|              | checker class and related scripts. In this case the checker  |
|              | type will be "pacemaker-check-resource"                      |
|              | 2) resource_name: the application resource name              |
|              | 3) resource_status: the expected status of the resource      |
|              | 4) expectedValue: the expected value for the output of the   |
|              | checker script; in this case the expected value will be the  |
|              | identifier in the cluster manager                            |
|              | 5) condition: whether the expected value is in the output of |
|              | the checker script or is exactly the same as the output.     |
|              | (note: pcs is required to be installed on the controller     |
|              | node in order to run this checker)                           |
|              |                                                              |
|              | (e.g.)                                                       |
|              | checker1:                                                    |
|              | -checker_type: "pacemaker-check-resource"                    |
|              | -resource_name: "p_rabbitmq-server"                          |
|              | -resource_status: "Stopped"                                  |
|              | -expectedValue: "node-1"                                     |
|              | -condition: "in"                                             |
|              | checker2:                                                    |
|              | -checker_type: "pacemaker-check-resource"                    |
|              | -resource_name: "p_rabbitmq-server"                          |
|              | -resource_status: "Master"                                   |
|              | -expectedValue: "node-2"                                     |
|              | -condition: "in"                                             |
+--------------+--------------------------------------------------------------+
|metrics       | In this test case, there is one metric:                      |
|              | service_outage_time: which indicates the maximum outage      |
|              | time (seconds) of the specified OpenStack command request.   |
+--------------+--------------------------------------------------------------+
|test tool     | None. Self-developed.                                        |
+--------------+--------------------------------------------------------------+
|references    | ETSI NFV REL001                                              |
+--------------+--------------------------------------------------------------+
|configuration | This test case needs two configuration files:                |
|              | 1) test case file: opnfv_yardstick_tc057.yaml                |
|              | -Attackers: see above "attackers" description                |
|              | -Monitors: see above "monitors" description                  |
|              | -Checkers: see above "checkers" description                  |
|              | -Steps: the test case execution step, see "test sequence"    |
|              | description below                                            |
|              |                                                              |
|              | 2) POD file: pod.yaml                                        |
|              | The POD configuration should be recorded in pod.yaml first.  |
|              | The "host" item in this test case will use the node name in  |
|              | the pod.yaml.                                                |
+--------------+--------------------------------------------------------------+
|test sequence | description and expected result                              |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 1        | start monitors:                                              |
|              | each monitor will run in an independent process              |
|              |                                                              |
|              | Result: The monitor info will be collected.                  |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 2        | do attacker: connect to the host through SSH, and then       |
|              | execute the kill-process script with the parameter value     |
|              | specified by "process_name"                                  |
|              |                                                              |
|              | Result: Process will be killed.                              |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 3        | do checker: check whether the status of the application      |
|              | resources on different nodes is updated                      |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 4        | stop monitors after a period of time specified by            |
|              | "waiting_time"                                               |
|              |                                                              |
|              | Result: The monitor info will be aggregated.                 |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 5        | verify the SLA                                               |
|              |                                                              |
|              | Result: The test case is passed or not.                      |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|post-action   | It is the action taken when the test case exits. It checks   |
|              | the status of the cluster messaging process (corosync) on    |
|              | the host, and restarts the process if it is not running, for |
|              | the next test cases.                                         |
+--------------+--------------------------------------------------------------+
|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
|              | execution problem.                                           |
+--------------+--------------------------------------------------------------+
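The attacker, monitor and checker parameters described above are declared in
the test case file. The following is a minimal, hypothetical sketch of how
such a scenario could be expressed in opnfv_yardstick_tc057.yaml; the
surrounding schema (the scenario type, section names and any SLA fields) is an
assumption and may differ from the file actually shipped with Yardstick::

    scenarios:
      -
        type: ServiceHA                       # assumed scenario type
        options:
          attackers:
            -
              fault_type: "kill-process"      # selects the attacker script
              process_name: "corosync"        # cluster messaging service
              host: node1                     # node name from pod.yaml
          monitors:
            -
              monitor_type: "openstack-cmd"
              command_name: "nova image-list"
            -
              monitor_type: "openstack-cmd"
              command_name: "neutron router-list"
          checkers:
            -
              checker_type: "pacemaker-check-resource"
              resource_name: "p_rabbitmq-server"
              resource_status: "Stopped"
              expectedValue: "node-1"
              condition: "in"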
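Likewise, the "host" value refers to a node entry recorded in pod.yaml. A
minimal, hypothetical controller entry could look like the following; field
names such as role, ip and key_filename are assumptions and should be checked
against the pod.yaml produced for the actual deployment::

    nodes:
      -
        name: node1                           # referenced by the attacker "host"
        role: Controller
        ip: 192.168.0.3                       # assumed management IP
        user: root
        key_filename: /root/.ssh/id_rsa       # or a "password" field instead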
ass="p">(struct super_block *, u32, struct hfs_find_data *); extern int hfs_cat_create(u32, struct inode *, struct qstr *, struct inode *); extern int hfs_cat_delete(u32, struct inode *, struct qstr *); extern int hfs_cat_move(u32, struct inode *, struct qstr *, struct inode *, struct qstr *); extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *); /* dir.c */ extern const struct file_operations hfs_dir_operations; extern const struct inode_operations hfs_dir_inode_operations; /* extent.c */ extern int hfs_ext_keycmp(const btree_key *, const btree_key *); extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int); extern int hfs_ext_write_extent(struct inode *); extern int hfs_extend_file(struct inode *); extern void hfs_file_truncate(struct inode *); extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int); /* inode.c */ extern const struct address_space_operations hfs_aops; extern const struct address_space_operations hfs_btree_aops; extern struct inode *hfs_new_inode(struct inode *, struct qstr *, umode_t); extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); extern int hfs_write_inode(struct inode *, struct writeback_control *); extern int hfs_inode_setattr(struct dentry *, struct iattr *); extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, __be32 log_size, __be32 phys_size, u32 clump_size); extern struct inode *hfs_iget(struct super_block *, struct hfs_cat_key *, hfs_cat_rec *); extern void hfs_evict_inode(struct inode *); extern void hfs_delete_inode(struct inode *); /* attr.c */ extern int hfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); extern ssize_t hfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size); extern ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size); /* mdb.c */ extern int hfs_mdb_get(struct super_block *); extern void hfs_mdb_commit(struct super_block *); extern void hfs_mdb_close(struct super_block *); extern void hfs_mdb_put(struct super_block *); /* part_tbl.c */ extern int hfs_part_find(struct super_block *, sector_t *, sector_t *); /* string.c */ extern const struct dentry_operations hfs_dentry_operations; extern int hfs_hash_dentry(const struct dentry *, struct qstr *); extern int hfs_strcmp(const unsigned char *, unsigned int, const unsigned char *, unsigned int); extern int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); /* trans.c */ extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *); extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *); /* super.c */ extern void hfs_mark_mdb_dirty(struct super_block *sb); /* * There are two time systems. Both are based on seconds since * a particular time/date. * Unix: unsigned lil-endian since 00:00 GMT, Jan. 1, 1970 * mac: unsigned big-endian since 00:00 GMT, Jan. 
1, 1904 * */ #define __hfs_u_to_mtime(sec) cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60) #define __hfs_m_to_utime(sec) (be32_to_cpu(sec) - 2082844800U + sys_tz.tz_minuteswest * 60) #define HFS_I(inode) (list_entry(inode, struct hfs_inode_info, vfs_inode)) #define HFS_SB(sb) ((struct hfs_sb_info *)(sb)->s_fs_info) #define hfs_m_to_utime(time) (struct timespec){ .tv_sec = __hfs_m_to_utime(time) } #define hfs_u_to_mtime(time) __hfs_u_to_mtime((time).tv_sec) #define hfs_mtime() __hfs_u_to_mtime(get_seconds()) static inline const char *hfs_mdb_name(struct super_block *sb) { return sb->s_id; } static inline void hfs_bitmap_dirty(struct super_block *sb) { set_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags); hfs_mark_mdb_dirty(sb); } #define sb_bread512(sb, sec, data) ({ \ struct buffer_head *__bh; \ sector_t __block; \ loff_t __start; \ int __offset; \ \ __start = (loff_t)(sec) << HFS_SECTOR_SIZE_BITS;\ __block = __start >> (sb)->s_blocksize_bits; \ __offset = __start & ((sb)->s_blocksize - 1); \ __bh = sb_bread((sb), __block); \ if (likely(__bh != NULL)) \ data = (void *)(__bh->b_data + __offset);\ else \ data = NULL; \ __bh; \ }) #endif