Browse code

linux-secure: Fix static-call aesni patch and disable RANDSTRUCT

- RANDSTRUCT causes `boot_cpu_data` structure instability when the fips_canister
binary is used in other flavors that do not have the RANDSTRUCT feature.
Disable this feature to maintain canister compatibility.

- Update fcw_warn, fcw_warn_on and fcw_warn_on_once calls in canister

Change-Id: I3fdf8e11f97b4419396adf0240d2f0a0792fbe1e
Signed-off-by: Keerthana K <keerthanak@vmware.com>
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/c/photon/+/20179
Tested-by: gerrit-photon <photon-checkins@vmware.com>

Keerthana K authored on 2023/04/05 17:24:01
Showing 6 changed files
... ...
@@ -161,23 +161,26 @@ void fcw_bug_on(int cond)
161 161
 
162 162
 int fcw_warn_on(int cond)
163 163
 {
164
-	if(unlikely(cond))
164
+	int __ret_warn_on = !!(cond);
165
+	if(unlikely(__ret_warn_on))
165 166
 		__WARN();
166
-	return unlikely(cond);
167
+	return unlikely(__ret_warn_on);
167 168
 }
168 169
 
169 170
 int fcw_warn_on_once(int cond)
170 171
 {
171
-	if(unlikely(cond))
172
+	int __ret_warn_on = !!(cond);
173
+	if(unlikely(__ret_warn_on))
172 174
 		__WARN_FLAGS(BUGFLAG_ONCE | BUGFLAG_TAINT(TAINT_WARN));
173
-	return unlikely(cond);
175
+	return unlikely(__ret_warn_on);
174 176
 }
175 177
 
176 178
 int fcw_warn(int cond, const char *fmt, ...)
177 179
 {
178
-	if(unlikely(cond))
180
+	int __ret_warn_on = !!(cond);
181
+	if(unlikely(__ret_warn_on))
179 182
 		__WARN_printf(TAINT_WARN, fmt);
180
-	return unlikely(cond);
183
+	return unlikely(__ret_warn_on);
181 184
 }
182 185
 
183 186
 void *fcw_memcpy(void *dst, const void *src, size_t len)
184 187
new file mode 100644
... ...
@@ -0,0 +1,155 @@
0
+From 6dca4744eec08e19aaf344399833d9f482924267 Mon Sep 17 00:00:00 2001
1
+From: Keerthana K <keerthanak@vmware.com>
2
+Date: Wed, 29 Mar 2023 10:40:59 +0000
3
+Subject: [PATCH] aesni_intel: Remove static call
4
+
5
+Signed-off-by: Keerthana K <keerthanak@vmware.com>
6
+---
7
+ arch/x86/crypto/aesni-intel_glue.c | 47 ++++++++++++++++++------------
8
+ 1 file changed, 28 insertions(+), 19 deletions(-)
9
+
10
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
11
+index 10420b2aa..0628c2bca 100644
12
+--- a/arch/x86/crypto/aesni-intel_glue.c
13
+@@ -34,7 +34,6 @@
14
+ #include <linux/jump_label.h>
15
+ #include <linux/workqueue.h>
16
+ #include <linux/spinlock.h>
17
+-#include <linux/static_call.h>
18
+ #include <crypto/gf128mul.h>
19
+ 
20
+ void fcw_kernel_fpu_begin(void);
21
+@@ -111,9 +110,11 @@ asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
22
+ 
23
+ #ifdef CONFIG_X86_64
24
+ 
25
++static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
26
++				const u8 *in, unsigned int len, u8 *iv);
27
++
28
+ asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
29
+ 			      const u8 *in, unsigned int len, u8 *iv);
30
+-DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
31
+ 
32
+ /* Scatter / Gather routines, with args similar to above */
33
+ asmlinkage void aesni_gcm_init(void *ctx,
34
+@@ -131,7 +132,6 @@ asmlinkage void aesni_gcm_dec_update(void *ctx,
35
+ asmlinkage void aesni_gcm_finalize(void *ctx,
36
+ 				   struct gcm_context_data *gdata,
37
+ 				   u8 *auth_tag, unsigned long auth_tag_len);
38
+-
39
+ asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
40
+ 		void *keys, u8 *out, unsigned int num_bytes);
41
+ asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
42
+@@ -175,6 +175,15 @@ asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
43
+ 				   struct gcm_context_data *gdata,
44
+ 				   u8 *auth_tag, unsigned long auth_tag_len);
45
+ 
46
++/*
47
++static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
48
++	.init = &aesni_gcm_init_avx_gen2,
49
++	.enc_update = &aesni_gcm_enc_update_avx_gen2,
50
++	.dec_update = &aesni_gcm_dec_update_avx_gen2,
51
++	.finalize = &aesni_gcm_finalize_avx_gen2,
52
++};
53
++*/
54
++
55
+ /*
56
+  * asmlinkage void aesni_gcm_init_avx_gen4()
57
+  * gcm_data *my_ctx_data, context data
58
+@@ -198,8 +207,8 @@ asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
59
+ 				   struct gcm_context_data *gdata,
60
+ 				   u8 *auth_tag, unsigned long auth_tag_len);
61
+ 
62
+-static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
63
+-static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
64
++static __ro_after_init int gcm_use_avx = 0;
65
++static __ro_after_init int gcm_use_avx2 = 0;
66
+ 
67
+ static inline struct
68
+ aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
69
+@@ -524,10 +533,9 @@ static int ctr_crypt(struct skcipher_request *req)
70
+ 	while ((nbytes = walk.nbytes) > 0) {
71
+ 		fcw_kernel_fpu_begin();
72
+ 		if (nbytes & AES_BLOCK_MASK)
73
+-			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
74
+-						       walk.src.virt.addr,
75
+-						       nbytes & AES_BLOCK_MASK,
76
+-						       walk.iv);
77
++			aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
78
++						nbytes & AES_BLOCK_MASK, walk.iv);
79
++
80
+ 		nbytes &= ~AES_BLOCK_MASK;
81
+ 
82
+ 		if (walk.nbytes == walk.total && nbytes > 0) {
83
+@@ -712,10 +720,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
84
+ 	}
85
+ 
86
+ 	fcw_kernel_fpu_begin();
87
+-	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
88
++	if (likely(gcm_use_avx2) && do_avx2)
89
+ 		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
90
+ 					assoclen);
91
+-	else if (static_branch_likely(&gcm_use_avx) && do_avx)
92
++	else if (likely(gcm_use_avx) && do_avx)
93
+ 		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
94
+ 					assoclen);
95
+ 	else
96
+@@ -732,7 +740,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
97
+ 
98
+ 	while (walk.nbytes > 0) {
99
+ 		fcw_kernel_fpu_begin();
100
+-		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
101
++		if (likely(gcm_use_avx2) && do_avx2) {
102
+ 			if (enc)
103
+ 				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
104
+ 							      walk.dst.virt.addr,
105
+@@ -743,7 +751,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
106
+ 							      walk.dst.virt.addr,
107
+ 							      walk.src.virt.addr,
108
+ 							      walk.nbytes);
109
+-		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
110
++		} else if (likely(gcm_use_avx) && do_avx) {
111
+ 			if (enc)
112
+ 				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
113
+ 							      walk.dst.virt.addr,
114
+@@ -770,10 +778,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
115
+ 		return err;
116
+ 
117
+ 	fcw_kernel_fpu_begin();
118
+-	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
119
++	if (likely(gcm_use_avx2) && do_avx2)
120
+ 		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
121
+ 					    auth_tag_len);
122
+-	else if (static_branch_likely(&gcm_use_avx) && do_avx)
123
++	else if (likely(gcm_use_avx) && do_avx)
124
+ 		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
125
+ 					    auth_tag_len);
126
+ 	else
127
+@@ -1248,18 +1256,19 @@ static int __init aesni_init(void)
128
+ #ifdef CONFIG_X86_64
129
+ 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
130
+ 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
131
+-		static_branch_enable(&gcm_use_avx);
132
+-		static_branch_enable(&gcm_use_avx2);
133
++		gcm_use_avx = 1;
134
++		gcm_use_avx2 = 1;
135
+ 	} else
136
+ 	if (boot_cpu_has(X86_FEATURE_AVX)) {
137
+ 		pr_info("AVX version of gcm_enc/dec engaged.\n");
138
+-		static_branch_enable(&gcm_use_avx);
139
++		gcm_use_avx = 1;
140
+ 	} else {
141
+ 		pr_info("SSE version of gcm_enc/dec engaged.\n");
142
+ 	}
143
++	aesni_ctr_enc_tfm = aesni_ctr_enc;
144
+ 	if (boot_cpu_has(X86_FEATURE_AVX)) {
145
+ 		/* optimize performance of ctr mode encryption transform */
146
+-		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
147
++		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
148
+ 		pr_info("AES CTR mode by8 optimization enabled\n");
149
+ 	}
150
+ #endif /* CONFIG_X86_64 */
151
+-- 
152
+2.19.0
153
+
0 154
deleted file mode 100644
... ...
@@ -1,229 +0,0 @@
1
-From f723aded2fac053eca30c6ca6be0387cab23509b Mon Sep 17 00:00:00 2001
2
-From: Keerthana K <keerthanak@vmware.com>
3
-Date: Wed, 7 Dec 2022 07:10:44 +0000
4
-Subject: [PATCH 4/4] aesni_intel_glue: Revert static calls with indirect calls
5
-
6
-Signed-off-by: Keerthana K <keerthanak@vmware.com>
7
- arch/x86/crypto/aesni-intel_glue.c | 111 +++++++++++++----------------
8
- 1 file changed, 50 insertions(+), 61 deletions(-)
9
-
10
-diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
11
-index 10420b2aa..a6da1abb9 100644
12
-+++ b/arch/x86/crypto/aesni-intel_glue.c
13
-@@ -31,10 +31,8 @@
14
- #include <crypto/internal/aead.h>
15
- #include <crypto/internal/simd.h>
16
- #include <crypto/internal/skcipher.h>
17
--#include <linux/jump_label.h>
18
- #include <linux/workqueue.h>
19
- #include <linux/spinlock.h>
20
--#include <linux/static_call.h>
21
- #include <crypto/gf128mul.h>
22
- 
23
- void fcw_kernel_fpu_begin(void);
24
-@@ -111,9 +109,10 @@ asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
25
- 
26
- #ifdef CONFIG_X86_64
27
- 
28
-+static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
29
-+			      const u8 *in, unsigned int len, u8 *iv);
30
- asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
31
- 			      const u8 *in, unsigned int len, u8 *iv);
32
--DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
33
- 
34
- /* Scatter / Gather routines, with args similar to above */
35
- asmlinkage void aesni_gcm_init(void *ctx,
36
-@@ -132,6 +131,24 @@ asmlinkage void aesni_gcm_finalize(void *ctx,
37
- 				   struct gcm_context_data *gdata,
38
- 				   u8 *auth_tag, unsigned long auth_tag_len);
39
- 
40
-+static const struct aesni_gcm_tfm_s {
41
-+	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
42
-+		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
43
-+	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
44
-+			   const u8 *in, unsigned long plaintext_len);
45
-+	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
46
-+			   const u8 *in, unsigned long ciphertext_len);
47
-+	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
48
-+			 u8 *auth_tag, unsigned long auth_tag_len);
49
-+} *aesni_gcm_tfm;
50
-+
51
-+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
52
-+	.init = &aesni_gcm_init,
53
-+	.enc_update = &aesni_gcm_enc_update,
54
-+	.dec_update = &aesni_gcm_dec_update,
55
-+	.finalize = &aesni_gcm_finalize,
56
-+};
57
-+
58
- asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
59
- 		void *keys, u8 *out, unsigned int num_bytes);
60
- asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
61
-@@ -175,6 +192,13 @@ asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
62
- 				   struct gcm_context_data *gdata,
63
- 				   u8 *auth_tag, unsigned long auth_tag_len);
64
- 
65
-+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
66
-+	.init = &aesni_gcm_init_avx_gen2,
67
-+	.enc_update = &aesni_gcm_enc_update_avx_gen2,
68
-+	.dec_update = &aesni_gcm_dec_update_avx_gen2,
69
-+	.finalize = &aesni_gcm_finalize_avx_gen2,
70
-+};
71
-+
72
- /*
73
-  * asmlinkage void aesni_gcm_init_avx_gen4()
74
-  * gcm_data *my_ctx_data, context data
75
-@@ -198,8 +222,12 @@ asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
76
- 				   struct gcm_context_data *gdata,
77
- 				   u8 *auth_tag, unsigned long auth_tag_len);
78
- 
79
--static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
80
--static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
81
-+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
82
-+	.init = &aesni_gcm_init_avx_gen4,
83
-+	.enc_update = &aesni_gcm_enc_update_avx_gen4,
84
-+	.dec_update = &aesni_gcm_dec_update_avx_gen4,
85
-+	.finalize = &aesni_gcm_finalize_avx_gen4,
86
-+};
87
- 
88
- static inline struct
89
- aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
90
-@@ -524,10 +552,8 @@ static int ctr_crypt(struct skcipher_request *req)
91
- 	while ((nbytes = walk.nbytes) > 0) {
92
- 		fcw_kernel_fpu_begin();
93
- 		if (nbytes & AES_BLOCK_MASK)
94
--			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
95
--						       walk.src.virt.addr,
96
--						       nbytes & AES_BLOCK_MASK,
97
--						       walk.iv);
98
-+			aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
99
-+				              nbytes & AES_BLOCK_MASK, walk.iv);
100
- 		nbytes &= ~AES_BLOCK_MASK;
101
- 
102
- 		if (walk.nbytes == walk.total && nbytes > 0) {
103
-@@ -678,12 +704,12 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
104
- 			      u8 *iv, void *aes_ctx, u8 *auth_tag,
105
- 			      unsigned long auth_tag_len)
106
- {
107
-+	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
108
- 	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
109
- 	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
110
- 	unsigned long left = req->cryptlen;
111
- 	struct scatter_walk assoc_sg_walk;
112
- 	struct skcipher_walk walk;
113
--	bool do_avx, do_avx2;
114
- 	u8 *assocmem = NULL;
115
- 	u8 *assoc;
116
- 	int err;
117
-@@ -691,8 +717,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
118
- 	if (!enc)
119
- 		left -= auth_tag_len;
120
- 
121
--	do_avx = (left >= AVX_GEN2_OPTSIZE);
122
--	do_avx2 = (left >= AVX_GEN4_OPTSIZE);
123
-+	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
124
-+		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
125
-+	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
126
-+		gcm_tfm = &aesni_gcm_tfm_sse;
127
- 
128
- 	/* Linearize assoc, if not already linear */
129
- 	if (req->src->length >= assoclen && req->src->length) {
130
-@@ -712,14 +740,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
131
- 	}
132
- 
133
- 	fcw_kernel_fpu_begin();
134
--	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
135
--		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
136
--					assoclen);
137
--	else if (static_branch_likely(&gcm_use_avx) && do_avx)
138
--		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
139
--					assoclen);
140
--	else
141
--		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
142
-+	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
143
- 	fcw_kernel_fpu_end();
144
- 
145
- 	if (!assocmem)
146
-@@ -732,35 +753,9 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
147
- 
148
- 	while (walk.nbytes > 0) {
149
- 		fcw_kernel_fpu_begin();
150
--		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
151
--			if (enc)
152
--				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
153
--							      walk.dst.virt.addr,
154
--							      walk.src.virt.addr,
155
--							      walk.nbytes);
156
--			else
157
--				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
158
--							      walk.dst.virt.addr,
159
--							      walk.src.virt.addr,
160
--							      walk.nbytes);
161
--		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
162
--			if (enc)
163
--				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
164
--							      walk.dst.virt.addr,
165
--							      walk.src.virt.addr,
166
--							      walk.nbytes);
167
--			else
168
--				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
169
--							      walk.dst.virt.addr,
170
--							      walk.src.virt.addr,
171
--							      walk.nbytes);
172
--		} else if (enc) {
173
--			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
174
--					     walk.src.virt.addr, walk.nbytes);
175
--		} else {
176
--			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
177
--					     walk.src.virt.addr, walk.nbytes);
178
--		}
179
-+		(enc ? gcm_tfm->enc_update
180
-+		     : gcm_tfm->dec_update)(aes_ctx, data, walk.dst.virt.addr,
181
-+					    walk.src.virt.addr, walk.nbytes);
182
- 		fcw_kernel_fpu_end();
183
- 
184
- 		err = skcipher_walk_done(&walk, 0);
185
-@@ -770,14 +765,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
186
- 		return err;
187
- 
188
- 	fcw_kernel_fpu_begin();
189
--	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
190
--		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
191
--					    auth_tag_len);
192
--	else if (static_branch_likely(&gcm_use_avx) && do_avx)
193
--		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
194
--					    auth_tag_len);
195
--	else
196
--		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
197
-+	gcm_tfm->finalize(aes_ctx, data, auth_tag, auth_tag_len);
198
- 	fcw_kernel_fpu_end();
199
- 
200
- 	return 0;
201
-@@ -1248,18 +1236,19 @@ static int __init aesni_init(void)
202
- #ifdef CONFIG_X86_64
203
- 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
204
- 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
205
--		static_branch_enable(&gcm_use_avx);
206
--		static_branch_enable(&gcm_use_avx2);
207
-+		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
208
- 	} else
209
- 	if (boot_cpu_has(X86_FEATURE_AVX)) {
210
- 		pr_info("AVX version of gcm_enc/dec engaged.\n");
211
--		static_branch_enable(&gcm_use_avx);
212
-+		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
213
- 	} else {
214
- 		pr_info("SSE version of gcm_enc/dec engaged.\n");
215
-+		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
216
- 	}
217
-+	aesni_ctr_enc_tfm = aesni_ctr_enc;
218
- 	if (boot_cpu_has(X86_FEATURE_AVX)) {
219
- 		/* optimize performance of ctr mode encryption transform */
220
--		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
221
-+		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
222
- 		pr_info("AES CTR mode by8 optimization enabled\n");
223
- 	}
224
- #endif /* CONFIG_X86_64 */
225
-2.38.1
226
-
... ...
@@ -684,7 +684,7 @@ index 016f785b8..714ff4e6d 100644
684 684
 -
685 685
 -	for (i = 0; i < ndigits; i++)
686 686
 -		dest[i] = src[i];
687
-+	fcw_memcpy(dest, src, sizeof(u64)*ndigits);
687
++	fcw_memcpy(dest, src, sizeof(u64) * ndigits);
688 688
  }
689 689
  
690 690
  /* Returns sign of left - right. */
... ...
@@ -693,7 +693,7 @@ index 016f785b8..714ff4e6d 100644
693 693
  	u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
694 694
  
695 695
 -	if (WARN_ON(pk->ndigits != curve->g.ndigits))
696
-+	if (fcw_warn_on(!!(pk->ndigits != curve->g.ndigits)))
696
++	if (fcw_warn_on(pk->ndigits != curve->g.ndigits))
697 697
  		return -EINVAL;
698 698
  
699 699
  	/* Check 1: Verify key is not the zero point. */
... ...
@@ -907,7 +907,7 @@ index 826dd0833..88039140f 100644
907 907
  	int err;
908 908
  
909 909
 -	if (WARN_ON(req->dst) || WARN_ON(!digest_size) ||
910
-+	if (fcw_warn_on(!!(req->dst)) || fcw_warn_on(!!(!digest_size)) ||
910
++	if (fcw_warn_on(req->dst) || fcw_warn_on(!digest_size) ||
911 911
  	    !ctx->key_size || sig_size != ctx->key_size)
912 912
  		return -EINVAL;
913 913
  
... ...
@@ -1062,7 +1062,7 @@ index 98422b8da..592ce69c3 100644
1062 1062
  
1063 1063
  	BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl));
1064 1064
 -	if (WARN_ON(ndivs > ARRAY_SIZE(partitions)))
1065
-+	if (fcw_warn_on(!!(ndivs > ARRAY_SIZE(partitions))))
1065
++	if (fcw_warn_on(ndivs > ARRAY_SIZE(partitions)))
1066 1066
  		return -EINVAL;
1067 1067
  
1068 1068
  	/* Calculate the (div, length) pairs */
... ...
@@ -1071,7 +1071,7 @@ index 98422b8da..592ce69c3 100644
1071 1071
  		while (offset + partitions[i].length + TESTMGR_POISON_LEN >
1072 1072
  		       2 * PAGE_SIZE) {
1073 1073
 -			if (WARN_ON(offset <= 0))
1074
-+			if (fcw_warn_on(!!(offset <= 0)))
1074
++			if (fcw_warn_on(offset <= 0))
1075 1075
  				return -EINVAL;
1076 1076
  			offset /= 2;
1077 1077
  		}
... ...
@@ -1089,7 +1089,7 @@ index 98422b8da..592ce69c3 100644
1089 1089
 -			copied = copy_from_iter(addr, copy_len, data);
1090 1090
 -			if (WARN_ON(copied != copy_len))
1091 1091
 +			copied = fcw_copy_from_iter(addr, copy_len, data);
1092
-+			if (fcw_warn_on(!!(copied != copy_len)))
1092
++			if (fcw_warn_on(copied != copy_len))
1093 1093
  				return -EINVAL;
1094 1094
  			testmgr_poison(addr + copy_len, partitions[i].length +
1095 1095
  				       TESTMGR_POISON_LEN - copy_len);
... ...
@@ -1116,7 +1116,7 @@ index 98422b8da..592ce69c3 100644
1116 1116
  		expected_output += len;
1117 1117
  	}
1118 1118
 -	if (WARN_ON(len_to_check != 0))
1119
-+	if (fcw_warn_on(!!(len_to_check != 0)))
1119
++	if (fcw_warn_on(len_to_check != 0))
1120 1120
  		return -EINVAL;
1121 1121
  	return 0;
1122 1122
  }
... ...
@@ -1146,7 +1146,7 @@ index 98422b8da..592ce69c3 100644
1146 1146
  	}
1147 1147
  
1148 1148
 -	WARN_ON_ONCE(!valid_testvec_config(cfg));
1149
-+	fcw_warn_on_once(!!(!valid_testvec_config(cfg)));
1149
++	fcw_warn_on_once((!valid_testvec_config(cfg)));
1150 1150
  }
1151 1151
  
1152 1152
  static void crypto_disable_simd_for_test(void)
... ...
@@ -1191,7 +1191,7 @@ index 98422b8da..592ce69c3 100644
1191 1191
  
1192 1192
  	/* The IV must be copied to a buffer, as the algorithm may modify it */
1193 1193
 -	if (WARN_ON(ivsize > MAX_IVLEN))
1194
-+	if (fcw_warn_on(!!(ivsize > MAX_IVLEN)))
1194
++	if (fcw_warn_on(ivsize > MAX_IVLEN))
1195 1195
  		return -EINVAL;
1196 1196
  	if (vec->iv)
1197 1197
 -		memcpy(iv, vec->iv, ivsize);
... ...
@@ -1230,7 +1230,7 @@ index 98422b8da..592ce69c3 100644
1230 1230
  	if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
1231 1231
  		authsize = MIN_COLLISION_FREE_AUTHSIZE;
1232 1232
 -	if (WARN_ON(authsize > maxdatasize))
1233
-+	if (fcw_warn_on(!!(authsize > maxdatasize)))
1233
++	if (fcw_warn_on(authsize > maxdatasize))
1234 1234
  		authsize = maxdatasize;
1235 1235
  	maxdatasize -= authsize;
1236 1236
  	vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
... ...
@@ -1239,7 +1239,7 @@ index 98422b8da..592ce69c3 100644
1239 1239
  
1240 1240
  		ret = -EINVAL;
1241 1241
 -		if (WARN_ON(template[i].len > PAGE_SIZE))
1242
-+		if (fcw_warn_on(!!(template[i].len > PAGE_SIZE)))
1242
++		if (fcw_warn_on(template[i].len > PAGE_SIZE))
1243 1243
  			goto out;
1244 1244
  
1245 1245
  		data = xbuf[0];
... ...
@@ -1253,7 +1253,7 @@ index 98422b8da..592ce69c3 100644
1253 1253
  	/* The IV must be copied to a buffer, as the algorithm may modify it */
1254 1254
  	if (ivsize) {
1255 1255
 -		if (WARN_ON(ivsize > MAX_IVLEN))
1256
-+		if (fcw_warn_on(!!(ivsize > MAX_IVLEN)))
1256
++		if (fcw_warn_on(ivsize > MAX_IVLEN))
1257 1257
  			return -EINVAL;
1258 1258
  		if (vec->generates_iv && !enc)
1259 1259
 -			memcpy(iv, vec->iv_out, ivsize);
... ...
@@ -1350,7 +1350,7 @@ index 98422b8da..592ce69c3 100644
1350 1350
  
1351 1351
  	err = -E2BIG;
1352 1352
 -	if (WARN_ON(m_size > PAGE_SIZE))
1353
-+	if (fcw_warn_on(!!(m_size > PAGE_SIZE)))
1353
++	if (fcw_warn_on(m_size > PAGE_SIZE))
1354 1354
  		goto free_all;
1355 1355
 -	memcpy(xbuf[0], m, m_size);
1356 1356
 +	fcw_memcpy(xbuf[0], m, m_size);
... ...
@@ -1362,7 +1362,7 @@ index 98422b8da..592ce69c3 100644
1362 1362
 +	fcw_sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
1363 1363
  	if (vecs->siggen_sigver_test) {
1364 1364
 -		if (WARN_ON(c_size > PAGE_SIZE))
1365
-+		if (fcw_warn_on(!!(c_size > PAGE_SIZE)))
1365
++		if (fcw_warn_on(c_size > PAGE_SIZE))
1366 1366
  			goto free_all;
1367 1367
 -		memcpy(xbuf[1], c, c_size);
1368 1368
 -		sg_set_buf(&src_tab[2], xbuf[1], c_size);
... ...
@@ -1376,7 +1376,7 @@ index 98422b8da..592ce69c3 100644
1376 1376
  	err = -E2BIG;
1377 1377
  	op = vecs->siggen_sigver_test ? "sign" : "decrypt";
1378 1378
 -	if (WARN_ON(c_size > PAGE_SIZE))
1379
-+	if (fcw_warn_on(!!(c_size > PAGE_SIZE)))
1379
++	if (fcw_warn_on(c_size > PAGE_SIZE))
1380 1380
  		goto free_all;
1381 1381
 -	memcpy(xbuf[0], c, c_size);
1382 1382
 +	fcw_memcpy(xbuf[0], c, c_size);
... ...
@@ -1388,14 +1388,14 @@ index 98422b8da..592ce69c3 100644
1388 1388
  				  alg_test_descs[i].alg);
1389 1389
  
1390 1390
 -		if (WARN_ON(diff > 0)) {
1391
-+		if (fcw_warn_on(!!(diff > 0))) {
1391
++		if (fcw_warn_on(diff > 0)) {
1392 1392
  			pr_warn("testmgr: alg_test_descs entries in wrong order: '%s' before '%s'\n",
1393 1393
  				alg_test_descs[i - 1].alg,
1394 1394
  				alg_test_descs[i].alg);
1395 1395
  		}
1396 1396
  
1397 1397
 -		if (WARN_ON(diff == 0)) {
1398
-+		if (fcw_warn_on(!!(diff == 0))) {
1398
++		if (fcw_warn_on(diff == 0)) {
1399 1399
  			pr_warn("testmgr: duplicate alg_test_descs entry: '%s'\n",
1400 1400
  				alg_test_descs[i].alg);
1401 1401
  		}
... ...
@@ -1405,13 +1405,13 @@ index 98422b8da..592ce69c3 100644
1405 1405
  	for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++)
1406 1406
 -		WARN_ON(!valid_testvec_config(
1407 1407
 -				&default_cipher_testvec_configs[i]));
1408
-+		fcw_warn_on(!!(!valid_testvec_config(
1408
++		fcw_warn_on((!valid_testvec_config(
1409 1409
 +				&default_cipher_testvec_configs[i])));
1410 1410
  
1411 1411
  	for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++)
1412 1412
 -		WARN_ON(!valid_testvec_config(
1413 1413
 -				&default_hash_testvec_configs[i]));
1414
-+		fcw_warn_on(!!(!valid_testvec_config(
1414
++		fcw_warn_on((!valid_testvec_config(
1415 1415
 +				&default_hash_testvec_configs[i])));
1416 1416
  }
1417 1417
  
... ...
@@ -1421,7 +1421,7 @@ index 98422b8da..592ce69c3 100644
1421 1421
  		pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
1422 1422
  			alg, driver, rc);
1423 1423
 -		WARN(rc != -ENOENT,
1424
-+		fcw_warn(!!(rc != -ENOENT),
1424
++		fcw_warn(rc != -ENOENT,
1425 1425
  		     "alg: self-tests for %s using %s failed (rc=%d)",
1426 1426
  		     alg, driver, rc);
1427 1427
  	} else {
... ...
@@ -5055,11 +5055,9 @@ CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
5055 5055
 CONFIG_ZERO_CALL_USED_REGS=y
5056 5056
 # end of Memory initialization
5057 5057
 
5058
-# CONFIG_RANDSTRUCT_NONE is not set
5058
+CONFIG_RANDSTRUCT_NONE=y
5059 5059
 # CONFIG_RANDSTRUCT_FULL is not set
5060
-CONFIG_RANDSTRUCT_PERFORMANCE=y
5061
-CONFIG_RANDSTRUCT=y
5062
-CONFIG_GCC_PLUGIN_RANDSTRUCT=y
5060
+# CONFIG_RANDSTRUCT_PERFORMANCE is not set
5063 5061
 # end of Kernel hardening options
5064 5062
 # end of Security options
5065 5063
 
... ...
@@ -5500,6 +5498,7 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
5500 5500
 # CONFIG_DEBUG_INFO_REDUCED is not set
5501 5501
 # CONFIG_DEBUG_INFO_COMPRESSED is not set
5502 5502
 # CONFIG_DEBUG_INFO_SPLIT is not set
5503
+# CONFIG_DEBUG_INFO_BTF is not set
5503 5504
 # CONFIG_GDB_SCRIPTS is not set
5504 5505
 CONFIG_FRAME_WARN=2048
5505 5506
 CONFIG_STRIP_ASM_SYMS=y
... ...
@@ -16,7 +16,7 @@
16 16
 Summary:        Kernel
17 17
 Name:           linux-secure
18 18
 Version:        6.1.10
19
-Release:        12%{?kat_build:.kat}%{?dist}
19
+Release:        13%{?kat_build:.kat}%{?dist}
20 20
 License:        GPLv2
21 21
 URL:            http://www.kernel.org
22 22
 Group:          System Environment/Kernel
... ...
@@ -125,10 +125,10 @@ Patch512: 0003-FIPS-broken-kattest.patch
125 125
 %if 0%{?canister_build}
126 126
 Patch10000:      6.1.10-8-0001-FIPS-canister-binary-usage.patch
127 127
 Patch10001:      0002-FIPS-canister-creation.patch
128
-Patch10003:      0003-aesni_intel_glue-Revert-static-calls-with-indirect-c.patch
129
-Patch10004:      0004-Disable-retpoline_sites-and-return_sites-section-in-.patch
130
-Patch10005:      0005-Move-__bug_table-section-to-fips_canister_wrapper.patch
131
-Patch10006:      0001-scripts-kallsyms-Extra-kallsyms-parsing.patch
128
+Patch10002:      0003-aesni_intel-Remove-static-call.patch
129
+Patch10003:      0004-Disable-retpoline_sites-and-return_sites-section-in-.patch
130
+Patch10004:      0005-Move-__bug_table-section-to-fips_canister_wrapper.patch
131
+Patch10005:      0001-scripts-kallsyms-Extra-kallsyms-parsing.patch
132 132
 %endif
133 133
 
134 134
 BuildArch:      x86_64
... ...
@@ -223,7 +223,7 @@ The kernel fips-canister
223 223
 %endif
224 224
 
225 225
 %if 0%{?canister_build}
226
-%autopatch -p1 -m10000 -M10006
226
+%autopatch -p1 -m10000 -M10005
227 227
 %endif
228 228
 
229 229
 %build
... ...
@@ -252,7 +252,6 @@ sed -i 's/CONFIG_LOCALVERSION="-secure"/CONFIG_LOCALVERSION="-%{release}-secure"
252 252
 %if 0%{?canister_build}
253 253
 sed -i "s/CONFIG_DEBUG_LIST=y/# CONFIG_DEBUG_LIST is not set/" .config
254 254
 sed -i "s/CONFIG_BUG_ON_DATA_CORRUPTION=y/# CONFIG_BUG_ON_DATA_CORRUPTION is not set/" .config
255
-#sed -i "/# CONFIG_DEBUG_INFO_DWARF4 is not set/a  # CONFIG_DEBUG_INFO_BTF is not set" .config
256 255
 sed -i "s/CONFIG_CRYPTO_AEAD=m/CONFIG_CRYPTO_AEAD=y/" .config
257 256
 sed -i "s/CONFIG_CRYPTO_SIMD=m/CONFIG_CRYPTO_SIMD=y/" .config
258 257
 sed -i "s/CONFIG_CRYPTO_AES_NI_INTEL=m/CONFIG_CRYPTO_AES_NI_INTEL=y/" .config
... ...
@@ -386,6 +385,9 @@ ln -sf linux-%{uname_r}.cfg /boot/photon.cfg
386 386
 %endif
387 387
 
388 388
 %changelog
389
+* Fri May 19 2023 Keerthana K <keerthanak@vmware.com> 6.1.10-13
390
+- Fix static call patch and disable RANDSTRUCT
391
+- Update fcw_warn_on, fcw_warn and fcw_warn_on_once calls in canister
389 392
 * Wed May 17 2023 Keerthana K <keerthanak@vmware.com> 6.1.10-12
390 393
 - Disable special sections from canister
391 394
 * Tue Apr 25 2023 Shreenidhi Shedi <sshedi@vmware.com> 6.1.10-11