While at it, remove the SSBD patches as they were merged into 4.9.102.
Change-Id: Idab81e11f8d5e552ed58350fdd0facb1131454b8
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/5284
Tested-by: Sharath George
Reviewed-by: Bo Gan <ganb@vmware.com>
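(Note: the `%define sha1 linux=...` values updated in the specs below are just the SHA-1 of the new source tarball. A minimal sketch of how such a value can be recomputed — assuming linux-4.9.109.tar.xz has already been downloaded locally; the path is illustrative:)

    import hashlib

    # Recompute the checksum recorded as "%define sha1 linux=..." in the specs.
    # Assumes the tarball sits in the current directory; adjust the path as needed.
    def tarball_sha1(path="linux-4.9.109.tar.xz"):
        h = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    print(tarball_sha1())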
@@ -1,14 +1,14 @@
 Summary: Linux API header files
 Name: linux-api-headers
-Version: 4.9.101
-Release: 2%{?dist}
+Version: 4.9.109
+Release: 1%{?dist}
 License: GPLv2
 URL: http://www.kernel.org/
 Group: System Environment/Kernel
 Vendor: VMware, Inc.
 Distribution: Photon
 Source0: http://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz
-%define sha1 linux=12b399649df63355823d482fd91711b1be3e7f1b
+%define sha1 linux=9d3af34a31661b2c7f6bea5682deb131c406f3d6
 BuildArch: noarch
 Patch0: Implement-the-f-xattrat-family-of-functions.patch
 %description
@@ -27,6 +27,8 @@ find /%{buildroot}%{_includedir} \( -name .install -o -name ..install.cmd \) -de
 %defattr(-,root,root)
 %{_includedir}/*
 %changelog
+* Thu Jun 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.109-1
+- Update to version 4.9.109
 * Mon May 21 2018 Alexey Makhalov <amakhalov@vmware.com> 4.9.101-2
 - Add the f*xattrat family of syscalls.
 * Mon May 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.101-1

deleted file mode 100644
@@ -1,38 +0,0 @@
-From 0ddcff49b672239dda94d70d0fcf50317a9f4b51 Mon Sep 17 00:00:00 2001
-From: "weiyongjun (A)" <weiyongjun1@huawei.com>
-Date: Thu, 18 Jan 2018 02:23:34 +0000
-Subject: [PATCH] mac80211_hwsim: fix possible memory leak in
- hwsim_new_radio_nl()
-
-'hwname' is malloced in hwsim_new_radio_nl() and should be freed
-before leaving from the error handling cases, otherwise it will cause
-memory leak.
-
-Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length")
-Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
-Reviewed-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
-Signed-off-by: Johannes Berg <johannes.berg@intel.com>
-Signed-off-by: Srivatsa S. Bhat <srivatsa@csail.mit.edu>
- drivers/net/wireless/mac80211_hwsim.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
-index 2681b533..95e9641 100644
-+++ b/drivers/net/wireless/mac80211_hwsim.c
-@@ -3084,8 +3084,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
- if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
- u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
-
-- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
-+ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
-+ kfree(hwname);
- return -EINVAL;
-+ }
- param.regd = hwsim_world_regdom_custom[idx];
- }
-
-2.7.4
-
@@ -55,10 +55,10 @@ index 1b3a2f9..5d34da2 100644
  * Compute outer checksum based on the assumption that the
  * inner checksum will be offloaded later.
 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index fb422df..40f966e 100644
+index 9f697b0..5945bf4 100644
 --- a/net/core/skbuff.c
 +++ b/net/core/skbuff.c
-@@ -4441,37 +4441,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+@@ -4442,37 +4442,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);

 /**
@@ -145,7 +145,7 @@ index fb422df..40f966e 100644
 +
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
- if (skb_cow(skb, skb_headroom(skb)) < 0) {
+ int mac_len;
 diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
 index 303355c..ad60a45 100644
 --- a/net/sched/sch_tbf.c
@@ -1,15 +1,15 @@
 %global security_hardening none
 Summary: Kernel
 Name: linux-aws
-Version: 4.9.101
-Release: 2%{?kat_build:.%kat_build}%{?dist}
+Version: 4.9.109
+Release: 1%{?kat_build:.%kat_build}%{?dist}
 License: GPLv2
 URL: http://www.kernel.org/
 Group: System Environment/Kernel
 Vendor: VMware, Inc.
 Distribution: Photon
 Source0: http://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz
-%define sha1 linux=12b399649df63355823d482fd91711b1be3e7f1b
+%define sha1 linux=9d3af34a31661b2c7f6bea5682deb131c406f3d6
 Source1: config-aws
 Source2: initramfs.trigger
 # common
@@ -54,8 +54,6 @@ Patch35: 0002-bnx2x-disable-GSO-where-gso_size-is-too-big-for-hard.patch
 Patch37: 0001-ocfs2-subsystem.su_mutex-is-required-while-accessing.patch
 # Fix for CVE-2018-8043
 Patch38: 0001-net-phy-mdio-bcm-unimac-fix-potential-NULL-dereferen.patch
-# Fix for CVE-2018-8087
-Patch39: 0001-mac80211_hwsim-fix-possible-memory-leak-in-hwsim_new.patch
 # Fix for CVE-2017-18241
 Patch40: 0001-f2fs-fix-a-panic-caused-by-NULL-flush_cmd_control.patch
 # Fix for CVE-2017-18224
@@ -124,60 +122,6 @@ Patch144: 0052-drivers-amazon-ena-update-to-1.4.0.patch
 Patch145: 0053-PM-hibernate-update-the-resume-offset-on-SNAPSHOT_SE.patch
 Patch146: 0054-Not-for-upstream-PM-hibernate-Speed-up-hibernation-b.patch

-# Fix CVE-2018-3639 (Speculative Store Bypass)
-Patch201: 0001-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
-Patch202: 0002-x86-nospec-Simplify-alternative_msr_write.patch
-Patch203: 0003-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
-Patch204: 0004-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
-Patch205: 0005-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
-Patch206: 0006-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
-Patch207: 0007-x86-bugs-Expose-sys-.-spec_store_bypass.patch
-Patch208: 0008-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
-Patch209: 0009-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
-Patch210: 0010-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
-Patch211: 0011-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
-Patch212: 0012-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
-Patch213: 0013-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
-Patch214: 0014-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
-Patch215: 0015-prctl-Add-speculation-control-prctls.patch
-Patch216: 0016-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
-Patch217: 0017-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
-Patch218: 0018-x86-process-Optimize-TIF_NOTSC-switch.patch
-Patch219: 0019-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
-Patch220: 0020-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
-Patch221: 0021-nospec-Allow-getting-setting-on-non-current-task.patch
-Patch222: 0022-proc-Provide-details-on-speculation-flaw-mitigations.patch
-Patch223: 0023-seccomp-Enable-speculation-flaw-mitigations.patch
-Patch224: 0024-x86-bugs-Make-boot-modes-__ro_after_init.patch
-Patch225: 0025-prctl-Add-force-disable-speculation.patch
-Patch226: 0026-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
-Patch227: 0027-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
-Patch228: 0028-seccomp-Move-speculation-migitation-control-to-arch-.patch
-Patch229: 0029-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
-Patch230: 0030-x86-bugs-Rename-_RDS-to-_SSBD.patch
-Patch231: 0031-proc-Use-underscores-for-SSBD-in-status.patch
-Patch232: 0032-Documentation-spec_ctrl-Do-some-minor-cleanups.patch
-Patch233: 0033-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
-Patch234: 0034-x86-bugs-Make-cpu_show_common-static.patch
-Patch235: 0035-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
-Patch236: 0036-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
-Patch237: 0037-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
-Patch238: 0038-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
-Patch239: 0039-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
-Patch240: 0040-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
-Patch241: 0041-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
-Patch242: 0042-x86-cpufeatures-Add-FEATURE_ZEN.patch
-Patch243: 0043-x86-speculation-Handle-HT-correctly-on-AMD.patch
-Patch244: 0044-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
-Patch245: 0045-x86-speculation-Add-virtualized-speculative-store-by.patch
-Patch246: 0046-x86-speculation-Rework-speculative_store_bypass_upda.patch
-Patch247: 0047-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
-Patch248: 0048-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
-Patch249: 0049-x86-bugs-Remove-x86_spec_ctrl_set.patch
-Patch250: 0050-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
-Patch251: 0051-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
-Patch252: 0052-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
-Patch253: 0053-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch

 %if 0%{?kat_build:1}
 Patch1000: %{kat_build}.patch
@@ -283,7 +227,6 @@ This package contains the 'perf' performance analysis tools for Linux kernel.
 %patch35 -p1
 %patch37 -p1
 %patch38 -p1
-%patch39 -p1
 %patch40 -p1
 %patch41 -p1

@@ -349,59 +292,6 @@ This package contains the 'perf' performance analysis tools for Linux kernel.
 %patch145 -p1
 %patch146 -p1

-%patch201 -p1
-%patch202 -p1
-%patch203 -p1
-%patch204 -p1
-%patch205 -p1
-%patch206 -p1
-%patch207 -p1
-%patch208 -p1
-%patch209 -p1
-%patch210 -p1
-%patch211 -p1
-%patch212 -p1
-%patch213 -p1
-%patch214 -p1
-%patch215 -p1
-%patch216 -p1
-%patch217 -p1
-%patch218 -p1
-%patch219 -p1
-%patch220 -p1
-%patch221 -p1
-%patch222 -p1
-%patch223 -p1
-%patch224 -p1
-%patch225 -p1
-%patch226 -p1
-%patch227 -p1
-%patch228 -p1
-%patch229 -p1
-%patch230 -p1
-%patch231 -p1
-%patch232 -p1
-%patch233 -p1
-%patch234 -p1
-%patch235 -p1
-%patch236 -p1
-%patch237 -p1
-%patch238 -p1
-%patch239 -p1
-%patch240 -p1
-%patch241 -p1
-%patch242 -p1
-%patch243 -p1
-%patch244 -p1
-%patch245 -p1
-%patch246 -p1
-%patch247 -p1
-%patch248 -p1
-%patch249 -p1
-%patch250 -p1
-%patch251 -p1
-%patch252 -p1
-%patch253 -p1

 %if 0%{?kat_build:1}
 %patch1000 -p1
@@ -559,6 +449,8 @@ ln -sf %{name}-%{uname_r}.cfg /boot/photon.cfg
 /usr/share/doc/*

 %changelog
+* Thu Jun 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.109-1
+- Update to version 4.9.109
 * Mon May 21 2018 Alexey Makhalov <amakhalov@vmware.com> 4.9.101-2
 - Add the f*xattrat family of syscalls.
 * Mon May 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.101-1
@@ -1,15 +1,15 @@
 %global security_hardening none
 Summary: Kernel
 Name: linux-esx
-Version: 4.9.101
-Release: 2%{?dist}
+Version: 4.9.109
+Release: 1%{?dist}
 License: GPLv2
 URL: http://www.kernel.org/
 Group: System Environment/Kernel
 Vendor: VMware, Inc.
 Distribution: Photon
 Source0: http://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz
-%define sha1 linux=12b399649df63355823d482fd91711b1be3e7f1b
+%define sha1 linux=9d3af34a31661b2c7f6bea5682deb131c406f3d6
 Source1: config-esx
 Source2: initramfs.trigger
 # common
@@ -51,8 +51,6 @@ Patch35: 0002-bnx2x-disable-GSO-where-gso_size-is-too-big-for-hard.patch
 Patch37: 0001-ocfs2-subsystem.su_mutex-is-required-while-accessing.patch
 # Fix for CVE-2018-8043
 Patch38: 0001-net-phy-mdio-bcm-unimac-fix-potential-NULL-dereferen.patch
-# Fix for CVE-2018-8087
-Patch39: 0001-mac80211_hwsim-fix-possible-memory-leak-in-hwsim_new.patch
 # Fix for CVE-2017-18241
 Patch40: 0001-f2fs-fix-a-panic-caused-by-NULL-flush_cmd_control.patch
 # Fix for CVE-2017-18224
@@ -74,60 +72,6 @@ Patch64: 0153-net-mpls-prevent-speculative-execution.patch
 Patch65: 0154-udf-prevent-speculative-execution.patch
 Patch66: 0155-userns-prevent-speculative-execution.patch

-# Fix CVE-2018-3639 (Speculative Store Bypass)
-Patch201: 0001-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
-Patch202: 0002-x86-nospec-Simplify-alternative_msr_write.patch
-Patch203: 0003-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
-Patch204: 0004-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
-Patch205: 0005-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
-Patch206: 0006-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
-Patch207: 0007-x86-bugs-Expose-sys-.-spec_store_bypass.patch
-Patch208: 0008-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
-Patch209: 0009-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
-Patch210: 0010-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
-Patch211: 0011-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
-Patch212: 0012-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
-Patch213: 0013-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
-Patch214: 0014-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
-Patch215: 0015-prctl-Add-speculation-control-prctls.patch
-Patch216: 0016-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
-Patch217: 0017-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
-Patch218: 0018-x86-process-Optimize-TIF_NOTSC-switch.patch
-Patch219: 0019-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
-Patch220: 0020-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
-Patch221: 0021-nospec-Allow-getting-setting-on-non-current-task.patch
-Patch222: 0022-proc-Provide-details-on-speculation-flaw-mitigations.patch
-Patch223: 0023-seccomp-Enable-speculation-flaw-mitigations.patch
-Patch224: 0024-x86-bugs-Make-boot-modes-__ro_after_init.patch
-Patch225: 0025-prctl-Add-force-disable-speculation.patch
-Patch226: 0026-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
-Patch227: 0027-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
-Patch228: 0028-seccomp-Move-speculation-migitation-control-to-arch-.patch
-Patch229: 0029-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
-Patch230: 0030-x86-bugs-Rename-_RDS-to-_SSBD.patch
-Patch231: 0031-proc-Use-underscores-for-SSBD-in-status.patch
-Patch232: 0032-Documentation-spec_ctrl-Do-some-minor-cleanups.patch
-Patch233: 0033-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
-Patch234: 0034-x86-bugs-Make-cpu_show_common-static.patch
-Patch235: 0035-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
-Patch236: 0036-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
-Patch237: 0037-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
-Patch238: 0038-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
-Patch239: 0039-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
-Patch240: 0040-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
-Patch241: 0041-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
-Patch242: 0042-x86-cpufeatures-Add-FEATURE_ZEN.patch
-Patch243: 0043-x86-speculation-Handle-HT-correctly-on-AMD.patch
-Patch244: 0044-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
-Patch245: 0045-x86-speculation-Add-virtualized-speculative-store-by.patch
-Patch246: 0046-x86-speculation-Rework-speculative_store_bypass_upda.patch
-Patch247: 0047-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
-Patch248: 0048-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
-Patch249: 0049-x86-bugs-Remove-x86_spec_ctrl_set.patch
-Patch250: 0050-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
-Patch251: 0051-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
-Patch252: 0052-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
-Patch253: 0053-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch

 BuildRequires: bc
 BuildRequires: kbd
@@ -198,7 +142,6 @@ The Linux package contains the Linux kernel doc files
 %patch35 -p1
 %patch37 -p1
 %patch38 -p1
-%patch39 -p1
 %patch40 -p1
 %patch41 -p1

@@ -217,59 +160,6 @@ The Linux package contains the Linux kernel doc files
 %patch65 -p1
 %patch66 -p1

-%patch201 -p1
-%patch202 -p1
-%patch203 -p1
-%patch204 -p1
-%patch205 -p1
-%patch206 -p1
-%patch207 -p1
-%patch208 -p1
-%patch209 -p1
-%patch210 -p1
-%patch211 -p1
-%patch212 -p1
-%patch213 -p1
-%patch214 -p1
-%patch215 -p1
-%patch216 -p1
-%patch217 -p1
-%patch218 -p1
-%patch219 -p1
-%patch220 -p1
-%patch221 -p1
-%patch222 -p1
-%patch223 -p1
-%patch224 -p1
-%patch225 -p1
-%patch226 -p1
-%patch227 -p1
-%patch228 -p1
-%patch229 -p1
-%patch230 -p1
-%patch231 -p1
-%patch232 -p1
-%patch233 -p1
-%patch234 -p1
-%patch235 -p1
-%patch236 -p1
-%patch237 -p1
-%patch238 -p1
-%patch239 -p1
-%patch240 -p1
-%patch241 -p1
-%patch242 -p1
-%patch243 -p1
-%patch244 -p1
-%patch245 -p1
-%patch246 -p1
-%patch247 -p1
-%patch248 -p1
-%patch249 -p1
-%patch250 -p1
-%patch251 -p1
-%patch252 -p1
-%patch253 -p1

 %build
 # patch vmw_balloon driver
@@ -366,6 +256,8 @@ ln -sf linux-%{uname_r}.cfg /boot/photon.cfg
 /usr/src/linux-headers-%{uname_r}

 %changelog
+* Thu Jun 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.109-1
+- Update to version 4.9.109
 * Mon May 21 2018 Alexey Makhalov <amakhalov@vmware.com> 4.9.101-2
 - Add the f*xattrat family of syscalls.
 * Mon May 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.101-1
@@ -1,15 +1,15 @@
 %global security_hardening none
 Summary: Kernel
 Name: linux-secure
-Version: 4.9.101
-Release: 2%{?kat_build:.%kat_build}%{?dist}
+Version: 4.9.109
+Release: 1%{?kat_build:.%kat_build}%{?dist}
 License: GPLv2
 URL: http://www.kernel.org/
 Group: System Environment/Kernel
 Vendor: VMware, Inc.
 Distribution: Photon
 Source0: http://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz
-%define sha1 linux=12b399649df63355823d482fd91711b1be3e7f1b
+%define sha1 linux=9d3af34a31661b2c7f6bea5682deb131c406f3d6
 Source1: config-secure
 Source2: aufs4.9.tar.gz
 %define sha1 aufs=ebe716ce4b638a3772c7cd3161abbfe11d584906
@@ -60,8 +60,6 @@ Patch37: 0002-bnx2x-disable-GSO-where-gso_size-is-too-big-for-hard.patch
 Patch39: 0001-ocfs2-subsystem.su_mutex-is-required-while-accessing.patch
 # Fix for CVE-2018-8043
 Patch40: 0001-net-phy-mdio-bcm-unimac-fix-potential-NULL-dereferen.patch
-# Fix for CVE-2018-8087
-Patch41: 0001-mac80211_hwsim-fix-possible-memory-leak-in-hwsim_new.patch
 # Fix for CVE-2017-18241
 Patch42: 0001-f2fs-fix-a-panic-caused-by-NULL-flush_cmd_control.patch
 # Fix for CVE-2017-18224
@@ -83,60 +81,6 @@ Patch64: 0153-net-mpls-prevent-speculative-execution.patch
 Patch65: 0154-udf-prevent-speculative-execution.patch
 Patch66: 0155-userns-prevent-speculative-execution.patch

-# Fix CVE-2018-3639 (Speculative Store Bypass)
-Patch201: 0001-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
-Patch202: 0002-x86-nospec-Simplify-alternative_msr_write.patch
-Patch203: 0003-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
-Patch204: 0004-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
-Patch205: 0005-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
-Patch206: 0006-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
-Patch207: 0007-x86-bugs-Expose-sys-.-spec_store_bypass.patch
-Patch208: 0008-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
-Patch209: 0009-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
-Patch210: 0010-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
-Patch211: 0011-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
-Patch212: 0012-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
-Patch213: 0013-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
-Patch214: 0014-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
-Patch215: 0015-prctl-Add-speculation-control-prctls.patch
-Patch216: 0016-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
-Patch217: 0017-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
-Patch218: 0018-x86-process-Optimize-TIF_NOTSC-switch.patch
-Patch219: 0019-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
-Patch220: 0020-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
-Patch221: 0021-nospec-Allow-getting-setting-on-non-current-task.patch
-Patch222: 0022-proc-Provide-details-on-speculation-flaw-mitigations.patch
-Patch223: 0023-seccomp-Enable-speculation-flaw-mitigations.patch
-Patch224: 0024-x86-bugs-Make-boot-modes-__ro_after_init.patch
-Patch225: 0025-prctl-Add-force-disable-speculation.patch
-Patch226: 0026-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
-Patch227: 0027-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
-Patch228: 0028-seccomp-Move-speculation-migitation-control-to-arch-.patch
-Patch229: 0029-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
-Patch230: 0030-x86-bugs-Rename-_RDS-to-_SSBD.patch
-Patch231: 0031-proc-Use-underscores-for-SSBD-in-status.patch
-Patch232: 0032-Documentation-spec_ctrl-Do-some-minor-cleanups.patch
-Patch233: 0033-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
-Patch234: 0034-x86-bugs-Make-cpu_show_common-static.patch
-Patch235: 0035-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
-Patch236: 0036-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
-Patch237: 0037-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
-Patch238: 0038-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
-Patch239: 0039-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
-Patch240: 0040-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
-Patch241: 0041-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
-Patch242: 0042-x86-cpufeatures-Add-FEATURE_ZEN.patch
-Patch243: 0043-x86-speculation-Handle-HT-correctly-on-AMD.patch
-Patch244: 0044-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
-Patch245: 0045-x86-speculation-Add-virtualized-speculative-store-by.patch
-Patch246: 0046-x86-speculation-Rework-speculative_store_bypass_upda.patch
-Patch247: 0047-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
-Patch248: 0048-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
-Patch249: 0049-x86-bugs-Remove-x86_spec_ctrl_set.patch
-Patch250: 0050-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
-Patch251: 0051-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
-Patch252: 0052-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
-Patch253: 0053-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch

 # NSX requirements (should be removed)
 Patch99: LKCM.patch
@@ -251,7 +195,6 @@ EOF
 %patch37 -p1
 %patch39 -p1
 %patch40 -p1
-%patch41 -p1
 %patch42 -p1
 %patch43 -p1

@@ -276,60 +219,6 @@ EOF
 %patch14 -p1
 %patch15 -p1

-%patch201 -p1
-%patch202 -p1
-%patch203 -p1
-%patch204 -p1
-%patch205 -p1
-%patch206 -p1
-%patch207 -p1
-%patch208 -p1
-%patch209 -p1
-%patch210 -p1
-%patch211 -p1
-%patch212 -p1
-%patch213 -p1
-%patch214 -p1
-%patch215 -p1
-%patch216 -p1
-%patch217 -p1
-%patch218 -p1
-%patch219 -p1
-%patch220 -p1
-%patch221 -p1
-%patch222 -p1
-%patch223 -p1
-%patch224 -p1
-%patch225 -p1
-%patch226 -p1
-%patch227 -p1
-%patch228 -p1
-%patch229 -p1
-%patch230 -p1
-%patch231 -p1
-%patch232 -p1
-%patch233 -p1
-%patch234 -p1
-%patch235 -p1
-%patch236 -p1
-%patch237 -p1
-%patch238 -p1
-%patch239 -p1
-%patch240 -p1
-%patch241 -p1
-%patch242 -p1
-%patch243 -p1
-%patch244 -p1
-%patch245 -p1
-%patch246 -p1
-%patch247 -p1
-%patch248 -p1
-%patch249 -p1
-%patch250 -p1
-%patch251 -p1
-%patch252 -p1
-%patch253 -p1
-
 pushd ..
 %patch99 -p0
 popd
@@ -455,6 +344,8 @@ ln -sf linux-%{uname_r}.cfg /boot/photon.cfg
 /usr/src/linux-headers-%{uname_r}

 %changelog
+* Thu Jun 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.109-1
+- Update to version 4.9.109
 * Mon May 21 2018 Alexey Makhalov <amakhalov@vmware.com> 4.9.101-2
 - Add the f*xattrat family of syscalls.
 * Mon May 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.101-1
@@ -1,15 +1,15 @@
 %global security_hardening none
 Summary: Kernel
 Name: linux
-Version: 4.9.101
-Release: 2%{?kat_build:.%kat_build}%{?dist}
+Version: 4.9.109
+Release: 1%{?kat_build:.%kat_build}%{?dist}
 License: GPLv2
 URL: http://www.kernel.org/
 Group: System Environment/Kernel
 Vendor: VMware, Inc.
 Distribution: Photon
 Source0: http://www.kernel.org/pub/linux/kernel/v4.x/linux-%{version}.tar.xz
-%define sha1 linux=12b399649df63355823d482fd91711b1be3e7f1b
+%define sha1 linux=9d3af34a31661b2c7f6bea5682deb131c406f3d6
 Source1: config
 Source2: initramfs.trigger
 %define ena_version 1.1.3
@@ -58,8 +58,6 @@ Patch35: 0002-bnx2x-disable-GSO-where-gso_size-is-too-big-for-hard.patch
 Patch37: 0001-ocfs2-subsystem.su_mutex-is-required-while-accessing.patch
 # Fix for CVE-2018-8043
 Patch38: 0001-net-phy-mdio-bcm-unimac-fix-potential-NULL-dereferen.patch
-# Fix for CVE-2018-8087
-Patch39: 0001-mac80211_hwsim-fix-possible-memory-leak-in-hwsim_new.patch
 # Fix for CVE-2017-18241
 Patch40: 0001-f2fs-fix-a-panic-caused-by-NULL-flush_cmd_control.patch
 # Fix for CVE-2017-18224
@@ -81,60 +79,6 @@ Patch64: 0153-net-mpls-prevent-speculative-execution.patch
 Patch65: 0154-udf-prevent-speculative-execution.patch
 Patch66: 0155-userns-prevent-speculative-execution.patch

-# Fix CVE-2018-3639 (Speculative Store Bypass)
-Patch201: 0001-x86-amd-don-t-set-X86_BUG_SYSRET_SS_ATTRS-when-runni.patch
-Patch202: 0002-x86-nospec-Simplify-alternative_msr_write.patch
-Patch203: 0003-x86-bugs-Concentrate-bug-detection-into-a-separate-f.patch
-Patch204: 0004-x86-bugs-Concentrate-bug-reporting-into-a-separate-f.patch
-Patch205: 0005-x86-bugs-Read-SPEC_CTRL-MSR-during-boot-and-re-use-r.patch
-Patch206: 0006-x86-bugs-KVM-Support-the-combination-of-guest-and-ho.patch
-Patch207: 0007-x86-bugs-Expose-sys-.-spec_store_bypass.patch
-Patch208: 0008-x86-cpufeatures-Add-X86_FEATURE_RDS.patch
-Patch209: 0009-x86-bugs-Provide-boot-parameters-for-the-spec_store_.patch
-Patch210: 0010-x86-bugs-intel-Set-proper-CPU-features-and-setup-RDS.patch
-Patch211: 0011-x86-bugs-Whitelist-allowed-SPEC_CTRL-MSR-values.patch
-Patch212: 0012-x86-bugs-AMD-Add-support-to-disable-RDS-on-Fam-15-16.patch
-Patch213: 0013-x86-KVM-VMX-Expose-SPEC_CTRL-Bit-2-to-the-guest.patch
-Patch214: 0014-x86-speculation-Create-spec-ctrl.h-to-avoid-include-.patch
-Patch215: 0015-prctl-Add-speculation-control-prctls.patch
-Patch216: 0016-x86-process-Optimize-TIF-checks-in-__switch_to_xtra.patch
-Patch217: 0017-x86-process-Correct-and-optimize-TIF_BLOCKSTEP-switc.patch
-Patch218: 0018-x86-process-Optimize-TIF_NOTSC-switch.patch
-Patch219: 0019-x86-process-Allow-runtime-control-of-Speculative-Sto.patch
-Patch220: 0020-x86-speculation-Add-prctl-for-Speculative-Store-Bypa.patch
-Patch221: 0021-nospec-Allow-getting-setting-on-non-current-task.patch
-Patch222: 0022-proc-Provide-details-on-speculation-flaw-mitigations.patch
-Patch223: 0023-seccomp-Enable-speculation-flaw-mitigations.patch
-Patch224: 0024-x86-bugs-Make-boot-modes-__ro_after_init.patch
-Patch225: 0025-prctl-Add-force-disable-speculation.patch
-Patch226: 0026-seccomp-Use-PR_SPEC_FORCE_DISABLE.patch
-Patch227: 0027-seccomp-Add-filter-flag-to-opt-out-of-SSB-mitigation.patch
-Patch228: 0028-seccomp-Move-speculation-migitation-control-to-arch-.patch
-Patch229: 0029-x86-speculation-Make-seccomp-the-default-mode-for-Sp.patch
-Patch230: 0030-x86-bugs-Rename-_RDS-to-_SSBD.patch
-Patch231: 0031-proc-Use-underscores-for-SSBD-in-status.patch
-Patch232: 0032-Documentation-spec_ctrl-Do-some-minor-cleanups.patch
-Patch233: 0033-x86-bugs-Fix-__ssb_select_mitigation-return-type.patch
-Patch234: 0034-x86-bugs-Make-cpu_show_common-static.patch
-Patch235: 0035-x86-bugs-Fix-the-parameters-alignment-and-missing-vo.patch
-Patch236: 0036-x86-cpu-Make-alternative_msr_write-work-for-32-bit-c.patch
-Patch237: 0037-KVM-SVM-Move-spec-control-call-after-restore-of-GS.patch
-Patch238: 0038-x86-speculation-Use-synthetic-bits-for-IBRS-IBPB-STI.patch
-Patch239: 0039-x86-cpufeatures-Disentangle-MSR_SPEC_CTRL-enumeratio.patch
-Patch240: 0040-x86-cpufeatures-Disentangle-SSBD-enumeration.patch
-Patch241: 0041-x86-cpu-AMD-Fix-erratum-1076-CPB-bit.patch
-Patch242: 0042-x86-cpufeatures-Add-FEATURE_ZEN.patch
-Patch243: 0043-x86-speculation-Handle-HT-correctly-on-AMD.patch
-Patch244: 0044-x86-bugs-KVM-Extend-speculation-control-for-VIRT_SPE.patch
-Patch245: 0045-x86-speculation-Add-virtualized-speculative-store-by.patch
-Patch246: 0046-x86-speculation-Rework-speculative_store_bypass_upda.patch
-Patch247: 0047-x86-bugs-Unify-x86_spec_ctrl_-set_guest-restore_host.patch
-Patch248: 0048-x86-bugs-Expose-x86_spec_ctrl_base-directly.patch
-Patch249: 0049-x86-bugs-Remove-x86_spec_ctrl_set.patch
-Patch250: 0050-x86-bugs-Rework-spec_ctrl-base-and-mask-logic.patch
-Patch251: 0051-x86-speculation-KVM-Implement-support-for-VIRT_SPEC_.patch
-Patch252: 0052-KVM-SVM-Implement-VIRT_SPEC_CTRL-support-for-SSBD.patch
-Patch253: 0053-x86-bugs-Rename-SSBD_NO-to-SSB_NO.patch

 %if 0%{?kat_build:1}
 Patch1000: %{kat_build}.patch
@@ -242,7 +186,6 @@ This package contains the 'perf' performance analysis tools for Linux kernel.
 %patch35 -p1
 %patch37 -p1
 %patch38 -p1
-%patch39 -p1
 %patch40 -p1
 %patch41 -p1

@@ -261,59 +204,6 @@ This package contains the 'perf' performance analysis tools for Linux kernel.
 %patch65 -p1
 %patch66 -p1

-%patch201 -p1
-%patch202 -p1
-%patch203 -p1
-%patch204 -p1
-%patch205 -p1
-%patch206 -p1
-%patch207 -p1
-%patch208 -p1
-%patch209 -p1
-%patch210 -p1
-%patch211 -p1
-%patch212 -p1
-%patch213 -p1
-%patch214 -p1
-%patch215 -p1
-%patch216 -p1
-%patch217 -p1
-%patch218 -p1
-%patch219 -p1
-%patch220 -p1
-%patch221 -p1
-%patch222 -p1
-%patch223 -p1
-%patch224 -p1
-%patch225 -p1
-%patch226 -p1
-%patch227 -p1
-%patch228 -p1
-%patch229 -p1
-%patch230 -p1
-%patch231 -p1
-%patch232 -p1
-%patch233 -p1
-%patch234 -p1
-%patch235 -p1
-%patch236 -p1
-%patch237 -p1
-%patch238 -p1
-%patch239 -p1
-%patch240 -p1
-%patch241 -p1
-%patch242 -p1
-%patch243 -p1
-%patch244 -p1
-%patch245 -p1
-%patch246 -p1
-%patch247 -p1
-%patch248 -p1
-%patch249 -p1
-%patch250 -p1
-%patch251 -p1
-%patch252 -p1
-%patch253 -p1

 %if 0%{?kat_build:1}
 %patch1000 -p1
@@ -481,6 +371,8 @@ ln -sf %{name}-%{uname_r}.cfg /boot/photon.cfg
 /usr/share/doc/*

 %changelog
+* Thu Jun 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.109-1
+- Update to version 4.9.109
 * Mon May 21 2018 Alexey Makhalov <amakhalov@vmware.com> 4.9.101-2
 - Add the f*xattrat family of syscalls.
 * Mon May 21 2018 Srivatsa S. Bhat <srivatsa@csail.mit.edu> 4.9.101-1

deleted file mode 100644
@@ -1,66 +0,0 @@
-From 955fb59065c8d4dd1efe70a6d3233f7f2aa925f8 Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Sun, 20 May 2018 20:51:10 +0100
-Subject: [PATCH] x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running under
- Xen
-
-commit def9331a12977770cc6132d79f8e6565871e8e38 upstream
-
-When running as Xen pv guest X86_BUG_SYSRET_SS_ATTRS must not be set
-on AMD cpus.
-
-This bug/feature bit is kind of special as it will be used very early
-when switching threads. Setting the bit and clearing it a little bit
-later leaves a critical window where things can go wrong. This time
-window has enlarged a little bit by using setup_clear_cpu_cap() instead
-of the hypervisor's set_cpu_features callback. It seems this larger
-window now makes it rather easy to hit the problem.
-
-The proper solution is to never set the bit in case of Xen.
-
-Signed-off-by: Juergen Gross <jgross@suse.com>
-Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Juergen Gross <jgross@suse.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/amd.c | 5 +++--
- arch/x86/xen/enlighten.c | 4 +---
- 2 files changed, 4 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index c375bc6..747f8a2 100644
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -824,8 +824,9 @@ static void init_amd(struct cpuinfo_x86 *c)
- if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
- set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
-
-- /* AMD CPUs don't reset SS attributes on SYSRET */
-- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
-+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
-+ if (!cpu_has(c, X86_FEATURE_XENPV))
-+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- }
-
- #ifdef CONFIG_X86_32
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 2bea87c..081437b 100644
-+++ b/arch/x86/xen/enlighten.c
-@@ -1977,10 +1977,8 @@ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
-
- static void xen_set_cpu_features(struct cpuinfo_x86 *c)
- {
-- if (xen_pv_domain()) {
-- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
-+ if (xen_pv_domain())
- set_cpu_cap(c, X86_FEATURE_XENPV);
-- }
- }
-
- static void xen_pin_vcpu(int cpu)
-2.7.4
-

deleted file mode 100644
@@ -1,71 +0,0 @@
-From 50611b175f16c043d2a277faef4dd1a73050bba1 Mon Sep 17 00:00:00 2001
-From: Linus Torvalds <torvalds@linux-foundation.org>
-Date: Tue, 1 May 2018 15:55:51 +0200
-Subject: [PATCH 02/54] x86/nospec: Simplify alternative_msr_write()
-
-commit 1aa7a5735a41418d8e01fa7c9565eb2657e2ea3f upstream
-
-The macro is not type safe and I did look for why that "g" constraint for
-the asm doesn't work: it's because the asm is more fundamentally wrong.
-
-It does
-
- movl %[val], %%eax
-
-but "val" isn't a 32-bit value, so then gcc will pass it in a register,
-and generate code like
-
- movl %rsi, %eax
-
-and gas will complain about a nonsensical 'mov' instruction (it's moving a
-64-bit register to a 32-bit one).
-
-Passing it through memory will just hide the real bug - gcc still thinks
-the memory location is 64-bit, but the "movl" will only load the first 32
-bits and it all happens to work because x86 is little-endian.
-
-Convert it to a type safe inline function with a little trick which hands
-the feature into the ALTERNATIVE macro.
-
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/include/asm/nospec-branch.h | 19 ++++++++++---------
- 1 file changed, 10 insertions(+), 9 deletions(-)
-
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
-index f928ad9..870acfc 100644
-+++ b/arch/x86/include/asm/nospec-branch.h
-@@ -241,15 +241,16 @@ static inline void vmexit_fill_RSB(void)
- #endif
- }
-
--#define alternative_msr_write(_msr, _val, _feature) \
-- asm volatile(ALTERNATIVE("", \
-- "movl %[msr], %%ecx\n\t" \
-- "movl %[val], %%eax\n\t" \
-- "movl $0, %%edx\n\t" \
-- "wrmsr", \
-- _feature) \
-- : : [msr] "i" (_msr), [val] "i" (_val) \
-- : "eax", "ecx", "edx", "memory")
-+static __always_inline
-+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
-+{
-+ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
-+ : : "c" (msr),
-+ "a" (val),
-+ "d" (val >> 32),
-+ [feature] "i" (feature)
-+ : "memory");
-+}
-
- static inline void indirect_branch_prediction_barrier(void)
- {
-2.7.4
-

deleted file mode 100644
@@ -1,75 +0,0 @@
-From c0fe8ba000900104b0e1d75792cc3b649e2ca5c8 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Wed, 25 Apr 2018 22:04:16 -0400
-Subject: [PATCH 03/54] x86/bugs: Concentrate bug detection into a separate
- function
-
-commit 4a28bfe3267b68e22c663ac26185aa16c9b879ef upstream
-
-Combine the various logic which goes through all those
-x86_cpu_id matching structures in one function.
-
-Suggested-by: Borislav Petkov <bp@suse.de>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Reviewed-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/common.c | 21 +++++++++++----------
- 1 file changed, 11 insertions(+), 10 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 301bbd1..357c589 100644
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -879,21 +879,27 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
- {}
- };
-
--static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
-+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
- {
- u64 ia32_cap = 0;
-
-+ if (x86_match_cpu(cpu_no_speculation))
-+ return;
-+
-+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-+
- if (x86_match_cpu(cpu_no_meltdown))
-- return false;
-+ return;
-
- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
- /* Rogue Data Cache Load? No! */
- if (ia32_cap & ARCH_CAP_RDCL_NO)
-- return false;
-+ return;
-
-- return true;
-+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- }
-
- /*
-@@ -942,12 +948,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
-
- setup_force_cpu_cap(X86_FEATURE_ALWAYS);
-
-- if (!x86_match_cpu(cpu_no_speculation)) {
-- if (cpu_vulnerable_to_meltdown(c))
-- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-- }
-+ cpu_set_bug_bits(c);
-
- fpu__init_system(c);
-
-2.7.4
-

deleted file mode 100644
@@ -1,92 +0,0 @@
-From ff78adefff46730763280872f6c78e9599b58eaf Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Wed, 25 Apr 2018 22:04:17 -0400
-Subject: [PATCH 04/54] x86/bugs: Concentrate bug reporting into a separate
- function
-
-commit d1059518b4789cabe34bb4b714d07e6089c82ca1 upstream
-
-Those SysFS functions have a similar preamble, as such make common
-code to handle them.
-
-Suggested-by: Borislav Petkov <bp@suse.de>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Reviewed-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++--------------
- 1 file changed, 32 insertions(+), 14 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index b8b0b6e..4d9c5fe 100644
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -313,30 +313,48 @@ static void __init spectre_v2_select_mitigation(void)
- #undef pr_fmt
-
- #ifdef CONFIG_SYSFS
--ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
-+
-+ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
-+ char *buf, unsigned int bug)
- {
-- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
-+ if (!boot_cpu_has_bug(bug))
- return sprintf(buf, "Not affected\n");
-- if (boot_cpu_has(X86_FEATURE_KAISER))
-- return sprintf(buf, "Mitigation: PTI\n");
-+
-+ switch (bug) {
-+ case X86_BUG_CPU_MELTDOWN:
-+ if (boot_cpu_has(X86_FEATURE_KAISER))
-+ return sprintf(buf, "Mitigation: PTI\n");
-+
-+ break;
-+
-+ case X86_BUG_SPECTRE_V1:
-+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-+
-+ case X86_BUG_SPECTRE_V2:
-+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
-+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-+ spectre_v2_module_string());
-+
-+ default:
-+ break;
-+ }
-+
- return sprintf(buf, "Vulnerable\n");
- }
-
-+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
-+{
-+ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
-+}
-+
- ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
- {
-- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
-- return sprintf(buf, "Not affected\n");
-- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
- }
-
- ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
- {
-- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-- return sprintf(buf, "Not affected\n");
--
-- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
-- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-- spectre_v2_module_string());
-+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
- }
- #endif
-2.7.4
-

deleted file mode 100644
@@ -1,143 +0,0 @@
-From 84f69ae87c13c1ab6f0bdf945f5e3710a37bcb6d Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Wed, 25 Apr 2018 22:04:18 -0400
-Subject: [PATCH 05/54] x86/bugs: Read SPEC_CTRL MSR during boot and re-use
- reserved bits
-
-commit 1b86883ccb8d5d9506529d42dbe1a5257cb30b18 upstream
-
-The 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to all
-the other bits as reserved. The Intel SDM glossary defines reserved as
-implementation specific - aka unknown.
-
-As such at bootup this must be taken it into account and proper masking for
-the bits in use applied.
-
-A copy of this document is available at
-https://bugzilla.kernel.org/show_bug.cgi?id=199511
-
-[ tglx: Made x86_spec_ctrl_base __ro_after_init ]
-
-Suggested-by: Jon Masters <jcm@redhat.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Reviewed-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/include/asm/nospec-branch.h | 24 ++++++++++++++++++++----
- arch/x86/kernel/cpu/bugs.c | 28 ++++++++++++++++++++++++++++
- 2 files changed, 48 insertions(+), 4 deletions(-)
-
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
-index 870acfc..9ec3d4d 100644
-+++ b/arch/x86/include/asm/nospec-branch.h
-@@ -217,6 +217,17 @@ enum spectre_v2_mitigation {
- SPECTRE_V2_IBRS,
- };
-
-+/*
-+ * The Intel specification for the SPEC_CTRL MSR requires that we
-+ * preserve any already set reserved bits at boot time (e.g. for
-+ * future additions that this kernel is not currently aware of).
-+ * We then set any additional mitigation bits that we want
-+ * ourselves and always use this as the base for SPEC_CTRL.
-+ * We also use this when handling guest entry/exit as below.
-+ */
-+extern void x86_spec_ctrl_set(u64);
-+extern u64 x86_spec_ctrl_get_default(void);
-+
- extern char __indirect_thunk_start[];
- extern char __indirect_thunk_end[];
-
-@@ -254,8 +265,9 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
-
- static inline void indirect_branch_prediction_barrier(void)
- {
-- alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
-- X86_FEATURE_USE_IBPB);
-+ u64 val = PRED_CMD_IBPB;
-+
-+ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
- }
-
- /*
-@@ -266,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
- */
- #define firmware_restrict_branch_speculation_start() \
- do { \
-+ u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
-+ \
- preempt_disable(); \
-- alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
-+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
- X86_FEATURE_USE_IBRS_FW); \
- } while (0)
-
- #define firmware_restrict_branch_speculation_end() \
- do { \
-- alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
-+ u64 val = x86_spec_ctrl_get_default(); \
-+ \
-+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
- X86_FEATURE_USE_IBRS_FW); \
- preempt_enable(); \
- } while (0)
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 4d9c5fe..6ff972a 100644
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -27,6 +27,12 @@
-
- static void __init spectre_v2_select_mitigation(void);
-
-+/*
-+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
-+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
-+ */
-+static u64 __ro_after_init x86_spec_ctrl_base;
-+
- void __init check_bugs(void)
- {
- identify_boot_cpu();
-@@ -36,6 +42,13 @@ void __init check_bugs(void)
- print_cpu_info(&boot_cpu_data);
- }
-
-+ /*
-+ * Read the SPEC_CTRL MSR to account for reserved bits which may
-+ * have unknown values.
-+ */
-+ if (boot_cpu_has(X86_FEATURE_IBRS))
-+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-+
- /* Select the proper spectre mitigation before patching alternatives */
- spectre_v2_select_mitigation();
-
-@@ -94,6 +107,21 @@ static const char *spectre_v2_strings[] = {
-
- static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
-
-+void x86_spec_ctrl_set(u64 val)
-+{
-+ if (val & ~SPEC_CTRL_IBRS)
-+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
-+ else
-+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
-+}
-+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
-+
-+u64 x86_spec_ctrl_get_default(void)
-+{
-+ return x86_spec_ctrl_base;
-+}
-+EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-+
- #ifdef RETPOLINE
- static bool spectre_v2_bad_module;
-
-2.7.4
-

deleted file mode 100644
@@ -1,137 +0,0 @@ |
| 1 |
-From 77da8ae16be7e944aad9306ecb481131ae77bb1f Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 25 Apr 2018 22:04:19 -0400 |
|
| 4 |
-Subject: [PATCH 06/54] x86/bugs, KVM: Support the combination of guest and |
|
| 5 |
- host IBRS |
|
| 6 |
- |
|
| 7 |
-commit 5cf687548705412da47c9cec342fd952d71ed3d5 upstream |
|
| 8 |
- |
|
| 9 |
-A guest may modify the SPEC_CTRL MSR from the value used by the |
|
| 10 |
-kernel. Since the kernel doesn't use IBRS, this means a value of zero is |
|
| 11 |
-what is needed in the host. |
|
| 12 |
- |
|
| 13 |
-But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to |
|
| 14 |
-the other bits as reserved so the kernel should respect the boot time |
|
| 15 |
-SPEC_CTRL value and use that. |
|
| 16 |
- |
|
| 17 |
-This allows us to deal with future extensions to the SPEC_CTRL interface, if |
|
| 18 |
-any at all. |
|
| 19 |
- |
|
| 20 |
-Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any |
|
| 21 |
-difference as paravirt will over-write the callq *0xfff.. with the wrmsrl |
|
| 22 |
-assembler code. |
|
| 23 |
- |
|
| 24 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 25 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 26 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 27 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 28 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 29 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 30 |
- arch/x86/include/asm/nospec-branch.h | 10 ++++++++++ |
|
| 31 |
- arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++++++++ |
|
| 32 |
- arch/x86/kvm/svm.c | 6 ++---- |
|
| 33 |
- arch/x86/kvm/vmx.c | 6 ++---- |
|
| 34 |
- 4 files changed, 32 insertions(+), 8 deletions(-) |
|
| 35 |
- |
|
| 36 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 37 |
-index 9ec3d4d..d1c26309 100644 |
|
| 38 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 39 |
-@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
|
|
| 40 |
- extern void x86_spec_ctrl_set(u64); |
|
| 41 |
- extern u64 x86_spec_ctrl_get_default(void); |
|
| 42 |
- |
|
| 43 |
-+/* |
|
| 44 |
-+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR |
|
| 45 |
-+ * the guest has, while on VMEXIT we restore the host view. This |
|
| 46 |
-+ * would be easier if SPEC_CTRL were architecturally maskable or |
|
| 47 |
-+ * shadowable for guests but this is not (currently) the case. |
|
| 48 |
-+ * Takes the guest view of SPEC_CTRL MSR as a parameter. |
|
| 49 |
-+ */ |
|
| 50 |
-+extern void x86_spec_ctrl_set_guest(u64); |
|
| 51 |
-+extern void x86_spec_ctrl_restore_host(u64); |
|
| 52 |
-+ |
|
| 53 |
- extern char __indirect_thunk_start[]; |
|
| 54 |
- extern char __indirect_thunk_end[]; |
|
| 55 |
- |
|
| 56 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 57 |
-index 6ff972a..f5cad2f 100644 |
|
| 58 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 59 |
-@@ -122,6 +122,24 @@ u64 x86_spec_ctrl_get_default(void) |
|
| 60 |
- } |
|
| 61 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); |
|
| 62 |
- |
|
| 63 |
-+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 64 |
-+{
|
|
| 65 |
-+ if (!boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 66 |
-+ return; |
|
| 67 |
-+ if (x86_spec_ctrl_base != guest_spec_ctrl) |
|
| 68 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); |
|
| 69 |
-+} |
|
| 70 |
-+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); |
|
| 71 |
-+ |
|
| 72 |
-+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 73 |
-+{
|
|
| 74 |
-+ if (!boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 75 |
-+ return; |
|
| 76 |
-+ if (x86_spec_ctrl_base != guest_spec_ctrl) |
|
| 77 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 78 |
-+} |
|
| 79 |
-+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); |
|
| 80 |
-+ |
|
| 81 |
- #ifdef RETPOLINE |
|
| 82 |
- static bool spectre_v2_bad_module; |
|
| 83 |
- |
|
| 84 |
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
|
| 85 |
-index aaa93b4..eeb8cd3 100644 |
|
| 86 |
-+++ b/arch/x86/kvm/svm.c |
|
| 87 |
-@@ -4917,8 +4917,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 88 |
- * is no need to worry about the conditional branch over the wrmsr |
|
| 89 |
- * being speculatively taken. |
|
| 90 |
- */ |
|
| 91 |
-- if (svm->spec_ctrl) |
|
| 92 |
-- native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); |
|
| 93 |
-+ x86_spec_ctrl_set_guest(svm->spec_ctrl); |
|
| 94 |
- |
|
| 95 |
- asm volatile ( |
|
| 96 |
- "push %%" _ASM_BP "; \n\t" |
|
| 97 |
-@@ -5030,8 +5029,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 98 |
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
|
| 99 |
- svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
|
| 100 |
- |
|
| 101 |
-- if (svm->spec_ctrl) |
|
| 102 |
-- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); |
|
| 103 |
-+ x86_spec_ctrl_restore_host(svm->spec_ctrl); |
|
| 104 |
- |
|
| 105 |
- /* Eliminate branch target predictions from guest mode */ |
|
| 106 |
- vmexit_fill_RSB(); |
|
| 107 |
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
|
| 108 |
-index b978aec..266db0b 100644 |
|
| 109 |
-+++ b/arch/x86/kvm/vmx.c |
|
| 110 |
-@@ -8916,8 +8916,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 111 |
- * is no need to worry about the conditional branch over the wrmsr |
|
| 112 |
- * being speculatively taken. |
|
| 113 |
- */ |
|
| 114 |
-- if (vmx->spec_ctrl) |
|
| 115 |
-- native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); |
|
| 116 |
-+ x86_spec_ctrl_set_guest(vmx->spec_ctrl); |
|
| 117 |
- |
|
| 118 |
- vmx->__launched = vmx->loaded_vmcs->launched; |
|
| 119 |
- asm( |
|
| 120 |
-@@ -9055,8 +9054,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 121 |
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
|
| 122 |
- vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
|
| 123 |
- |
|
| 124 |
-- if (vmx->spec_ctrl) |
|
| 125 |
-- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); |
|
| 126 |
-+ x86_spec_ctrl_restore_host(vmx->spec_ctrl); |
|
| 127 |
- |
|
| 128 |
- /* Eliminate branch target predictions from guest mode */ |
|
| 129 |
- vmexit_fill_RSB(); |
|
| 130 |
-2.7.4 |
|
| 131 |
- |
| 132 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,148 +0,0 @@ |
| 1 |
-From 0e2bf3faef41f14a643e992c4be77fbab56381c1 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 25 Apr 2018 22:04:20 -0400 |
|
| 4 |
-Subject: [PATCH 07/54] x86/bugs: Expose /sys/../spec_store_bypass |
|
| 5 |
- |
|
| 6 |
-commit c456442cd3a59eeb1d60293c26cbe2ff2c4e42cf upstream |
|
| 7 |
- |
|
| 8 |
-Add the sysfs file for the new vulnerability. It does not do much except |
|
| 9 |
-show the words 'Vulnerable' for recent x86 cores. |
|
| 10 |
- |
|
| 11 |
-Intel cores prior to family 6 are known not to be vulnerable, and so are |
|
| 12 |
-some Atoms and some Xeon Phi. |
|
| 13 |
- |
|
| 14 |
-It assumes that older Cyrix, Centaur, etc. cores are immune. |
|
| 15 |
- |
|
| 16 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 17 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 18 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 19 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 20 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 21 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 22 |
- Documentation/ABI/testing/sysfs-devices-system-cpu | 1 + |
|
| 23 |
- arch/x86/include/asm/cpufeatures.h | 1 + |
|
| 24 |
- arch/x86/kernel/cpu/bugs.c | 5 +++++ |
|
| 25 |
- arch/x86/kernel/cpu/common.c | 23 ++++++++++++++++++++++ |
|
| 26 |
- drivers/base/cpu.c | 8 ++++++++ |
|
| 27 |
- include/linux/cpu.h | 2 ++ |
|
| 28 |
- 6 files changed, 40 insertions(+) |
|
| 29 |
- |
|
| 30 |
-diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu |
|
| 31 |
-index dfd56ec..6d75a9c 100644 |
|
| 32 |
-+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu |
|
| 33 |
-@@ -355,6 +355,7 @@ What: /sys/devices/system/cpu/vulnerabilities |
|
| 34 |
- /sys/devices/system/cpu/vulnerabilities/meltdown |
|
| 35 |
- /sys/devices/system/cpu/vulnerabilities/spectre_v1 |
|
| 36 |
- /sys/devices/system/cpu/vulnerabilities/spectre_v2 |
|
| 37 |
-+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass |
|
| 38 |
- Date: January 2018 |
|
| 39 |
- Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> |
|
| 40 |
- Description: Information about CPU vulnerabilities |
|
| 41 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 42 |
-index a248531..a688adb 100644 |
|
| 43 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 44 |
-@@ -335,5 +335,6 @@ |
|
| 45 |
- #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ |
|
| 46 |
- #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ |
|
| 47 |
- #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ |
|
| 48 |
-+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ |
|
| 49 |
- |
|
| 50 |
- #endif /* _ASM_X86_CPUFEATURES_H */ |
|
| 51 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 52 |
-index f5cad2f..64e17a9 100644 |
|
| 53 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 54 |
-@@ -403,4 +403,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c |
|
| 55 |
- {
|
|
| 56 |
- return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); |
|
| 57 |
- } |
|
| 58 |
-+ |
|
| 59 |
-+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) |
|
| 60 |
-+{
|
|
| 61 |
-+ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); |
|
| 62 |
-+} |
|
| 63 |
- #endif |
|
| 64 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 65 |
-index 357c589..4f1050a 100644 |
|
| 66 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 67 |
-@@ -879,10 +879,33 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
|
|
| 68 |
- {}
|
|
| 69 |
- }; |
|
| 70 |
- |
|
| 71 |
-+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
|
|
| 72 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
|
|
| 73 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
|
|
| 74 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
|
|
| 75 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
|
|
| 76 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
|
|
| 77 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
|
|
| 78 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
|
|
| 79 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
|
|
| 80 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
|
|
| 81 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
|
|
| 82 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
|
|
| 83 |
-+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
|
|
| 84 |
-+ { X86_VENDOR_CENTAUR, 5, },
|
|
| 85 |
-+ { X86_VENDOR_INTEL, 5, },
|
|
| 86 |
-+ { X86_VENDOR_NSC, 5, },
|
|
| 87 |
-+ { X86_VENDOR_ANY, 4, },
|
|
| 88 |
-+ {}
|
|
| 89 |
-+}; |
|
| 90 |
-+ |
|
| 91 |
- static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
|
| 92 |
- {
|
|
| 93 |
- u64 ia32_cap = 0; |
|
| 94 |
- |
|
| 95 |
-+ if (!x86_match_cpu(cpu_no_spec_store_bypass)) |
|
| 96 |
-+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); |
|
| 97 |
-+ |
|
| 98 |
- if (x86_match_cpu(cpu_no_speculation)) |
|
| 99 |
- return; |
|
| 100 |
- |
|
| 101 |
-diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c |
|
| 102 |
-index 56b6c85..cbb1cc6 100644 |
|
| 103 |
-+++ b/drivers/base/cpu.c |
|
| 104 |
-@@ -519,14 +519,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev, |
|
| 105 |
- return sprintf(buf, "Not affected\n"); |
|
| 106 |
- } |
|
| 107 |
- |
|
| 108 |
-+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, |
|
| 109 |
-+ struct device_attribute *attr, char *buf) |
|
| 110 |
-+{
|
|
| 111 |
-+ return sprintf(buf, "Not affected\n"); |
|
| 112 |
-+} |
|
| 113 |
-+ |
|
| 114 |
- static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); |
|
| 115 |
- static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); |
|
| 116 |
- static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); |
|
| 117 |
-+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); |
|
| 118 |
- |
|
| 119 |
- static struct attribute *cpu_root_vulnerabilities_attrs[] = {
|
|
| 120 |
- &dev_attr_meltdown.attr, |
|
| 121 |
- &dev_attr_spectre_v1.attr, |
|
| 122 |
- &dev_attr_spectre_v2.attr, |
|
| 123 |
-+ &dev_attr_spec_store_bypass.attr, |
|
| 124 |
- NULL |
|
| 125 |
- }; |
|
| 126 |
- |
|
| 127 |
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h |
|
| 128 |
-index 2f475ad..917829b 100644 |
|
| 129 |
-+++ b/include/linux/cpu.h |
|
| 130 |
-@@ -50,6 +50,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev, |
|
| 131 |
- struct device_attribute *attr, char *buf); |
|
| 132 |
- extern ssize_t cpu_show_spectre_v2(struct device *dev, |
|
| 133 |
- struct device_attribute *attr, char *buf); |
|
| 134 |
-+extern ssize_t cpu_show_spec_store_bypass(struct device *dev, |
|
| 135 |
-+ struct device_attribute *attr, char *buf); |
|
| 136 |
- |
|
| 137 |
- extern __printf(4, 5) |
|
| 138 |
- struct device *cpu_device_create(struct device *parent, void *drvdata, |
|
| 139 |
-2.7.4 |
|
| 140 |
- |
| 141 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,36 +0,0 @@ |
| 1 |
-From 4cd0acbafcc274cc32fe6aa371e2c444741f66d3 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Sat, 28 Apr 2018 22:34:17 +0200 |
|
| 4 |
-Subject: [PATCH 08/54] x86/cpufeatures: Add X86_FEATURE_RDS |
|
| 5 |
- |
|
| 6 |
-commit 0cc5fa00b0a88dad140b4e5c2cead9951ad36822 upstream |
|
| 7 |
- |
|
| 8 |
-Add the CPU feature bit CPUID.7.0.EDX[31] which indicates whether the CPU |
|
| 9 |
-supports Reduced Data Speculation. |
|
| 10 |
- |
|
| 11 |
-[ tglx: Split it out from a later patch ] |
|
| 12 |
- |
|
| 13 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 14 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 15 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 16 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 17 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 18 |
- arch/x86/include/asm/cpufeatures.h | 1 + |
|
| 19 |
- 1 file changed, 1 insertion(+) |
|
| 20 |
- |
|
| 21 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 22 |
-index a688adb..0c05c6c 100644 |
|
| 23 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 24 |
-@@ -306,6 +306,7 @@ |
|
| 25 |
- #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
|
| 26 |
- #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
|
| 27 |
- #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
|
| 28 |
-+#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */ |
|
| 29 |
- |
|
| 30 |
- /* |
|
| 31 |
- * BUG word(s) |
|
| 32 |
-2.7.4 |
|
| 33 |
- |
| 34 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,272 +0,0 @@ |
| 1 |
-From e247cb769c45f3b7faa0efa8d854be2d2b6e52bf Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 25 Apr 2018 22:04:21 -0400 |
|
| 4 |
-Subject: [PATCH 09/54] x86/bugs: Provide boot parameters for the |
|
| 5 |
- spec_store_bypass_disable mitigation |
|
| 6 |
- |
|
| 7 |
-commit 24f7fc83b9204d20f878c57cb77d261ae825e033 upstream |
|
| 8 |
- |
|
| 9 |
-Contemporary high performance processors use a common industry-wide |
|
| 10 |
-optimization known as "Speculative Store Bypass" in which loads from |
|
| 11 |
-addresses to which a recent store has occurred may (speculatively) see an |
|
| 12 |
-older value. Intel refers to this feature as "Memory Disambiguation" which |
|
| 13 |
-is part of their "Smart Memory Access" capability. |
|
| 14 |
- |
|
| 15 |
-Memory Disambiguation can expose a cache side-channel attack against such |
|
| 16 |
-speculatively read values. An attacker can create exploit code that allows |
|
| 17 |
-them to read memory outside of a sandbox environment (for example, |
|
| 18 |
-malicious JavaScript in a web page), or to perform more complex attacks |
|
| 19 |
-against code running within the same privilege level, e.g. via the stack. |
|
| 20 |
- |
|
| 21 |
-As a first step to mitigate against such attacks, provide two boot command |
|
| 22 |
-line control knobs: |
|
| 23 |
- |
|
| 24 |
- nospec_store_bypass_disable |
|
| 25 |
- spec_store_bypass_disable=[off,auto,on] |
|
| 26 |
- |
|
| 27 |
-By default affected x86 processors will power on with Speculative |
|
| 28 |
-Store Bypass enabled. Hence the provided kernel parameters are written |
|
| 29 |
-from the point of view of whether to enable a mitigation or not. |
|
| 30 |
-The parameters are as follows: |
|
| 31 |
- |
|
| 32 |
- - auto - Kernel detects whether your CPU model contains an implementation |
|
| 33 |
- of Speculative Store Bypass and picks the most appropriate |
|
| 34 |
- mitigation. |
|
| 35 |
- |
|
| 36 |
- - on - disable Speculative Store Bypass |
|
| 37 |
- - off - enable Speculative Store Bypass |
|
| 38 |
- |
|
| 39 |
-[ tglx: Reordered the checks so that the whole evaluation is not done |
|
| 40 |
- when the CPU does not support RDS ] |
|
| 41 |
- |
|
| 42 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 43 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 44 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 45 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 46 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 47 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 48 |
- Documentation/kernel-parameters.txt | 33 +++++++++++ |
|
| 49 |
- arch/x86/include/asm/cpufeatures.h | 1 + |
|
| 50 |
- arch/x86/include/asm/nospec-branch.h | 6 ++ |
|
| 51 |
- arch/x86/kernel/cpu/bugs.c | 103 +++++++++++++++++++++++++++++++++++ |
|
| 52 |
- 4 files changed, 143 insertions(+) |
|
| 53 |
- |
|
| 54 |
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt |
|
| 55 |
-index 5f9e514..792ac91 100644 |
|
| 56 |
-+++ b/Documentation/kernel-parameters.txt |
|
| 57 |
-@@ -2699,6 +2699,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
|
| 58 |
- allow data leaks with this option, which is equivalent |
|
| 59 |
- to spectre_v2=off. |
|
| 60 |
- |
|
| 61 |
-+ nospec_store_bypass_disable |
|
| 62 |
-+ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability |
|
| 63 |
-+ |
|
| 64 |
- noxsave [BUGS=X86] Disables x86 extended register state save |
|
| 65 |
- and restore using xsave. The kernel will fallback to |
|
| 66 |
- enabling legacy floating-point and sse state. |
|
| 67 |
-@@ -3973,6 +3976,36 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
|
| 68 |
- Not specifying this option is equivalent to |
|
| 69 |
- spectre_v2=auto. |
|
| 70 |
- |
|
| 71 |
-+ spec_store_bypass_disable= |
|
| 72 |
-+ [HW] Control Speculative Store Bypass (SSB) Disable mitigation |
|
| 73 |
-+ (Speculative Store Bypass vulnerability) |
|
| 74 |
-+ |
|
| 75 |
-+ Certain CPUs are vulnerable to an exploit against a |
|
| 76 |
-+ common industry wide performance optimization known |
|
| 77 |
-+ as "Speculative Store Bypass" in which recent stores |
|
| 78 |
-+ to the same memory location may not be observed by |
|
| 79 |
-+ later loads during speculative execution. The idea |
|
| 80 |
-+ is that such stores are unlikely and that they can |
|
| 81 |
-+ be detected prior to instruction retirement at the |
|
| 82 |
-+ end of a particular speculation execution window. |
|
| 83 |
-+ |
|
| 84 |
-+ In vulnerable processors, the speculatively forwarded |
|
| 85 |
-+ store can be used in a cache side channel attack, for |
|
| 86 |
-+ example to read memory to which the attacker does not |
|
| 87 |
-+ directly have access (e.g. inside sandboxed code). |
|
| 88 |
-+ |
|
| 89 |
-+ This parameter controls whether the Speculative Store |
|
| 90 |
-+ Bypass optimization is used. |
|
| 91 |
-+ |
|
| 92 |
-+ on - Unconditionally disable Speculative Store Bypass |
|
| 93 |
-+ off - Unconditionally enable Speculative Store Bypass |
|
| 94 |
-+ auto - Kernel detects whether the CPU model contains an |
|
| 95 |
-+ implementation of Speculative Store Bypass and |
|
| 96 |
-+ picks the most appropriate mitigation |
|
| 97 |
-+ |
|
| 98 |
-+ Not specifying this option is equivalent to |
|
| 99 |
-+ spec_store_bypass_disable=auto. |
|
| 100 |
-+ |
|
| 101 |
- spia_io_base= [HW,MTD] |
|
| 102 |
- spia_fio_base= |
|
| 103 |
- spia_pedr= |
|
| 104 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 105 |
-index 0c05c6c..013f3de 100644 |
|
| 106 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 107 |
-@@ -204,6 +204,7 @@ |
|
| 108 |
- |
|
| 109 |
- #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
|
| 110 |
- #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ |
|
| 111 |
-+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ |
|
| 112 |
- |
|
| 113 |
- /* Virtualization flags: Linux defined, word 8 */ |
|
| 114 |
- #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
|
| 115 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 116 |
-index d1c26309..7b9eacf 100644 |
|
| 117 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 118 |
-@@ -238,6 +238,12 @@ extern u64 x86_spec_ctrl_get_default(void); |
|
| 119 |
- extern void x86_spec_ctrl_set_guest(u64); |
|
| 120 |
- extern void x86_spec_ctrl_restore_host(u64); |
|
| 121 |
- |
|
| 122 |
-+/* The Speculative Store Bypass disable variants */ |
|
| 123 |
-+enum ssb_mitigation {
|
|
| 124 |
-+ SPEC_STORE_BYPASS_NONE, |
|
| 125 |
-+ SPEC_STORE_BYPASS_DISABLE, |
|
| 126 |
-+}; |
|
| 127 |
-+ |
|
| 128 |
- extern char __indirect_thunk_start[]; |
|
| 129 |
- extern char __indirect_thunk_end[]; |
|
| 130 |
- |
|
| 131 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 132 |
-index 64e17a9..75146d9 100644 |
|
| 133 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 134 |
-@@ -26,6 +26,7 @@ |
|
| 135 |
- #include <asm/intel-family.h> |
|
| 136 |
- |
|
| 137 |
- static void __init spectre_v2_select_mitigation(void); |
|
| 138 |
-+static void __init ssb_select_mitigation(void); |
|
| 139 |
- |
|
| 140 |
- /* |
|
| 141 |
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any |
|
| 142 |
-@@ -52,6 +53,12 @@ void __init check_bugs(void) |
|
| 143 |
- /* Select the proper spectre mitigation before patching alternatives */ |
|
| 144 |
- spectre_v2_select_mitigation(); |
|
| 145 |
- |
|
| 146 |
-+ /* |
|
| 147 |
-+ * Select proper mitigation for any exposure to the Speculative Store |
|
| 148 |
-+ * Bypass vulnerability. |
|
| 149 |
-+ */ |
|
| 150 |
-+ ssb_select_mitigation(); |
|
| 151 |
-+ |
|
| 152 |
- #ifdef CONFIG_X86_32 |
|
| 153 |
- /* |
|
| 154 |
- * Check whether we are able to run this kernel safely on SMP. |
|
| 155 |
-@@ -357,6 +364,99 @@ static void __init spectre_v2_select_mitigation(void) |
|
| 156 |
- } |
|
| 157 |
- |
|
| 158 |
- #undef pr_fmt |
|
| 159 |
-+#define pr_fmt(fmt) "Speculative Store Bypass: " fmt |
|
| 160 |
-+ |
|
| 161 |
-+static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE; |
|
| 162 |
-+ |
|
| 163 |
-+/* The kernel command line selection */ |
|
| 164 |
-+enum ssb_mitigation_cmd {
|
|
| 165 |
-+ SPEC_STORE_BYPASS_CMD_NONE, |
|
| 166 |
-+ SPEC_STORE_BYPASS_CMD_AUTO, |
|
| 167 |
-+ SPEC_STORE_BYPASS_CMD_ON, |
|
| 168 |
-+}; |
|
| 169 |
-+ |
|
| 170 |
-+static const char *ssb_strings[] = {
|
|
| 171 |
-+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable", |
|
| 172 |
-+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled" |
|
| 173 |
-+}; |
|
| 174 |
-+ |
|
| 175 |
-+static const struct {
|
|
| 176 |
-+ const char *option; |
|
| 177 |
-+ enum ssb_mitigation_cmd cmd; |
|
| 178 |
-+} ssb_mitigation_options[] = {
|
|
| 179 |
-+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
| 180 |
-+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
| 181 |
-+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
| 182 |
-+}; |
|
| 183 |
-+ |
|
| 184 |
-+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) |
|
| 185 |
-+{
|
|
| 186 |
-+ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; |
|
| 187 |
-+ char arg[20]; |
|
| 188 |
-+ int ret, i; |
|
| 189 |
-+ |
|
| 190 |
-+ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
|
|
| 191 |
-+ return SPEC_STORE_BYPASS_CMD_NONE; |
|
| 192 |
-+ } else {
|
|
| 193 |
-+ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", |
|
| 194 |
-+ arg, sizeof(arg)); |
|
| 195 |
-+ if (ret < 0) |
|
| 196 |
-+ return SPEC_STORE_BYPASS_CMD_AUTO; |
|
| 197 |
-+ |
|
| 198 |
-+ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
|
|
| 199 |
-+ if (!match_option(arg, ret, ssb_mitigation_options[i].option)) |
|
| 200 |
-+ continue; |
|
| 201 |
-+ |
|
| 202 |
-+ cmd = ssb_mitigation_options[i].cmd; |
|
| 203 |
-+ break; |
|
| 204 |
-+ } |
|
| 205 |
-+ |
|
| 206 |
-+ if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
|
|
| 207 |
-+ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
|
|
| 208 |
-+ return SPEC_STORE_BYPASS_CMD_AUTO; |
|
| 209 |
-+ } |
|
| 210 |
-+ } |
|
| 211 |
-+ |
|
| 212 |
-+ return cmd; |
|
| 213 |
-+} |
|
| 214 |
-+ |
|
| 215 |
-+static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 216 |
-+{
|
|
| 217 |
-+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; |
|
| 218 |
-+ enum ssb_mitigation_cmd cmd; |
|
| 219 |
-+ |
|
| 220 |
-+ if (!boot_cpu_has(X86_FEATURE_RDS)) |
|
| 221 |
-+ return mode; |
|
| 222 |
-+ |
|
| 223 |
-+ cmd = ssb_parse_cmdline(); |
|
| 224 |
-+ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && |
|
| 225 |
-+ (cmd == SPEC_STORE_BYPASS_CMD_NONE || |
|
| 226 |
-+ cmd == SPEC_STORE_BYPASS_CMD_AUTO)) |
|
| 227 |
-+ return mode; |
|
| 228 |
-+ |
|
| 229 |
-+ switch (cmd) {
|
|
| 230 |
-+ case SPEC_STORE_BYPASS_CMD_AUTO: |
|
| 231 |
-+ case SPEC_STORE_BYPASS_CMD_ON: |
|
| 232 |
-+ mode = SPEC_STORE_BYPASS_DISABLE; |
|
| 233 |
-+ break; |
|
| 234 |
-+ case SPEC_STORE_BYPASS_CMD_NONE: |
|
| 235 |
-+ break; |
|
| 236 |
-+ } |
|
| 237 |
-+ |
|
| 238 |
-+ if (mode != SPEC_STORE_BYPASS_NONE) |
|
| 239 |
-+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); |
|
| 240 |
-+ return mode; |
|
| 241 |
-+} |
|
| 242 |
-+ |
|
| 243 |
-+static void ssb_select_mitigation() |
|
| 244 |
-+{
|
|
| 245 |
-+ ssb_mode = __ssb_select_mitigation(); |
|
| 246 |
-+ |
|
| 247 |
-+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
|
| 248 |
-+ pr_info("%s\n", ssb_strings[ssb_mode]);
|
|
| 249 |
-+} |
|
| 250 |
-+ |
|
| 251 |
-+#undef pr_fmt |
|
| 252 |
- |
|
| 253 |
- #ifdef CONFIG_SYSFS |
|
| 254 |
- |
|
| 255 |
-@@ -382,6 +482,9 @@ ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
|
| 256 |
- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", |
|
| 257 |
- spectre_v2_module_string()); |
|
| 258 |
- |
|
| 259 |
-+ case X86_BUG_SPEC_STORE_BYPASS: |
|
| 260 |
-+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); |
|
| 261 |
-+ |
|
| 262 |
- default: |
|
| 263 |
- break; |
|
| 264 |
- } |
|
| 265 |
-2.7.4 |
|
| 266 |
- |
| 267 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,183 +0,0 @@ |
| 1 |
-From 7b6ef10e387fe35068656595506ff0e80e5dc8cf Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 25 Apr 2018 22:04:22 -0400 |
|
| 4 |
-Subject: [PATCH 10/54] x86/bugs/intel: Set proper CPU features and setup RDS |
|
| 5 |
- |
|
| 6 |
-commit 772439717dbf703b39990be58d8d4e3e4ad0598a upstream |
|
| 7 |
- |
|
| 8 |
-Intel CPUs expose methods to: |
|
| 9 |
- |
|
| 10 |
- - Detect whether RDS capability is available via CPUID.7.0.EDX[31], |
|
| 11 |
- |
|
| 12 |
- - The SPEC_CTRL MSR(0x48), bit 2 set to enable RDS. |
|
| 13 |
- |
|
| 14 |
- - MSR_IA32_ARCH_CAPABILITIES, Bit(4) no need to enable RDS. |
|
| 15 |
- |
|
| 16 |
-With that in mind, if spec_store_bypass_disable=[auto,on] is selected, set at |
|
| 17 |
-boot-time the SPEC_CTRL MSR to enable RDS if the platform requires it. |
|
| 18 |
- |
|
| 19 |
-Note that this does not fix the KVM case where the SPEC_CTRL is exposed to |
|
| 20 |
-guests which can muck with it, see patch titled: |
|
| 21 |
- KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS. |
|
| 22 |
- |
|
| 23 |
-And for the firmware (IBRS to be set), see patch titled: |
|
| 24 |
- x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits |
|
| 25 |
- |
|
| 26 |
-[ tglx: Disentangled it from the Intel implementation and kept the call order ] |
|
| 27 |
- |
|
| 28 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 29 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 30 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 31 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 32 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 33 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 34 |
- arch/x86/include/asm/msr-index.h | 6 ++++++ |
|
| 35 |
- arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++-- |
|
| 36 |
- arch/x86/kernel/cpu/common.c | 10 ++++++---- |
|
| 37 |
- arch/x86/kernel/cpu/cpu.h | 3 +++ |
|
| 38 |
- arch/x86/kernel/cpu/intel.c | 1 + |
|
| 39 |
- 5 files changed, 44 insertions(+), 6 deletions(-) |
|
| 40 |
- |
|
| 41 |
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
|
| 42 |
-index c768bc1..87103e8 100644 |
|
| 43 |
-+++ b/arch/x86/include/asm/msr-index.h |
|
| 44 |
-@@ -40,6 +40,7 @@ |
|
| 45 |
- #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ |
|
| 46 |
- #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ |
|
| 47 |
- #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ |
|
| 48 |
-+#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */ |
|
| 49 |
- |
|
| 50 |
- #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ |
|
| 51 |
- #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ |
|
| 52 |
-@@ -61,6 +62,11 @@ |
|
| 53 |
- #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a |
|
| 54 |
- #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ |
|
| 55 |
- #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ |
|
| 56 |
-+#define ARCH_CAP_RDS_NO (1 << 4) /* |
|
| 57 |
-+ * Not susceptible to Speculative Store Bypass |
|
| 58 |
-+ * attack, so no Reduced Data Speculation control |
|
| 59 |
-+ * required. |
|
| 60 |
-+ */ |
|
| 61 |
- |
|
| 62 |
- #define MSR_IA32_BBL_CR_CTL 0x00000119 |
|
| 63 |
- #define MSR_IA32_BBL_CR_CTL3 0x0000011e |
|
| 64 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 65 |
-index 75146d9..7dd16f4 100644 |
|
| 66 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 67 |
-@@ -116,7 +116,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; |
|
| 68 |
- |
|
| 69 |
- void x86_spec_ctrl_set(u64 val) |
|
| 70 |
- {
|
|
| 71 |
-- if (val & ~SPEC_CTRL_IBRS) |
|
| 72 |
-+ if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS)) |
|
| 73 |
- WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val); |
|
| 74 |
- else |
|
| 75 |
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val); |
|
| 76 |
-@@ -443,8 +443,28 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 77 |
- break; |
|
| 78 |
- } |
|
| 79 |
- |
|
| 80 |
-- if (mode != SPEC_STORE_BYPASS_NONE) |
|
| 81 |
-+ /* |
|
| 82 |
-+ * We have three CPU feature flags that are in play here: |
|
| 83 |
-+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. |
|
| 84 |
-+ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass |
|
| 85 |
-+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation |
|
| 86 |
-+ */ |
|
| 87 |
-+ if (mode != SPEC_STORE_BYPASS_NONE) {
|
|
| 88 |
- setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); |
|
| 89 |
-+ /* |
|
| 90 |
-+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses |
|
| 91 |
-+ * a completely different MSR and bit dependent on family. |
|
| 92 |
-+ */ |
|
| 93 |
-+ switch (boot_cpu_data.x86_vendor) {
|
|
| 94 |
-+ case X86_VENDOR_INTEL: |
|
| 95 |
-+ x86_spec_ctrl_base |= SPEC_CTRL_RDS; |
|
| 96 |
-+ x86_spec_ctrl_set(SPEC_CTRL_RDS); |
|
| 97 |
-+ break; |
|
| 98 |
-+ case X86_VENDOR_AMD: |
|
| 99 |
-+ break; |
|
| 100 |
-+ } |
|
| 101 |
-+ } |
|
| 102 |
-+ |
|
| 103 |
- return mode; |
|
| 104 |
- } |
|
| 105 |
- |
|
| 106 |
-@@ -458,6 +478,12 @@ static void ssb_select_mitigation() |
|
| 107 |
- |
|
| 108 |
- #undef pr_fmt |
|
| 109 |
- |
|
| 110 |
-+void x86_spec_ctrl_setup_ap(void) |
|
| 111 |
-+{
|
|
| 112 |
-+ if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 113 |
-+ x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS)); |
|
| 114 |
-+} |
|
| 115 |
-+ |
|
| 116 |
- #ifdef CONFIG_SYSFS |
|
| 117 |
- |
|
| 118 |
- ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
|
| 119 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 120 |
-index 4f1050a..ab6b3ad 100644 |
|
| 121 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 122 |
-@@ -903,7 +903,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
|
| 123 |
- {
|
|
| 124 |
- u64 ia32_cap = 0; |
|
| 125 |
- |
|
| 126 |
-- if (!x86_match_cpu(cpu_no_spec_store_bypass)) |
|
| 127 |
-+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) |
|
| 128 |
-+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
|
| 129 |
-+ |
|
| 130 |
-+ if (!x86_match_cpu(cpu_no_spec_store_bypass) && |
|
| 131 |
-+ !(ia32_cap & ARCH_CAP_RDS_NO)) |
|
| 132 |
- setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); |
|
| 133 |
- |
|
| 134 |
- if (x86_match_cpu(cpu_no_speculation)) |
|
| 135 |
-@@ -915,9 +919,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
|
| 136 |
- if (x86_match_cpu(cpu_no_meltdown)) |
|
| 137 |
- return; |
|
| 138 |
- |
|
| 139 |
-- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) |
|
| 140 |
-- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
|
| 141 |
-- |
|
| 142 |
- /* Rogue Data Cache Load? No! */ |
|
| 143 |
- if (ia32_cap & ARCH_CAP_RDCL_NO) |
|
| 144 |
- return; |
|
| 145 |
-@@ -1339,6 +1340,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) |
|
| 146 |
- #endif |
|
| 147 |
- mtrr_ap_init(); |
|
| 148 |
- validate_apic_and_package_id(c); |
|
| 149 |
-+ x86_spec_ctrl_setup_ap(); |
|
| 150 |
- } |
|
| 151 |
- |
|
| 152 |
- struct msr_range {
|
|
| 153 |
-diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h |
|
| 154 |
-index 2584265..3b19d82 100644 |
|
| 155 |
-+++ b/arch/x86/kernel/cpu/cpu.h |
|
| 156 |
-@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], |
|
| 157 |
- |
|
| 158 |
- extern void get_cpu_cap(struct cpuinfo_x86 *c); |
|
| 159 |
- extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
|
| 160 |
-+ |
|
| 161 |
-+extern void x86_spec_ctrl_setup_ap(void); |
|
| 162 |
-+ |
|
| 163 |
- #endif /* ARCH_X86_CPU_H */ |
|
| 164 |
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c |
|
| 165 |
-index 8fb1d65..f15aea6 100644 |
|
| 166 |
-+++ b/arch/x86/kernel/cpu/intel.c |
|
| 167 |
-@@ -154,6 +154,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
|
| 168 |
- setup_clear_cpu_cap(X86_FEATURE_STIBP); |
|
| 169 |
- setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); |
|
| 170 |
- setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); |
|
| 171 |
-+ setup_clear_cpu_cap(X86_FEATURE_RDS); |
|
| 172 |
- } |
|
| 173 |
- |
|
| 174 |
- /* |
|
| 175 |
-2.7.4 |
|
| 176 |
- |
| 177 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,70 +0,0 @@ |
| 1 |
-From 3cf8074c8d4211e42bb7a57bfbf97bb39160a1d2 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 25 Apr 2018 22:04:23 -0400 |
|
| 4 |
-Subject: [PATCH 11/54] x86/bugs: Whitelist allowed SPEC_CTRL MSR values |
|
| 5 |
- |
|
| 6 |
-commit 1115a859f33276fe8afb31c60cf9d8e657872558 upstream |
|
| 7 |
- |
|
| 8 |
-Intel and AMD SPEC_CTRL (0x48) MSR semantics may differ in the |
|
| 9 |
-future (or in fact use different MSRs for the same functionality). |
|
| 10 |
- |
|
| 11 |
-As such a run-time mechanism is required to whitelist the appropriate MSR |
|
| 12 |
-values. |
|
| 13 |
- |
|
| 14 |
-[ tglx: Made the variable __ro_after_init ] |
|
| 15 |
- |
|
| 16 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 17 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 18 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 19 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 20 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 21 |
- arch/x86/kernel/cpu/bugs.c | 11 +++++++++-- |
|
| 22 |
- 1 file changed, 9 insertions(+), 2 deletions(-) |
|
| 23 |
- |
|
| 24 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 25 |
-index 7dd16f4..b92c469 100644 |
|
| 26 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 27 |
-@@ -34,6 +34,12 @@ static void __init ssb_select_mitigation(void); |
|
| 28 |
- */ |
|
| 29 |
- static u64 __ro_after_init x86_spec_ctrl_base; |
|
| 30 |
- |
|
| 31 |
-+/* |
|
| 32 |
-+ * The vendor and possibly platform specific bits which can be modified in |
|
| 33 |
-+ * x86_spec_ctrl_base. |
|
| 34 |
-+ */ |
|
| 35 |
-+static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS; |
|
| 36 |
-+ |
|
| 37 |
- void __init check_bugs(void) |
|
| 38 |
- {
|
|
| 39 |
- identify_boot_cpu(); |
|
| 40 |
-@@ -116,7 +122,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; |
|
| 41 |
- |
|
| 42 |
- void x86_spec_ctrl_set(u64 val) |
|
| 43 |
- {
|
|
| 44 |
-- if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS)) |
|
| 45 |
-+ if (val & x86_spec_ctrl_mask) |
|
| 46 |
- WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val); |
|
| 47 |
- else |
|
| 48 |
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val); |
|
| 49 |
-@@ -458,6 +464,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 50 |
- switch (boot_cpu_data.x86_vendor) {
|
|
| 51 |
- case X86_VENDOR_INTEL: |
|
| 52 |
- x86_spec_ctrl_base |= SPEC_CTRL_RDS; |
|
| 53 |
-+ x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS; |
|
| 54 |
- x86_spec_ctrl_set(SPEC_CTRL_RDS); |
|
| 55 |
- break; |
|
| 56 |
- case X86_VENDOR_AMD: |
|
| 57 |
-@@ -481,7 +488,7 @@ static void ssb_select_mitigation() |
|
| 58 |
- void x86_spec_ctrl_setup_ap(void) |
|
| 59 |
- {
|
|
| 60 |
- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 61 |
-- x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS)); |
|
| 62 |
-+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask); |
|
| 63 |
- } |
|
| 64 |
- |
|
| 65 |
- #ifdef CONFIG_SYSFS |
|
| 66 |
-2.7.4 |
|
| 67 |
- |
| 68 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,200 +0,0 @@ |
| 1 |
-From 0c6f26d225be4797e07f72a7d05437d0f750b758 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: David Woodhouse <dwmw@amazon.co.uk> |
|
| 3 |
-Date: Sun, 20 May 2018 20:52:05 +0100 |
|
| 4 |
-Subject: [PATCH 12/54] x86/bugs/AMD: Add support to disable RDS on |
|
| 5 |
- Fam[15,16,17]h if requested |
|
| 6 |
- |
|
| 7 |
-commit 764f3c21588a059cd783c6ba0734d4db2d72822d upstream |
|
| 8 |
- |
|
| 9 |
-AMD does not need the Speculative Store Bypass mitigation to be enabled. |
|
| 10 |
- |
|
| 11 |
-The parameters for this are already available and can be done via MSR |
|
| 12 |
-C001_1020. Each family uses a different bit in that MSR for this. |
|
| 13 |
- |
|
| 14 |
-[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling |
|
| 15 |
- into the bugs code as that's the right thing to do and also required |
|
| 16 |
- to prepare for dynamic enable/disable ] |
|
| 17 |
- |
|
| 18 |
-Suggested-by: Borislav Petkov <bp@suse.de> |
|
| 19 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 20 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 21 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 22 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 23 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 24 |
- arch/x86/include/asm/cpufeatures.h | 1 + |
|
| 25 |
- arch/x86/include/asm/nospec-branch.h | 4 ++++ |
|
| 26 |
- arch/x86/kernel/cpu/amd.c | 26 ++++++++++++++++++++++++++ |
|
| 27 |
- arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++++++++++++- |
|
| 28 |
- arch/x86/kernel/cpu/common.c | 4 ++++ |
|
| 29 |
- 5 files changed, 61 insertions(+), 1 deletion(-) |
|
| 30 |
- |
|
| 31 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 32 |
-index 013f3de..8797069 100644 |
|
| 33 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 34 |
-@@ -205,6 +205,7 @@ |
|
| 35 |
- #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
|
| 36 |
- #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ |
|
| 37 |
- #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ |
|
| 38 |
-+#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */ |
|
| 39 |
- |
|
| 40 |
- /* Virtualization flags: Linux defined, word 8 */ |
|
| 41 |
- #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
|
| 42 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 43 |
-index 7b9eacf..3a1541c 100644 |
|
| 44 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 45 |
-@@ -244,6 +244,10 @@ enum ssb_mitigation {
|
|
| 46 |
- SPEC_STORE_BYPASS_DISABLE, |
|
| 47 |
- }; |
|
| 48 |
- |
|
| 49 |
-+/* AMD specific Speculative Store Bypass MSR data */ |
|
| 50 |
-+extern u64 x86_amd_ls_cfg_base; |
|
| 51 |
-+extern u64 x86_amd_ls_cfg_rds_mask; |
|
| 52 |
-+ |
|
| 53 |
- extern char __indirect_thunk_start[]; |
|
| 54 |
- extern char __indirect_thunk_end[]; |
|
| 55 |
- |
|
| 56 |
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
|
| 57 |
-index 747f8a2..7551d9ad 100644 |
|
| 58 |
-+++ b/arch/x86/kernel/cpu/amd.c |
|
| 59 |
-@@ -9,6 +9,7 @@ |
|
| 60 |
- #include <asm/processor.h> |
|
| 61 |
- #include <asm/apic.h> |
|
| 62 |
- #include <asm/cpu.h> |
|
| 63 |
-+#include <asm/nospec-branch.h> |
|
| 64 |
- #include <asm/smp.h> |
|
| 65 |
- #include <asm/pci-direct.h> |
|
| 66 |
- #include <asm/delay.h> |
|
| 67 |
-@@ -542,6 +543,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) |
|
| 68 |
- rdmsrl(MSR_FAM10H_NODE_ID, value); |
|
| 69 |
- nodes_per_socket = ((value >> 3) & 7) + 1; |
|
| 70 |
- } |
|
| 71 |
-+ |
|
| 72 |
-+ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
|
|
| 73 |
-+ unsigned int bit; |
|
| 74 |
-+ |
|
| 75 |
-+ switch (c->x86) {
|
|
| 76 |
-+ case 0x15: bit = 54; break; |
|
| 77 |
-+ case 0x16: bit = 33; break; |
|
| 78 |
-+ case 0x17: bit = 10; break; |
|
| 79 |
-+ default: return; |
|
| 80 |
-+ } |
|
| 81 |
-+ /* |
|
| 82 |
-+ * Try to cache the base value so further operations can |
|
| 83 |
-+ * avoid RMW. If that faults, do not enable RDS. |
|
| 84 |
-+ */ |
|
| 85 |
-+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
|
|
| 86 |
-+ setup_force_cpu_cap(X86_FEATURE_RDS); |
|
| 87 |
-+ setup_force_cpu_cap(X86_FEATURE_AMD_RDS); |
|
| 88 |
-+ x86_amd_ls_cfg_rds_mask = 1ULL << bit; |
|
| 89 |
-+ } |
|
| 90 |
-+ } |
|
| 91 |
- } |
|
| 92 |
- |
|
| 93 |
- static void early_init_amd(struct cpuinfo_x86 *c) |
|
| 94 |
-@@ -827,6 +848,11 @@ static void init_amd(struct cpuinfo_x86 *c) |
|
| 95 |
- /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ |
|
| 96 |
- if (!cpu_has(c, X86_FEATURE_XENPV)) |
|
| 97 |
- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); |
|
| 98 |
-+ |
|
| 99 |
-+ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
|
|
| 100 |
-+ set_cpu_cap(c, X86_FEATURE_RDS); |
|
| 101 |
-+ set_cpu_cap(c, X86_FEATURE_AMD_RDS); |
|
| 102 |
-+ } |
|
| 103 |
- } |
|
| 104 |
- |
|
| 105 |
- #ifdef CONFIG_X86_32 |
|
| 106 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 107 |
-index b92c469..b3696cc 100644 |
|
| 108 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 109 |
-@@ -40,6 +40,13 @@ static u64 __ro_after_init x86_spec_ctrl_base; |
|
| 110 |
- */ |
|
| 111 |
- static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS; |
|
| 112 |
- |
|
| 113 |
-+/* |
|
| 114 |
-+ * AMD specific MSR info for Speculative Store Bypass control. |
|
| 115 |
-+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu(). |
|
| 116 |
-+ */ |
|
| 117 |
-+u64 __ro_after_init x86_amd_ls_cfg_base; |
|
| 118 |
-+u64 __ro_after_init x86_amd_ls_cfg_rds_mask; |
|
| 119 |
-+ |
|
| 120 |
- void __init check_bugs(void) |
|
| 121 |
- {
|
|
| 122 |
- identify_boot_cpu(); |
|
| 123 |
-@@ -51,7 +58,8 @@ void __init check_bugs(void) |
|
| 124 |
- |
|
| 125 |
- /* |
|
| 126 |
- * Read the SPEC_CTRL MSR to account for reserved bits which may |
|
| 127 |
-- * have unknown values. |
|
| 128 |
-+ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD |
|
| 129 |
-+ * init code as it is not enumerated and depends on the family. |
|
| 130 |
- */ |
|
| 131 |
- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 132 |
- rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 133 |
-@@ -153,6 +161,14 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 134 |
- } |
|
| 135 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); |
|
| 136 |
- |
|
| 137 |
-+static void x86_amd_rds_enable(void) |
|
| 138 |
-+{
|
|
| 139 |
-+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask; |
|
| 140 |
-+ |
|
| 141 |
-+ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) |
|
| 142 |
-+ wrmsrl(MSR_AMD64_LS_CFG, msrval); |
|
| 143 |
-+} |
|
| 144 |
-+ |
|
| 145 |
- #ifdef RETPOLINE |
|
| 146 |
- static bool spectre_v2_bad_module; |
|
| 147 |
- |
|
| 148 |
-@@ -442,6 +458,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 149 |
- |
|
| 150 |
- switch (cmd) {
|
|
| 151 |
- case SPEC_STORE_BYPASS_CMD_AUTO: |
|
| 152 |
-+ /* |
|
| 153 |
-+ * AMD platforms by default don't need SSB mitigation. |
|
| 154 |
-+ */ |
|
| 155 |
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) |
|
| 156 |
-+ break; |
|
| 157 |
- case SPEC_STORE_BYPASS_CMD_ON: |
|
| 158 |
- mode = SPEC_STORE_BYPASS_DISABLE; |
|
| 159 |
- break; |
|
| 160 |
-@@ -468,6 +489,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 161 |
- x86_spec_ctrl_set(SPEC_CTRL_RDS); |
|
| 162 |
- break; |
|
| 163 |
- case X86_VENDOR_AMD: |
|
| 164 |
-+ x86_amd_rds_enable(); |
|
| 165 |
- break; |
|
| 166 |
- } |
|
| 167 |
- } |
|
| 168 |
-@@ -489,6 +511,9 @@ void x86_spec_ctrl_setup_ap(void) |
|
| 169 |
- {
|
|
| 170 |
- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 171 |
- x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask); |
|
| 172 |
-+ |
|
| 173 |
-+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) |
|
| 174 |
-+ x86_amd_rds_enable(); |
|
| 175 |
- } |
|
| 176 |
- |
|
| 177 |
- #ifdef CONFIG_SYSFS |
|
| 178 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 179 |
-index ab6b3ad..beb1da8 100644 |
|
| 180 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 181 |
-@@ -895,6 +895,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
|
|
| 182 |
- { X86_VENDOR_CENTAUR, 5, },
|
|
| 183 |
- { X86_VENDOR_INTEL, 5, },
|
|
| 184 |
- { X86_VENDOR_NSC, 5, },
|
|
| 185 |
-+ { X86_VENDOR_AMD, 0x12, },
|
|
| 186 |
-+ { X86_VENDOR_AMD, 0x11, },
|
|
| 187 |
-+ { X86_VENDOR_AMD, 0x10, },
|
|
| 188 |
-+ { X86_VENDOR_AMD, 0xf, },
|
|
| 189 |
- { X86_VENDOR_ANY, 4, },
|
|
| 190 |
- {}
|
|
| 191 |
- }; |
|
| 192 |
-2.7.4 |
|
| 193 |
- |
| 194 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,120 +0,0 @@ |
| 1 |
-From d9234d3b1e19782a47e762987b7bda684c8d6b22 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 25 Apr 2018 22:04:25 -0400 |
|
| 4 |
-Subject: [PATCH 13/54] x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest |
|
| 5 |
-MIME-Version: 1.0 |
|
| 6 |
-Content-Type: text/plain; charset=UTF-8 |
|
| 7 |
-Content-Transfer-Encoding: 8bit |
|
| 8 |
- |
|
| 9 |
-commit da39556f66f5cfe8f9c989206974f1cb16ca5d7c upstream |
|
| 10 |
- |
|
| 11 |
-Expose the CPUID.7.EDX[31] bit to the guest, and also guard against various |
|
| 12 |
-combinations of SPEC_CTRL MSR values. |
|
| 13 |
- |
|
| 14 |
-The handling of the MSR (to take into account the host value of SPEC_CTRL |
|
| 15 |
-Bit(2)) is taken care of in patch: |
|
| 16 |
- |
|
| 17 |
- KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS |
|
| 18 |
- |
|
| 19 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 20 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 21 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 22 |
- |
|
| 23 |
-[dwmw2: Handle 4.9 guest CPUID differences, rename |
|
| 24 |
- guest_cpu_has_ibrs() → guest_cpu_has_spec_ctrl()] |
|
| 25 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 26 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 27 |
- arch/x86/kvm/cpuid.c | 2 +- |
|
| 28 |
- arch/x86/kvm/cpuid.h | 4 ++-- |
|
| 29 |
- arch/x86/kvm/svm.c | 4 ++-- |
|
| 30 |
- arch/x86/kvm/vmx.c | 6 +++--- |
|
| 31 |
- 4 files changed, 8 insertions(+), 8 deletions(-) |
|
| 32 |
- |
|
| 33 |
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
|
| 34 |
-index 93f924d..a9409f0 100644 |
|
| 35 |
-+++ b/arch/x86/kvm/cpuid.c |
|
| 36 |
-@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
|
| 37 |
- |
|
| 38 |
- /* cpuid 7.0.edx*/ |
|
| 39 |
- const u32 kvm_cpuid_7_0_edx_x86_features = |
|
| 40 |
-- F(SPEC_CTRL) | F(ARCH_CAPABILITIES); |
|
| 41 |
-+ F(SPEC_CTRL) | F(RDS) | F(ARCH_CAPABILITIES); |
|
| 42 |
- |
|
| 43 |
- /* all calls to cpuid_count() should be made on the same cpu */ |
|
| 44 |
- get_cpu(); |
|
| 45 |
-diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h |
|
| 46 |
-index d1beb71..24187d0 100644 |
|
| 47 |
-+++ b/arch/x86/kvm/cpuid.h |
|
| 48 |
-@@ -171,7 +171,7 @@ static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu) |
|
| 49 |
- return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL)); |
|
| 50 |
- } |
|
| 51 |
- |
|
| 52 |
--static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu) |
|
| 53 |
-+static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu) |
|
| 54 |
- {
|
|
| 55 |
- struct kvm_cpuid_entry2 *best; |
|
| 56 |
- |
|
| 57 |
-@@ -179,7 +179,7 @@ static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu) |
|
| 58 |
- if (best && (best->ebx & bit(X86_FEATURE_IBRS))) |
|
| 59 |
- return true; |
|
| 60 |
- best = kvm_find_cpuid_entry(vcpu, 7, 0); |
|
| 61 |
-- return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL)); |
|
| 62 |
-+ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_RDS))); |
|
| 63 |
- } |
|
| 64 |
- |
|
| 65 |
- static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu) |
|
| 66 |
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
|
| 67 |
-index eeb8cd3..57b886b 100644 |
|
| 68 |
-+++ b/arch/x86/kvm/svm.c |
|
| 69 |
-@@ -3545,7 +3545,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
|
| 70 |
- break; |
|
| 71 |
- case MSR_IA32_SPEC_CTRL: |
|
| 72 |
- if (!msr_info->host_initiated && |
|
| 73 |
-- !guest_cpuid_has_ibrs(vcpu)) |
|
| 74 |
-+ !guest_cpuid_has_spec_ctrl(vcpu)) |
|
| 75 |
- return 1; |
|
| 76 |
- |
|
| 77 |
- msr_info->data = svm->spec_ctrl; |
|
| 78 |
-@@ -3643,7 +3643,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
|
| 79 |
- break; |
|
| 80 |
- case MSR_IA32_SPEC_CTRL: |
|
| 81 |
- if (!msr->host_initiated && |
|
| 82 |
-- !guest_cpuid_has_ibrs(vcpu)) |
|
| 83 |
-+ !guest_cpuid_has_spec_ctrl(vcpu)) |
|
| 84 |
- return 1; |
|
| 85 |
- |
|
| 86 |
- /* The STIBP bit doesn't fault even if it's not advertised */ |
|
| 87 |
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
|
| 88 |
-index 266db0b..67ed4e9 100644 |
|
| 89 |
-+++ b/arch/x86/kvm/vmx.c |
|
| 90 |
-@@ -3020,7 +3020,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
|
| 91 |
- break; |
|
| 92 |
- case MSR_IA32_SPEC_CTRL: |
|
| 93 |
- if (!msr_info->host_initiated && |
|
| 94 |
-- !guest_cpuid_has_ibrs(vcpu)) |
|
| 95 |
-+ !guest_cpuid_has_spec_ctrl(vcpu)) |
|
| 96 |
- return 1; |
|
| 97 |
- |
|
| 98 |
- msr_info->data = to_vmx(vcpu)->spec_ctrl; |
|
| 99 |
-@@ -3137,11 +3137,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
|
| 100 |
- break; |
|
| 101 |
- case MSR_IA32_SPEC_CTRL: |
|
| 102 |
- if (!msr_info->host_initiated && |
|
| 103 |
-- !guest_cpuid_has_ibrs(vcpu)) |
|
| 104 |
-+ !guest_cpuid_has_spec_ctrl(vcpu)) |
|
| 105 |
- return 1; |
|
| 106 |
- |
|
| 107 |
- /* The STIBP bit doesn't fault even if it's not advertised */ |
|
| 108 |
-- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) |
|
| 109 |
-+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS)) |
|
| 110 |
- return 1; |
|
| 111 |
- |
|
| 112 |
- vmx->spec_ctrl = data; |
|
| 113 |
-2.7.4 |
|
| 114 |
- |
| 115 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,141 +0,0 @@ |
| 1 |
-From 5e82739dc5290969a34d90066c15241425c4a748 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sun, 29 Apr 2018 15:01:37 +0200 |
|
| 4 |
-Subject: [PATCH 14/54] x86/speculation: Create spec-ctrl.h to avoid include |
|
| 5 |
- hell |
|
| 6 |
- |
|
| 7 |
-commit 28a2775217b17208811fa43a9e96bd1fdf417b86 upstream |
|
| 8 |
- |
|
| 9 |
-Having everything in nospec-branch.h creates a hell of dependencies when |
|
| 10 |
-adding the prctl based switching mechanism. Move everything which is not |
|
| 11 |
-required in nospec-branch.h to spec-ctrl.h and fix up the includes in the |
|
| 12 |
-relevant files. |
|
| 13 |
- |
|
| 14 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 15 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 16 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 17 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 18 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 19 |
- arch/x86/include/asm/nospec-branch.h | 14 -------------- |
|
| 20 |
- arch/x86/include/asm/spec-ctrl.h | 21 +++++++++++++++++++++ |
|
| 21 |
- arch/x86/kernel/cpu/amd.c | 2 +- |
|
| 22 |
- arch/x86/kernel/cpu/bugs.c | 2 +- |
|
| 23 |
- arch/x86/kvm/svm.c | 2 +- |
|
| 24 |
- arch/x86/kvm/vmx.c | 2 +- |
|
| 25 |
- 6 files changed, 25 insertions(+), 18 deletions(-) |
|
| 26 |
- create mode 100644 arch/x86/include/asm/spec-ctrl.h |
|
| 27 |
- |
|
| 28 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 29 |
-index 3a1541c..1119f14 100644 |
|
| 30 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 31 |
-@@ -228,26 +228,12 @@ enum spectre_v2_mitigation {
|
|
| 32 |
- extern void x86_spec_ctrl_set(u64); |
|
| 33 |
- extern u64 x86_spec_ctrl_get_default(void); |
|
| 34 |
- |
|
| 35 |
--/* |
|
| 36 |
-- * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR |
|
| 37 |
-- * the guest has, while on VMEXIT we restore the host view. This |
|
| 38 |
-- * would be easier if SPEC_CTRL were architecturally maskable or |
|
| 39 |
-- * shadowable for guests but this is not (currently) the case. |
|
| 40 |
-- * Takes the guest view of SPEC_CTRL MSR as a parameter. |
|
| 41 |
-- */ |
|
| 42 |
--extern void x86_spec_ctrl_set_guest(u64); |
|
| 43 |
--extern void x86_spec_ctrl_restore_host(u64); |
|
| 44 |
-- |
|
| 45 |
- /* The Speculative Store Bypass disable variants */ |
|
| 46 |
- enum ssb_mitigation {
|
|
| 47 |
- SPEC_STORE_BYPASS_NONE, |
|
| 48 |
- SPEC_STORE_BYPASS_DISABLE, |
|
| 49 |
- }; |
|
| 50 |
- |
|
| 51 |
--/* AMD specific Speculative Store Bypass MSR data */ |
|
| 52 |
--extern u64 x86_amd_ls_cfg_base; |
|
| 53 |
--extern u64 x86_amd_ls_cfg_rds_mask; |
|
| 54 |
-- |
|
| 55 |
- extern char __indirect_thunk_start[]; |
|
| 56 |
- extern char __indirect_thunk_end[]; |
|
| 57 |
- |
|
| 58 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 59 |
-new file mode 100644 |
|
| 60 |
-index 0000000..3ad6442 |
|
| 61 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 62 |
-@@ -0,0 +1,21 @@ |
|
| 63 |
-+/* SPDX-License-Identifier: GPL-2.0 */ |
|
| 64 |
-+#ifndef _ASM_X86_SPECCTRL_H_ |
|
| 65 |
-+#define _ASM_X86_SPECCTRL_H_ |
|
| 66 |
-+ |
|
| 67 |
-+#include <asm/nospec-branch.h> |
|
| 68 |
-+ |
|
| 69 |
-+/* |
|
| 70 |
-+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR |
|
| 71 |
-+ * the guest has, while on VMEXIT we restore the host view. This |
|
| 72 |
-+ * would be easier if SPEC_CTRL were architecturally maskable or |
|
| 73 |
-+ * shadowable for guests but this is not (currently) the case. |
|
| 74 |
-+ * Takes the guest view of SPEC_CTRL MSR as a parameter. |
|
| 75 |
-+ */ |
|
| 76 |
-+extern void x86_spec_ctrl_set_guest(u64); |
|
| 77 |
-+extern void x86_spec_ctrl_restore_host(u64); |
|
| 78 |
-+ |
|
| 79 |
-+/* AMD specific Speculative Store Bypass MSR data */ |
|
| 80 |
-+extern u64 x86_amd_ls_cfg_base; |
|
| 81 |
-+extern u64 x86_amd_ls_cfg_rds_mask; |
|
| 82 |
-+ |
|
| 83 |
-+#endif |
|
| 84 |
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
|
| 85 |
-index 7551d9ad..a176c81 100644 |
|
| 86 |
-+++ b/arch/x86/kernel/cpu/amd.c |
|
| 87 |
-@@ -9,7 +9,7 @@ |
|
| 88 |
- #include <asm/processor.h> |
|
| 89 |
- #include <asm/apic.h> |
|
| 90 |
- #include <asm/cpu.h> |
|
| 91 |
--#include <asm/nospec-branch.h> |
|
| 92 |
-+#include <asm/spec-ctrl.h> |
|
| 93 |
- #include <asm/smp.h> |
|
| 94 |
- #include <asm/pci-direct.h> |
|
| 95 |
- #include <asm/delay.h> |
|
| 96 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 97 |
-index b3696cc..46d01fd 100644 |
|
| 98 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 99 |
-@@ -12,7 +12,7 @@ |
|
| 100 |
- #include <linux/cpu.h> |
|
| 101 |
- #include <linux/module.h> |
|
| 102 |
- |
|
| 103 |
--#include <asm/nospec-branch.h> |
|
| 104 |
-+#include <asm/spec-ctrl.h> |
|
| 105 |
- #include <asm/cmdline.h> |
|
| 106 |
- #include <asm/bugs.h> |
|
| 107 |
- #include <asm/processor.h> |
|
| 108 |
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
|
| 109 |
-index 57b886b..516ddff 100644 |
|
| 110 |
-+++ b/arch/x86/kvm/svm.c |
|
| 111 |
-@@ -45,7 +45,7 @@ |
|
| 112 |
- #include <asm/kvm_para.h> |
|
| 113 |
- #include <asm/irq_remapping.h> |
|
| 114 |
- #include <asm/microcode.h> |
|
| 115 |
--#include <asm/nospec-branch.h> |
|
| 116 |
-+#include <asm/spec-ctrl.h> |
|
| 117 |
- |
|
| 118 |
- #include <asm/virtext.h> |
|
| 119 |
- #include "trace.h" |
|
| 120 |
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
|
| 121 |
-index 67ed4e9..0eb3863 100644 |
|
| 122 |
-+++ b/arch/x86/kvm/vmx.c |
|
| 123 |
-@@ -50,7 +50,7 @@ |
|
| 124 |
- #include <asm/apic.h> |
|
| 125 |
- #include <asm/irq_remapping.h> |
|
| 126 |
- #include <asm/microcode.h> |
|
| 127 |
--#include <asm/nospec-branch.h> |
|
| 128 |
-+#include <asm/spec-ctrl.h> |
|
| 129 |
- |
|
| 130 |
- #include "trace.h" |
|
| 131 |
- #include "pmu.h" |
|
| 132 |
-2.7.4 |
|
| 133 |
- |
deleted file mode 100644
@@ -1,239 +0,0 @@
-From 259d613d145206198100baf407e1f41c0e1edf15 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sun, 29 Apr 2018 15:20:11 +0200 |
|
| 4 |
-Subject: [PATCH 15/54] prctl: Add speculation control prctls |
|
| 5 |
- |
|
| 6 |
-commit b617cfc858161140d69cc0b5cc211996b557a1c7 upstream |
|
| 7 |
- |
|
| 8 |
-Add two new prctls to control aspects of speculation related vulnerabilites |
|
| 9 |
-and their mitigations to provide finer grained control over performance |
|
| 10 |
-impacting mitigations. |
|
| 11 |
- |
|
| 12 |
-PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature |
|
| 13 |
-which is selected with arg2 of prctl(2). The return value uses bit 0-2 with |
|
| 14 |
-the following meaning: |
|
| 15 |
- |
|
| 16 |
-Bit Define Description |
|
| 17 |
-0 PR_SPEC_PRCTL Mitigation can be controlled per task by |
|
| 18 |
- PR_SET_SPECULATION_CTRL |
|
| 19 |
-1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is |
|
| 20 |
- disabled |
|
| 21 |
-2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is |
|
| 22 |
- enabled |
|
| 23 |
- |
|
| 24 |
-If all bits are 0 the CPU is not affected by the speculation misfeature. |
|
| 25 |
- |
|
| 26 |
-If PR_SPEC_PRCTL is set, then the per task control of the mitigation is |
|
| 27 |
-available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation |
|
| 28 |
-misfeature will fail. |
|
| 29 |
- |
|
| 30 |
-PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which |
|
| 31 |
-is selected by arg2 of prctl(2) per task. arg3 is used to hand in the |
|
| 32 |
-control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE. |
|
| 33 |
- |
|
| 34 |
-The common return values are: |
|
| 35 |
- |
|
| 36 |
-EINVAL prctl is not implemented by the architecture or the unused prctl() |
|
| 37 |
- arguments are not 0 |
|
| 38 |
-ENODEV arg2 is selecting a not supported speculation misfeature |
|
| 39 |
- |
|
| 40 |
-PR_SET_SPECULATION_CTRL has these additional return values: |
|
| 41 |
- |
|
| 42 |
-ERANGE arg3 is incorrect, i.e. it's not either PR_SPEC_ENABLE or PR_SPEC_DISABLE |
|
| 43 |
-ENXIO prctl control of the selected speculation misfeature is disabled |
|
| 44 |
- |
|
| 45 |
-The first supported controlable speculation misfeature is |
|
| 46 |
-PR_SPEC_STORE_BYPASS. Add the define so this can be shared between |
|
| 47 |
-architectures. |
|
| 48 |
- |
|
| 49 |
-Based on an initial patch from Tim Chen and mostly rewritten. |
|
| 50 |
- |
|
| 51 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 52 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 53 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 54 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 55 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 56 |
- Documentation/spec_ctrl.txt | 86 +++++++++++++++++++++++++++++++++++++++++++++ |
|
| 57 |
- include/linux/nospec.h | 5 +++ |
|
| 58 |
- include/uapi/linux/prctl.h | 11 ++++++ |
|
| 59 |
- kernel/sys.c | 22 ++++++++++++ |
|
| 60 |
- 4 files changed, 124 insertions(+) |
|
| 61 |
- create mode 100644 Documentation/spec_ctrl.txt |
|
| 62 |
- |
|
| 63 |
-diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt |
|
| 64 |
-new file mode 100644 |
|
| 65 |
-index 0000000..ddbebcd |
|
| 66 |
-+++ b/Documentation/spec_ctrl.txt |
|
| 67 |
-@@ -0,0 +1,86 @@ |
|
| 68 |
-+=================== |
|
| 69 |
-+Speculation Control |
|
| 70 |
-+=================== |
|
| 71 |
-+ |
|
| 72 |
-+Quite some CPUs have speculation related misfeatures which are in fact |
|
| 73 |
-+vulnerabilites causing data leaks in various forms even accross privilege |
|
| 74 |
-+domains. |
|
| 75 |
-+ |
|
| 76 |
-+The kernel provides mitigation for such vulnerabilities in various |
|
| 77 |
-+forms. Some of these mitigations are compile time configurable and some on |
|
| 78 |
-+the kernel command line. |
|
| 79 |
-+ |
|
| 80 |
-+There is also a class of mitigations which are very expensive, but they can |
|
| 81 |
-+be restricted to a certain set of processes or tasks in controlled |
|
| 82 |
-+environments. The mechanism to control these mitigations is via |
|
| 83 |
-+:manpage:`prctl(2)`. |
|
| 84 |
-+ |
|
| 85 |
-+There are two prctl options which are related to this: |
|
| 86 |
-+ |
|
| 87 |
-+ * PR_GET_SPECULATION_CTRL |
|
| 88 |
-+ |
|
| 89 |
-+ * PR_SET_SPECULATION_CTRL |
|
| 90 |
-+ |
|
| 91 |
-+PR_GET_SPECULATION_CTRL |
|
| 92 |
-+----------------------- |
|
| 93 |
-+ |
|
| 94 |
-+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature |
|
| 95 |
-+which is selected with arg2 of prctl(2). The return value uses bits 0-2 with |
|
| 96 |
-+the following meaning: |
|
| 97 |
-+ |
|
| 98 |
-+==== ================ =================================================== |
|
| 99 |
-+Bit Define Description |
|
| 100 |
-+==== ================ =================================================== |
|
| 101 |
-+0 PR_SPEC_PRCTL Mitigation can be controlled per task by |
|
| 102 |
-+ PR_SET_SPECULATION_CTRL |
|
| 103 |
-+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is |
|
| 104 |
-+ disabled |
|
| 105 |
-+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is |
|
| 106 |
-+ enabled |
|
| 107 |
-+==== ================ =================================================== |
|
| 108 |
-+ |
|
| 109 |
-+If all bits are 0 the CPU is not affected by the speculation misfeature. |
|
| 110 |
-+ |
|
| 111 |
-+If PR_SPEC_PRCTL is set, then the per task control of the mitigation is |
|
| 112 |
-+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation |
|
| 113 |
-+misfeature will fail. |
|
| 114 |
-+ |
|
| 115 |
-+PR_SET_SPECULATION_CTRL |
|
| 116 |
-+----------------------- |
|
| 117 |
-+PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which |
|
| 118 |
-+is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand |
|
| 119 |
-+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE. |
|
| 120 |
-+ |
|
| 121 |
-+Common error codes |
|
| 122 |
-+------------------ |
|
| 123 |
-+======= ================================================================= |
|
| 124 |
-+Value Meaning |
|
| 125 |
-+======= ================================================================= |
|
| 126 |
-+EINVAL The prctl is not implemented by the architecture or unused |
|
| 127 |
-+ prctl(2) arguments are not 0 |
|
| 128 |
-+ |
|
| 129 |
-+ENODEV arg2 is selecting a not supported speculation misfeature |
|
| 130 |
-+======= ================================================================= |
|
| 131 |
-+ |
|
| 132 |
-+PR_SET_SPECULATION_CTRL error codes |
|
| 133 |
-+----------------------------------- |
|
| 134 |
-+======= ================================================================= |
|
| 135 |
-+Value Meaning |
|
| 136 |
-+======= ================================================================= |
|
| 137 |
-+0 Success |
|
| 138 |
-+ |
|
| 139 |
-+ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor |
|
| 140 |
-+ PR_SPEC_DISABLE |
|
| 141 |
-+ |
|
| 142 |
-+ENXIO Control of the selected speculation misfeature is not possible. |
|
| 143 |
-+ See PR_GET_SPECULATION_CTRL. |
|
| 144 |
-+======= ================================================================= |
|
| 145 |
-+ |
|
| 146 |
-+Speculation misfeature controls |
|
| 147 |
-+------------------------------- |
|
| 148 |
-+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass |
|
| 149 |
-+ |
|
| 150 |
-+ Invocations: |
|
| 151 |
-+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0); |
|
| 152 |
-+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); |
|
| 153 |
-+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); |
|
| 154 |
-diff --git a/include/linux/nospec.h b/include/linux/nospec.h |
|
| 155 |
-index e791ebc..700bb8a 100644 |
|
| 156 |
-+++ b/include/linux/nospec.h |
|
| 157 |
-@@ -55,4 +55,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, |
|
| 158 |
- \ |
|
| 159 |
- (typeof(_i)) (_i & _mask); \ |
|
| 160 |
- }) |
|
| 161 |
-+ |
|
| 162 |
-+/* Speculation control prctl */ |
|
| 163 |
-+int arch_prctl_spec_ctrl_get(unsigned long which); |
|
| 164 |
-+int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl); |
|
| 165 |
-+ |
|
| 166 |
- #endif /* _LINUX_NOSPEC_H */ |
|
| 167 |
-diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h |
|
| 168 |
-index a8d0759..3b316be 100644 |
|
| 169 |
-+++ b/include/uapi/linux/prctl.h |
|
| 170 |
-@@ -197,4 +197,15 @@ struct prctl_mm_map {
|
|
| 171 |
- # define PR_CAP_AMBIENT_LOWER 3 |
|
| 172 |
- # define PR_CAP_AMBIENT_CLEAR_ALL 4 |
|
| 173 |
- |
|
| 174 |
-+/* Per task speculation control */ |
|
| 175 |
-+#define PR_GET_SPECULATION_CTRL 52 |
|
| 176 |
-+#define PR_SET_SPECULATION_CTRL 53 |
|
| 177 |
-+/* Speculation control variants */ |
|
| 178 |
-+# define PR_SPEC_STORE_BYPASS 0 |
|
| 179 |
-+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ |
|
| 180 |
-+# define PR_SPEC_NOT_AFFECTED 0 |
|
| 181 |
-+# define PR_SPEC_PRCTL (1UL << 0) |
|
| 182 |
-+# define PR_SPEC_ENABLE (1UL << 1) |
|
| 183 |
-+# define PR_SPEC_DISABLE (1UL << 2) |
|
| 184 |
-+ |
|
| 185 |
- #endif /* _LINUX_PRCTL_H */ |
|
| 186 |
-diff --git a/kernel/sys.c b/kernel/sys.c |
|
| 187 |
-index 89d5be4..312c985 100644 |
|
| 188 |
-+++ b/kernel/sys.c |
|
| 189 |
-@@ -53,6 +53,8 @@ |
|
| 190 |
- #include <linux/uidgid.h> |
|
| 191 |
- #include <linux/cred.h> |
|
| 192 |
- |
|
| 193 |
-+#include <linux/nospec.h> |
|
| 194 |
-+ |
|
| 195 |
- #include <linux/kmsg_dump.h> |
|
| 196 |
- /* Move somewhere else to avoid recompiling? */ |
|
| 197 |
- #include <generated/utsrelease.h> |
|
| 198 |
-@@ -2072,6 +2074,16 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) |
|
| 199 |
- } |
|
| 200 |
- #endif |
|
| 201 |
- |
|
| 202 |
-+int __weak arch_prctl_spec_ctrl_get(unsigned long which) |
|
| 203 |
-+{
|
|
| 204 |
-+ return -EINVAL; |
|
| 205 |
-+} |
|
| 206 |
-+ |
|
| 207 |
-+int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl) |
|
| 208 |
-+{
|
|
| 209 |
-+ return -EINVAL; |
|
| 210 |
-+} |
|
| 211 |
-+ |
|
| 212 |
- SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
|
| 213 |
- unsigned long, arg4, unsigned long, arg5) |
|
| 214 |
- {
|
|
| 215 |
-@@ -2270,6 +2282,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
|
| 216 |
- case PR_GET_FP_MODE: |
|
| 217 |
- error = GET_FP_MODE(me); |
|
| 218 |
- break; |
|
| 219 |
-+ case PR_GET_SPECULATION_CTRL: |
|
| 220 |
-+ if (arg3 || arg4 || arg5) |
|
| 221 |
-+ return -EINVAL; |
|
| 222 |
-+ error = arch_prctl_spec_ctrl_get(arg2); |
|
| 223 |
-+ break; |
|
| 224 |
-+ case PR_SET_SPECULATION_CTRL: |
|
| 225 |
-+ if (arg4 || arg5) |
|
| 226 |
-+ return -EINVAL; |
|
| 227 |
-+ error = arch_prctl_spec_ctrl_set(arg2, arg3); |
|
| 228 |
-+ break; |
|
| 229 |
- default: |
|
| 230 |
- error = -EINVAL; |
|
| 231 |
- break; |
|
| 232 |
-2.7.4 |
|
| 233 |
- |
deleted file mode 100644
@@ -1,125 +0,0 @@
-From 0a927d08b636a68b1eaa44edf4ebc1b12e7ca86f Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kyle Huey <me@kylehuey.com> |
|
| 3 |
-Date: Tue, 14 Feb 2017 00:11:02 -0800 |
|
| 4 |
-Subject: [PATCH 16/54] x86/process: Optimize TIF checks in __switch_to_xtra() |
|
| 5 |
- |
|
| 6 |
-commit af8b3cd3934ec60f4c2a420d19a9d416554f140b upstream |
|
| 7 |
- |
|
| 8 |
-Help the compiler to avoid reevaluating the thread flags for each checked |
|
| 9 |
-bit by reordering the bit checks and providing an explicit xor for |
|
| 10 |
-evaluation. |
|
| 11 |
- |
|
| 12 |
-With default defconfigs for each arch, |
|
| 13 |
- |
|
| 14 |
-x86_64: arch/x86/kernel/process.o |
|
| 15 |
-text data bss dec hex |
|
| 16 |
-3056 8577 16 11649 2d81 Before |
|
| 17 |
-3024 8577 16 11617 2d61 After |
|
| 18 |
- |
|
| 19 |
-i386: arch/x86/kernel/process.o |
|
| 20 |
-text data bss dec hex |
|
| 21 |
-2957 8673 8 11638 2d76 Before |
|
| 22 |
-2925 8673 8 11606 2d56 After |
|
| 23 |
- |
|
| 24 |
-Originally-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 25 |
-Signed-off-by: Kyle Huey <khuey@kylehuey.com> |
|
| 26 |
-Cc: Peter Zijlstra <peterz@infradead.org> |
|
| 27 |
-Cc: Andy Lutomirski <luto@kernel.org> |
|
| 28 |
-Link: http://lkml.kernel.org/r/20170214081104.9244-2-khuey@kylehuey.com |
|
| 29 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 30 |
- |
|
| 31 |
-[dwmw2: backported to make TIF_RDS handling simpler. |
|
| 32 |
- No deferred TR reload.] |
|
| 33 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 34 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 35 |
- arch/x86/kernel/process.c | 54 +++++++++++++++++++++++++++-------------------- |
|
| 36 |
- 1 file changed, 31 insertions(+), 23 deletions(-) |
|
| 37 |
- |
|
| 38 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 39 |
-index a55b320..0e1999e 100644 |
|
| 40 |
-+++ b/arch/x86/kernel/process.c |
|
| 41 |
-@@ -192,48 +192,56 @@ int set_tsc_mode(unsigned int val) |
|
| 42 |
- return 0; |
|
| 43 |
- } |
|
| 44 |
- |
|
| 45 |
-+static inline void switch_to_bitmap(struct tss_struct *tss, |
|
| 46 |
-+ struct thread_struct *prev, |
|
| 47 |
-+ struct thread_struct *next, |
|
| 48 |
-+ unsigned long tifp, unsigned long tifn) |
|
| 49 |
-+{
|
|
| 50 |
-+ if (tifn & _TIF_IO_BITMAP) {
|
|
| 51 |
-+ /* |
|
| 52 |
-+ * Copy the relevant range of the IO bitmap. |
|
| 53 |
-+ * Normally this is 128 bytes or less: |
|
| 54 |
-+ */ |
|
| 55 |
-+ memcpy(tss->io_bitmap, next->io_bitmap_ptr, |
|
| 56 |
-+ max(prev->io_bitmap_max, next->io_bitmap_max)); |
|
| 57 |
-+ } else if (tifp & _TIF_IO_BITMAP) {
|
|
| 58 |
-+ /* |
|
| 59 |
-+ * Clear any possible leftover bits: |
|
| 60 |
-+ */ |
|
| 61 |
-+ memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); |
|
| 62 |
-+ } |
|
| 63 |
-+} |
|
| 64 |
-+ |
|
| 65 |
- void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 66 |
- struct tss_struct *tss) |
|
| 67 |
- {
|
|
| 68 |
- struct thread_struct *prev, *next; |
|
| 69 |
-+ unsigned long tifp, tifn; |
|
| 70 |
- |
|
| 71 |
- prev = &prev_p->thread; |
|
| 72 |
- next = &next_p->thread; |
|
| 73 |
- |
|
| 74 |
-- if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^ |
|
| 75 |
-- test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
|
|
| 76 |
-+ tifn = READ_ONCE(task_thread_info(next_p)->flags); |
|
| 77 |
-+ tifp = READ_ONCE(task_thread_info(prev_p)->flags); |
|
| 78 |
-+ switch_to_bitmap(tss, prev, next, tifp, tifn); |
|
| 79 |
-+ |
|
| 80 |
-+ propagate_user_return_notify(prev_p, next_p); |
|
| 81 |
-+ |
|
| 82 |
-+ if ((tifp ^ tifn) & _TIF_BLOCKSTEP) {
|
|
| 83 |
- unsigned long debugctl = get_debugctlmsr(); |
|
| 84 |
- |
|
| 85 |
- debugctl &= ~DEBUGCTLMSR_BTF; |
|
| 86 |
-- if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) |
|
| 87 |
-+ if (tifn & _TIF_BLOCKSTEP) |
|
| 88 |
- debugctl |= DEBUGCTLMSR_BTF; |
|
| 89 |
-- |
|
| 90 |
- update_debugctlmsr(debugctl); |
|
| 91 |
- } |
|
| 92 |
- |
|
| 93 |
-- if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ |
|
| 94 |
-- test_tsk_thread_flag(next_p, TIF_NOTSC)) {
|
|
| 95 |
-- /* prev and next are different */ |
|
| 96 |
-- if (test_tsk_thread_flag(next_p, TIF_NOTSC)) |
|
| 97 |
-+ if ((tifp ^ tifn) & _TIF_NOTSC) {
|
|
| 98 |
-+ if (tifn & _TIF_NOTSC) |
|
| 99 |
- hard_disable_TSC(); |
|
| 100 |
- else |
|
| 101 |
- hard_enable_TSC(); |
|
| 102 |
- } |
|
| 103 |
-- |
|
| 104 |
-- if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
|
|
| 105 |
-- /* |
|
| 106 |
-- * Copy the relevant range of the IO bitmap. |
|
| 107 |
-- * Normally this is 128 bytes or less: |
|
| 108 |
-- */ |
|
| 109 |
-- memcpy(tss->io_bitmap, next->io_bitmap_ptr, |
|
| 110 |
-- max(prev->io_bitmap_max, next->io_bitmap_max)); |
|
| 111 |
-- } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
|
|
| 112 |
-- /* |
|
| 113 |
-- * Clear any possible leftover bits: |
|
| 114 |
-- */ |
|
| 115 |
-- memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); |
|
| 116 |
-- } |
|
| 117 |
-- propagate_user_return_notify(prev_p, next_p); |
|
| 118 |
- } |
|
| 119 |
- |
|
| 120 |
- /* |
|
| 121 |
-2.7.4 |
|
| 122 |
- |
deleted file mode 100644
@@ -1,84 +0,0 @@
-From bb8a828adfbc5120a1beec01403e672f36cac5bf Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kyle Huey <me@kylehuey.com> |
|
| 3 |
-Date: Tue, 14 Feb 2017 00:11:03 -0800 |
|
| 4 |
-Subject: [PATCH 17/54] x86/process: Correct and optimize TIF_BLOCKSTEP switch |
|
| 5 |
- |
|
| 6 |
-commit b9894a2f5bd18b1691cb6872c9afe32b148d0132 upstream |
|
| 7 |
- |
|
| 8 |
-The debug control MSR is "highly magical" as the blockstep bit can be |
|
| 9 |
-cleared by hardware under not well documented circumstances. |
|
| 10 |
- |
|
| 11 |
-So a task switch relying on the bit set by the previous task (according to |
|
| 12 |
-the previous tasks thread flags) can trip over this and not update the flag |
|
| 13 |
-for the next task. |
|
| 14 |
- |
|
| 15 |
-To fix this its required to handle DEBUGCTLMSR_BTF when either the previous |
|
| 16 |
-or the next or both tasks have the TIF_BLOCKSTEP flag set. |
|
| 17 |
- |
|
| 18 |
-While at it avoid branching within the TIF_BLOCKSTEP case and evaluating |
|
| 19 |
-boot_cpu_data twice in kernels without CONFIG_X86_DEBUGCTLMSR. |
|
| 20 |
- |
|
| 21 |
-x86_64: arch/x86/kernel/process.o |
|
| 22 |
-text data bss dec hex |
|
| 23 |
-3024 8577 16 11617 2d61 Before |
|
| 24 |
-3008 8577 16 11601 2d51 After |
|
| 25 |
- |
|
| 26 |
-i386: No change |
|
| 27 |
- |
|
| 28 |
-[ tglx: Made the shift value explicit, use a local variable to make the |
|
| 29 |
-code readable and massaged changelog] |
|
| 30 |
- |
|
| 31 |
-Originally-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 32 |
-Signed-off-by: Kyle Huey <khuey@kylehuey.com> |
|
| 33 |
-Cc: Peter Zijlstra <peterz@infradead.org> |
|
| 34 |
-Cc: Andy Lutomirski <luto@kernel.org> |
|
| 35 |
-Link: http://lkml.kernel.org/r/20170214081104.9244-3-khuey@kylehuey.com |
|
| 36 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 37 |
- |
|
| 38 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 39 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 40 |
- arch/x86/include/asm/msr-index.h | 1 + |
|
| 41 |
- arch/x86/kernel/process.c | 12 +++++++----- |
|
| 42 |
- 2 files changed, 8 insertions(+), 5 deletions(-) |
|
| 43 |
- |
|
| 44 |
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
|
| 45 |
-index 87103e8..076e868 100644 |
|
| 46 |
-+++ b/arch/x86/include/asm/msr-index.h |
|
| 47 |
-@@ -141,6 +141,7 @@ |
|
| 48 |
- |
|
| 49 |
- /* DEBUGCTLMSR bits (others vary by model): */ |
|
| 50 |
- #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ |
|
| 51 |
-+#define DEBUGCTLMSR_BTF_SHIFT 1 |
|
| 52 |
- #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ |
|
| 53 |
- #define DEBUGCTLMSR_TR (1UL << 6) |
|
| 54 |
- #define DEBUGCTLMSR_BTS (1UL << 7) |
|
| 55 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 56 |
-index 0e1999e..496eef6 100644 |
|
| 57 |
-+++ b/arch/x86/kernel/process.c |
|
| 58 |
-@@ -227,13 +227,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 59 |
- |
|
| 60 |
- propagate_user_return_notify(prev_p, next_p); |
|
| 61 |
- |
|
| 62 |
-- if ((tifp ^ tifn) & _TIF_BLOCKSTEP) {
|
|
| 63 |
-- unsigned long debugctl = get_debugctlmsr(); |
|
| 64 |
-+ if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) && |
|
| 65 |
-+ arch_has_block_step()) {
|
|
| 66 |
-+ unsigned long debugctl, msk; |
|
| 67 |
- |
|
| 68 |
-+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
|
| 69 |
- debugctl &= ~DEBUGCTLMSR_BTF; |
|
| 70 |
-- if (tifn & _TIF_BLOCKSTEP) |
|
| 71 |
-- debugctl |= DEBUGCTLMSR_BTF; |
|
| 72 |
-- update_debugctlmsr(debugctl); |
|
| 73 |
-+ msk = tifn & _TIF_BLOCKSTEP; |
|
| 74 |
-+ debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT; |
|
| 75 |
-+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
|
| 76 |
- } |
|
| 77 |
- |
|
| 78 |
- if ((tifp ^ tifn) & _TIF_NOTSC) {
|
|
| 79 |
-2.7.4 |
|
| 80 |
- |
deleted file mode 100644
@@ -1,112 +0,0 @@
-From ae00cf150c8b8d9e0ede34bc70499eab625f9e70 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Tue, 14 Feb 2017 00:11:04 -0800 |
|
| 4 |
-Subject: [PATCH 18/54] x86/process: Optimize TIF_NOTSC switch |
|
| 5 |
- |
|
| 6 |
-commit 5a920155e388ec22a22e0532fb695b9215c9b34d upstream |
|
| 7 |
- |
|
| 8 |
-Provide and use a toggle helper instead of doing it with a branch. |
|
| 9 |
- |
|
| 10 |
-x86_64: arch/x86/kernel/process.o |
|
| 11 |
-text data bss dec hex |
|
| 12 |
-3008 8577 16 11601 2d51 Before |
|
| 13 |
-2976 8577 16 11569 2d31 After |
|
| 14 |
- |
|
| 15 |
-i386: arch/x86/kernel/process.o |
|
| 16 |
-text data bss dec hex |
|
| 17 |
-2925 8673 8 11606 2d56 Before |
|
| 18 |
-2893 8673 8 11574 2d36 After |
|
| 19 |
- |
|
| 20 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 21 |
-Cc: Peter Zijlstra <peterz@infradead.org> |
|
| 22 |
-Cc: Andy Lutomirski <luto@kernel.org> |
|
| 23 |
-Link: http://lkml.kernel.org/r/20170214081104.9244-4-khuey@kylehuey.com |
|
| 24 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 25 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 26 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 27 |
- arch/x86/include/asm/tlbflush.h | 10 ++++++++++ |
|
| 28 |
- arch/x86/kernel/process.c | 22 ++++------------------ |
|
| 29 |
- 2 files changed, 14 insertions(+), 18 deletions(-) |
|
| 30 |
- |
|
| 31 |
-diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h |
|
| 32 |
-index 99185a0..686a58d 100644 |
|
| 33 |
-+++ b/arch/x86/include/asm/tlbflush.h |
|
| 34 |
-@@ -111,6 +111,16 @@ static inline void cr4_clear_bits(unsigned long mask) |
|
| 35 |
- } |
|
| 36 |
- } |
|
| 37 |
- |
|
| 38 |
-+static inline void cr4_toggle_bits(unsigned long mask) |
|
| 39 |
-+{
|
|
| 40 |
-+ unsigned long cr4; |
|
| 41 |
-+ |
|
| 42 |
-+ cr4 = this_cpu_read(cpu_tlbstate.cr4); |
|
| 43 |
-+ cr4 ^= mask; |
|
| 44 |
-+ this_cpu_write(cpu_tlbstate.cr4, cr4); |
|
| 45 |
-+ __write_cr4(cr4); |
|
| 46 |
-+} |
|
| 47 |
-+ |
|
| 48 |
- /* Read the CR4 shadow. */ |
|
| 49 |
- static inline unsigned long cr4_read_shadow(void) |
|
| 50 |
- {
|
|
| 51 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 52 |
-index 496eef6..b7e3822 100644 |
|
| 53 |
-+++ b/arch/x86/kernel/process.c |
|
| 54 |
-@@ -134,11 +134,6 @@ void flush_thread(void) |
|
| 55 |
- fpu__clear(&tsk->thread.fpu); |
|
| 56 |
- } |
|
| 57 |
- |
|
| 58 |
--static void hard_disable_TSC(void) |
|
| 59 |
--{
|
|
| 60 |
-- cr4_set_bits(X86_CR4_TSD); |
|
| 61 |
--} |
|
| 62 |
-- |
|
| 63 |
- void disable_TSC(void) |
|
| 64 |
- {
|
|
| 65 |
- preempt_disable(); |
|
| 66 |
-@@ -147,15 +142,10 @@ void disable_TSC(void) |
|
| 67 |
- * Must flip the CPU state synchronously with |
|
| 68 |
- * TIF_NOTSC in the current running context. |
|
| 69 |
- */ |
|
| 70 |
-- hard_disable_TSC(); |
|
| 71 |
-+ cr4_set_bits(X86_CR4_TSD); |
|
| 72 |
- preempt_enable(); |
|
| 73 |
- } |
|
| 74 |
- |
|
| 75 |
--static void hard_enable_TSC(void) |
|
| 76 |
--{
|
|
| 77 |
-- cr4_clear_bits(X86_CR4_TSD); |
|
| 78 |
--} |
|
| 79 |
-- |
|
| 80 |
- static void enable_TSC(void) |
|
| 81 |
- {
|
|
| 82 |
- preempt_disable(); |
|
| 83 |
-@@ -164,7 +154,7 @@ static void enable_TSC(void) |
|
| 84 |
- * Must flip the CPU state synchronously with |
|
| 85 |
- * TIF_NOTSC in the current running context. |
|
| 86 |
- */ |
|
| 87 |
-- hard_enable_TSC(); |
|
| 88 |
-+ cr4_clear_bits(X86_CR4_TSD); |
|
| 89 |
- preempt_enable(); |
|
| 90 |
- } |
|
| 91 |
- |
|
| 92 |
-@@ -238,12 +228,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 93 |
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
|
| 94 |
- } |
|
| 95 |
- |
|
| 96 |
-- if ((tifp ^ tifn) & _TIF_NOTSC) {
|
|
| 97 |
-- if (tifn & _TIF_NOTSC) |
|
| 98 |
-- hard_disable_TSC(); |
|
| 99 |
-- else |
|
| 100 |
-- hard_enable_TSC(); |
|
| 101 |
-- } |
|
| 102 |
-+ if ((tifp ^ tifn) & _TIF_NOTSC) |
|
| 103 |
-+ cr4_toggle_bits(X86_CR4_TSD); |
|
| 104 |
- } |
|
| 105 |
- |
|
| 106 |
- /* |
|
| 107 |
-2.7.4 |
|
| 108 |
- |
deleted file mode 100644
@@ -1,229 +0,0 @@
-From c06c1b2d1725c1f6618d389cffe61cb538ca24b9 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sun, 29 Apr 2018 15:21:42 +0200 |
|
| 4 |
-Subject: [PATCH 19/54] x86/process: Allow runtime control of Speculative Store |
|
| 5 |
- Bypass |
|
| 6 |
- |
|
| 7 |
-commit 885f82bfbc6fefb6664ea27965c3ab9ac4194b8c upstream |
|
| 8 |
- |
|
| 9 |
-The Speculative Store Bypass vulnerability can be mitigated with the |
|
| 10 |
-Reduced Data Speculation (RDS) feature. To allow finer grained control of |
|
| 11 |
-this eventually expensive mitigation a per task mitigation control is |
|
| 12 |
-required. |
|
| 13 |
- |
|
| 14 |
-Add a new TIF_RDS flag and put it into the group of TIF flags which are |
|
| 15 |
-evaluated for mismatch in switch_to(). If these bits differ in the previous |
|
| 16 |
-and the next task, then the slow path function __switch_to_xtra() is |
|
| 17 |
-invoked. Implement the TIF_RDS dependent mitigation control in the slow |
|
| 18 |
-path. |
|
| 19 |
- |
|
| 20 |
-If the prctl for controlling Speculative Store Bypass is disabled or no |
|
| 21 |
-task uses the prctl then there is no overhead in the switch_to() fast |
|
| 22 |
-path. |
|
| 23 |
- |
|
| 24 |
-Update the KVM related speculation control functions to take TID_RDS into |
|
| 25 |
-account as well. |
|
| 26 |
- |
|
| 27 |
-Based on a patch from Tim Chen. Completely rewritten. |
|
| 28 |
- |
|
| 29 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 30 |
-Reviewed-by: Ingo Molnar <mingo@kernel.org> |
|
| 31 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 32 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 33 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 34 |
- arch/x86/include/asm/msr-index.h | 3 ++- |
|
| 35 |
- arch/x86/include/asm/spec-ctrl.h | 17 +++++++++++++++++ |
|
| 36 |
- arch/x86/include/asm/thread_info.h | 6 ++++-- |
|
| 37 |
- arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++----- |
|
| 38 |
- arch/x86/kernel/process.c | 22 ++++++++++++++++++++++ |
|
| 39 |
- 5 files changed, 66 insertions(+), 8 deletions(-) |
|
| 40 |
- |
|
| 41 |
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
|
| 42 |
-index 076e868..5dd28d0 100644 |
|
| 43 |
-+++ b/arch/x86/include/asm/msr-index.h |
|
| 44 |
-@@ -40,7 +40,8 @@ |
|
| 45 |
- #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ |
|
| 46 |
- #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ |
|
| 47 |
- #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ |
|
| 48 |
--#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */ |
|
| 49 |
-+#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */ |
|
| 50 |
-+#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */ |
|
| 51 |
- |
|
| 52 |
- #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ |
|
| 53 |
- #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ |
|
| 54 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 55 |
-index 3ad6442..45ef00a 100644 |
|
| 56 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 57 |
-@@ -2,6 +2,7 @@ |
|
| 58 |
- #ifndef _ASM_X86_SPECCTRL_H_ |
|
| 59 |
- #define _ASM_X86_SPECCTRL_H_ |
|
| 60 |
- |
|
| 61 |
-+#include <linux/thread_info.h> |
|
| 62 |
- #include <asm/nospec-branch.h> |
|
| 63 |
- |
|
| 64 |
- /* |
|
| 65 |
-@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u64); |
|
| 66 |
- extern u64 x86_amd_ls_cfg_base; |
|
| 67 |
- extern u64 x86_amd_ls_cfg_rds_mask; |
|
| 68 |
- |
|
| 69 |
-+/* The Intel SPEC CTRL MSR base value cache */ |
|
| 70 |
-+extern u64 x86_spec_ctrl_base; |
|
| 71 |
-+ |
|
| 72 |
-+static inline u64 rds_tif_to_spec_ctrl(u64 tifn) |
|
| 73 |
-+{
|
|
| 74 |
-+ BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT); |
|
| 75 |
-+ return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT); |
|
| 76 |
-+} |
|
| 77 |
-+ |
|
| 78 |
-+static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn) |
|
| 79 |
-+{
|
|
| 80 |
-+ return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL; |
|
| 81 |
-+} |
|
| 82 |
-+ |
|
| 83 |
-+extern void speculative_store_bypass_update(void); |
|
| 84 |
-+ |
|
| 85 |
- #endif |
|
| 86 |
-diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h |
|
| 87 |
-index 89978b9..661afac 100644 |
|
| 88 |
-+++ b/arch/x86/include/asm/thread_info.h |
|
| 89 |
-@@ -83,6 +83,7 @@ struct thread_info {
|
|
| 90 |
- #define TIF_SIGPENDING 2 /* signal pending */ |
|
| 91 |
- #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
|
| 92 |
- #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
|
| 93 |
-+#define TIF_RDS 5 /* Reduced data speculation */ |
|
| 94 |
- #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
|
| 95 |
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
|
| 96 |
- #define TIF_SECCOMP 8 /* secure computing */ |
|
| 97 |
-@@ -104,8 +105,9 @@ struct thread_info {
|
|
| 98 |
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
|
| 99 |
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
|
| 100 |
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
|
| 101 |
--#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
|
| 102 |
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
|
| 103 |
-+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
|
| 104 |
-+#define _TIF_RDS (1 << TIF_RDS) |
|
| 105 |
- #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
|
| 106 |
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
|
| 107 |
- #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
|
| 108 |
-@@ -139,7 +141,7 @@ struct thread_info {
|
|
| 109 |
- |
|
| 110 |
- /* flags to check in __switch_to() */ |
|
| 111 |
- #define _TIF_WORK_CTXSW \ |
|
| 112 |
-- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) |
|
| 113 |
-+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS) |
|
| 114 |
- |
|
| 115 |
- #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
|
| 116 |
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) |
|
| 117 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 118 |
-index 46d01fd..4f09576 100644 |
|
| 119 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 120 |
-@@ -32,7 +32,7 @@ static void __init ssb_select_mitigation(void); |
|
| 121 |
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any |
|
| 122 |
- * writes to SPEC_CTRL contain whatever reserved bits have been set. |
|
| 123 |
- */ |
|
| 124 |
--static u64 __ro_after_init x86_spec_ctrl_base; |
|
| 125 |
-+u64 __ro_after_init x86_spec_ctrl_base; |
|
| 126 |
- |
|
| 127 |
- /* |
|
| 128 |
- * The vendor and possibly platform specific bits which can be modified in |
|
| 129 |
-@@ -139,25 +139,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set); |
|
| 130 |
- |
|
| 131 |
- u64 x86_spec_ctrl_get_default(void) |
|
| 132 |
- {
|
|
| 133 |
-- return x86_spec_ctrl_base; |
|
| 134 |
-+ u64 msrval = x86_spec_ctrl_base; |
|
| 135 |
-+ |
|
| 136 |
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 137 |
-+ msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 138 |
-+ return msrval; |
|
| 139 |
- } |
|
| 140 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); |
|
| 141 |
- |
|
| 142 |
- void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 143 |
- {
|
|
| 144 |
-+ u64 host = x86_spec_ctrl_base; |
|
| 145 |
-+ |
|
| 146 |
- if (!boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 147 |
- return; |
|
| 148 |
-- if (x86_spec_ctrl_base != guest_spec_ctrl) |
|
| 149 |
-+ |
|
| 150 |
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 151 |
-+ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 152 |
-+ |
|
| 153 |
-+ if (host != guest_spec_ctrl) |
|
| 154 |
- wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); |
|
| 155 |
- } |
|
| 156 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); |
|
| 157 |
- |
|
| 158 |
- void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 159 |
- {
|
|
| 160 |
-+ u64 host = x86_spec_ctrl_base; |
|
| 161 |
-+ |
|
| 162 |
- if (!boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 163 |
- return; |
|
| 164 |
-- if (x86_spec_ctrl_base != guest_spec_ctrl) |
|
| 165 |
-- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 166 |
-+ |
|
| 167 |
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 168 |
-+ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 169 |
-+ |
|
| 170 |
-+ if (host != guest_spec_ctrl) |
|
| 171 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, host); |
|
| 172 |
- } |
|
| 173 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); |
|
| 174 |
- |
|
| 175 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 176 |
-index b7e3822..9c48e18 100644 |
|
| 177 |
-+++ b/arch/x86/kernel/process.c |
|
| 178 |
-@@ -33,6 +33,7 @@ |
|
| 179 |
- #include <asm/mce.h> |
|
| 180 |
- #include <asm/vm86.h> |
|
| 181 |
- #include <asm/switch_to.h> |
|
| 182 |
-+#include <asm/spec-ctrl.h> |
|
| 183 |
- |
|
| 184 |
- /* |
|
| 185 |
- * per-CPU TSS segments. Threads are completely 'soft' on Linux, |
|
| 186 |
-@@ -202,6 +203,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss, |
|
| 187 |
- } |
|
| 188 |
- } |
|
| 189 |
- |
|
| 190 |
-+static __always_inline void __speculative_store_bypass_update(unsigned long tifn) |
|
| 191 |
-+{
|
|
| 192 |
-+ u64 msr; |
|
| 193 |
-+ |
|
| 194 |
-+ if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
|
|
| 195 |
-+ msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn); |
|
| 196 |
-+ wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 197 |
-+ } else {
|
|
| 198 |
-+ msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn); |
|
| 199 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, msr); |
|
| 200 |
-+ } |
|
| 201 |
-+} |
|
| 202 |
-+ |
|
| 203 |
-+void speculative_store_bypass_update(void) |
|
| 204 |
-+{
|
|
| 205 |
-+ __speculative_store_bypass_update(current_thread_info()->flags); |
|
| 206 |
-+} |
|
| 207 |
-+ |
|
| 208 |
- void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 209 |
- struct tss_struct *tss) |
|
| 210 |
- {
|
|
| 211 |
-@@ -230,6 +249,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 212 |
- |
|
| 213 |
- if ((tifp ^ tifn) & _TIF_NOTSC) |
|
| 214 |
- cr4_toggle_bits(X86_CR4_TSD); |
|
| 215 |
-+ |
|
| 216 |
-+ if ((tifp ^ tifn) & _TIF_RDS) |
|
| 217 |
-+ __speculative_store_bypass_update(tifn); |
|
| 218 |
- } |
|
| 219 |
- |
|
| 220 |
- /* |
|
| 221 |
-2.7.4 |
|
| 222 |
- |
deleted file mode 100644
@@ -1,222 +0,0 @@
-From 38c6d310c9ef423b9c7f4c6490d2086858a4a740 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sun, 29 Apr 2018 15:26:40 +0200 |
|
| 4 |
-Subject: [PATCH 20/54] x86/speculation: Add prctl for Speculative Store Bypass |
|
| 5 |
- mitigation |
|
| 6 |
- |
|
| 7 |
-commit a73ec77ee17ec556fe7f165d00314cb7c047b1ac upstream |
|
| 8 |
- |
|
| 9 |
-Add prctl based control for Speculative Store Bypass mitigation and make it |
|
| 10 |
-the default mitigation for Intel and AMD. |
|
| 11 |
- |
|
| 12 |
-Andi Kleen provided the following rationale (slightly redacted): |
|
| 13 |
- |
|
| 14 |
- There are multiple levels of impact of Speculative Store Bypass: |
|
| 15 |
- |
|
| 16 |
- 1) JITed sandbox. |
|
| 17 |
- It cannot invoke system calls, but can do PRIME+PROBE and may have call |
|
| 18 |
- interfaces to other code |
|
| 19 |
- |
|
| 20 |
- 2) Native code process. |
|
| 21 |
- No protection inside the process at this level. |
|
| 22 |
- |
|
| 23 |
- 3) Kernel. |
|
| 24 |
- |
|
| 25 |
- 4) Between processes. |
|
| 26 |
- |
|
| 27 |
- The prctl tries to protect against case (1) doing attacks. |
|
| 28 |
- |
|
| 29 |
- If the untrusted code can do random system calls then control is already |
|
| 30 |
- lost in a much worse way. So there needs to be system call protection in |
|
| 31 |
- some way (using a JIT not allowing them or seccomp). Or rather if the |
|
| 32 |
- process can subvert its environment somehow to do the prctl it can already |
|
| 33 |
- execute arbitrary code, which is much worse than SSB. |
|
| 34 |
- |
|
| 35 |
- To put it differently, the point of the prctl is to not allow JITed code |
|
| 36 |
- to read data it shouldn't read from its JITed sandbox. If it already has |
|
| 37 |
- escaped its sandbox then it can already read everything it wants in its |
|
| 38 |
- address space, and do much worse. |
|
| 39 |
- |
|
| 40 |
- The ability to control Speculative Store Bypass allows to enable the |
|
| 41 |
- protection selectively without affecting overall system performance. |
|
| 42 |
- |
|
| 43 |
-Based on an initial patch from Tim Chen. Completely rewritten. |
|
| 44 |
- |
|
| 45 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 46 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 47 |
- |
|
| 48 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 49 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 50 |
- Documentation/kernel-parameters.txt | 6 ++- |
|
| 51 |
- arch/x86/include/asm/nospec-branch.h | 1 + |
|
| 52 |
- arch/x86/kernel/cpu/bugs.c | 83 +++++++++++++++++++++++++++++++----- |
|
| 53 |
- 3 files changed, 79 insertions(+), 11 deletions(-) |
|
| 54 |
- |
|
| 55 |
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt |
|
| 56 |
-index 792ac91..543923b 100644 |
|
| 57 |
-+++ b/Documentation/kernel-parameters.txt |
|
| 58 |
-@@ -4001,7 +4001,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
|
| 59 |
- off - Unconditionally enable Speculative Store Bypass |
|
| 60 |
- auto - Kernel detects whether the CPU model contains an |
|
| 61 |
- implementation of Speculative Store Bypass and |
|
| 62 |
-- picks the most appropriate mitigation |
|
| 63 |
-+ picks the most appropriate mitigation. |
|
| 64 |
-+ prctl - Control Speculative Store Bypass per thread |
|
| 65 |
-+ via prctl. Speculative Store Bypass is enabled |
|
| 66 |
-+ for a process by default. The state of the control |
|
| 67 |
-+ is inherited on fork. |
|
| 68 |
- |
|
| 69 |
- Not specifying this option is equivalent to |
|
| 70 |
- spec_store_bypass_disable=auto. |
|
| 71 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 72 |
-index 1119f14..71ad014 100644 |
|
| 73 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 74 |
-@@ -232,6 +232,7 @@ extern u64 x86_spec_ctrl_get_default(void); |
|
| 75 |
- enum ssb_mitigation {
|
|
| 76 |
- SPEC_STORE_BYPASS_NONE, |
|
| 77 |
- SPEC_STORE_BYPASS_DISABLE, |
|
| 78 |
-+ SPEC_STORE_BYPASS_PRCTL, |
|
| 79 |
- }; |
|
| 80 |
- |
|
| 81 |
- extern char __indirect_thunk_start[]; |
|
| 82 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 83 |
-index 4f09576..b7d9adf 100644 |
|
| 84 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 85 |
-@@ -11,6 +11,8 @@ |
|
| 86 |
- #include <linux/utsname.h> |
|
| 87 |
- #include <linux/cpu.h> |
|
| 88 |
- #include <linux/module.h> |
|
| 89 |
-+#include <linux/nospec.h> |
|
| 90 |
-+#include <linux/prctl.h> |
|
| 91 |
- |
|
| 92 |
- #include <asm/spec-ctrl.h> |
|
| 93 |
- #include <asm/cmdline.h> |
|
| 94 |
-@@ -411,20 +413,23 @@ enum ssb_mitigation_cmd {
|
|
| 95 |
- SPEC_STORE_BYPASS_CMD_NONE, |
|
| 96 |
- SPEC_STORE_BYPASS_CMD_AUTO, |
|
| 97 |
- SPEC_STORE_BYPASS_CMD_ON, |
|
| 98 |
-+ SPEC_STORE_BYPASS_CMD_PRCTL, |
|
| 99 |
- }; |
|
| 100 |
- |
|
| 101 |
- static const char *ssb_strings[] = {
|
|
| 102 |
- [SPEC_STORE_BYPASS_NONE] = "Vulnerable", |
|
| 103 |
-- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled" |
|
| 104 |
-+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", |
|
| 105 |
-+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl" |
|
| 106 |
- }; |
|
| 107 |
- |
|
| 108 |
- static const struct {
|
|
| 109 |
- const char *option; |
|
| 110 |
- enum ssb_mitigation_cmd cmd; |
|
| 111 |
- } ssb_mitigation_options[] = {
|
|
| 112 |
-- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
| 113 |
-- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
| 114 |
-- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
| 115 |
-+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
| 116 |
-+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
| 117 |
-+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
| 118 |
-+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
|
|
| 119 |
- }; |
|
| 120 |
- |
|
| 121 |
- static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) |
|
| 122 |
-@@ -474,14 +479,15 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 123 |
- |
|
| 124 |
- switch (cmd) {
|
|
| 125 |
- case SPEC_STORE_BYPASS_CMD_AUTO: |
|
| 126 |
-- /* |
|
| 127 |
-- * AMD platforms by default don't need SSB mitigation. |
|
| 128 |
-- */ |
|
| 129 |
-- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) |
|
| 130 |
-- break; |
|
| 131 |
-+ /* Choose prctl as the default mode */ |
|
| 132 |
-+ mode = SPEC_STORE_BYPASS_PRCTL; |
|
| 133 |
-+ break; |
|
| 134 |
- case SPEC_STORE_BYPASS_CMD_ON: |
|
| 135 |
- mode = SPEC_STORE_BYPASS_DISABLE; |
|
| 136 |
- break; |
|
| 137 |
-+ case SPEC_STORE_BYPASS_CMD_PRCTL: |
|
| 138 |
-+ mode = SPEC_STORE_BYPASS_PRCTL; |
|
| 139 |
-+ break; |
|
| 140 |
- case SPEC_STORE_BYPASS_CMD_NONE: |
|
| 141 |
- break; |
|
| 142 |
- } |
|
| 143 |
-@@ -492,7 +498,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 144 |
- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass |
|
| 145 |
- * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation |
|
| 146 |
- */ |
|
| 147 |
-- if (mode != SPEC_STORE_BYPASS_NONE) {
|
|
| 148 |
-+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
|
|
| 149 |
- setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); |
|
| 150 |
- /* |
|
| 151 |
- * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses |
|
| 152 |
-@@ -523,6 +529,63 @@ static void ssb_select_mitigation() |
|
| 153 |
- |
|
| 154 |
- #undef pr_fmt |
|
| 155 |
- |
|
| 156 |
-+static int ssb_prctl_set(unsigned long ctrl) |
|
| 157 |
-+{
|
|
| 158 |
-+ bool rds = !!test_tsk_thread_flag(current, TIF_RDS); |
|
| 159 |
-+ |
|
| 160 |
-+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL) |
|
| 161 |
-+ return -ENXIO; |
|
| 162 |
-+ |
|
| 163 |
-+ if (ctrl == PR_SPEC_ENABLE) |
|
| 164 |
-+ clear_tsk_thread_flag(current, TIF_RDS); |
|
| 165 |
-+ else |
|
| 166 |
-+ set_tsk_thread_flag(current, TIF_RDS); |
|
| 167 |
-+ |
|
| 168 |
-+ if (rds != !!test_tsk_thread_flag(current, TIF_RDS)) |
|
| 169 |
-+ speculative_store_bypass_update(); |
|
| 170 |
-+ |
|
| 171 |
-+ return 0; |
|
| 172 |
-+} |
|
| 173 |
-+ |
|
| 174 |
-+static int ssb_prctl_get(void) |
|
| 175 |
-+{
|
|
| 176 |
-+ switch (ssb_mode) {
|
|
| 177 |
-+ case SPEC_STORE_BYPASS_DISABLE: |
|
| 178 |
-+ return PR_SPEC_DISABLE; |
|
| 179 |
-+ case SPEC_STORE_BYPASS_PRCTL: |
|
| 180 |
-+ if (test_tsk_thread_flag(current, TIF_RDS)) |
|
| 181 |
-+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
|
| 182 |
-+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
|
| 183 |
-+ default: |
|
| 184 |
-+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
|
| 185 |
-+ return PR_SPEC_ENABLE; |
|
| 186 |
-+ return PR_SPEC_NOT_AFFECTED; |
|
| 187 |
-+ } |
|
| 188 |
-+} |
|
| 189 |
-+ |
|
| 190 |
-+int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl) |
|
| 191 |
-+{
|
|
| 192 |
-+ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE) |
|
| 193 |
-+ return -ERANGE; |
|
| 194 |
-+ |
|
| 195 |
-+ switch (which) {
|
|
| 196 |
-+ case PR_SPEC_STORE_BYPASS: |
|
| 197 |
-+ return ssb_prctl_set(ctrl); |
|
| 198 |
-+ default: |
|
| 199 |
-+ return -ENODEV; |
|
| 200 |
-+ } |
|
| 201 |
-+} |
|
| 202 |
-+ |
|
| 203 |
-+int arch_prctl_spec_ctrl_get(unsigned long which) |
|
| 204 |
-+{
|
|
| 205 |
-+ switch (which) {
|
|
| 206 |
-+ case PR_SPEC_STORE_BYPASS: |
|
| 207 |
-+ return ssb_prctl_get(); |
|
| 208 |
-+ default: |
|
| 209 |
-+ return -ENODEV; |
|
| 210 |
-+ } |
|
| 211 |
-+} |
|
| 212 |
-+ |
|
| 213 |
- void x86_spec_ctrl_setup_ap(void) |
|
| 214 |
- {
|
|
| 215 |
- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 216 |
-2.7.4 |
|
| 217 |
- |
deleted file mode 100644
@@ -1,162 +0,0 @@
-From b78a78bbe1529796b94416af04e302f872bb9646 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kees Cook <keescook@chromium.org> |
|
| 3 |
-Date: Tue, 1 May 2018 15:19:04 -0700 |
|
| 4 |
-Subject: [PATCH 21/54] nospec: Allow getting/setting on non-current task |
|
| 5 |
- |
|
| 6 |
-commit 7bbf1373e228840bb0295a2ca26d548ef37f448e upstream |
|
| 7 |
- |
|
| 8 |
-Adjust arch_prctl_get/set_spec_ctrl() to operate on tasks other than |
|
| 9 |
-current. |
|
| 10 |
- |
|
| 11 |
-This is needed both for /proc/$pid/status queries and for seccomp (since |
|
| 12 |
-thread-syncing can trigger seccomp in non-current threads). |
|
| 13 |
- |
|
| 14 |
-Signed-off-by: Kees Cook <keescook@chromium.org> |
|
| 15 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 16 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 17 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 18 |
- arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++----------- |
|
| 19 |
- include/linux/nospec.h | 7 +++++-- |
|
| 20 |
- kernel/sys.c | 9 +++++---- |
|
| 21 |
- 3 files changed, 26 insertions(+), 17 deletions(-) |
|
| 22 |
- |
|
| 23 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 24 |
-index b7d9adf..3760931 100644 |
|
| 25 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 26 |
-@@ -529,31 +529,35 @@ static void ssb_select_mitigation() |
|
| 27 |
- |
|
| 28 |
- #undef pr_fmt |
|
| 29 |
- |
|
| 30 |
--static int ssb_prctl_set(unsigned long ctrl) |
|
| 31 |
-+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
|
| 32 |
- {
|
|
| 33 |
-- bool rds = !!test_tsk_thread_flag(current, TIF_RDS); |
|
| 34 |
-+ bool rds = !!test_tsk_thread_flag(task, TIF_RDS); |
|
| 35 |
- |
|
| 36 |
- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL) |
|
| 37 |
- return -ENXIO; |
|
| 38 |
- |
|
| 39 |
- if (ctrl == PR_SPEC_ENABLE) |
|
| 40 |
-- clear_tsk_thread_flag(current, TIF_RDS); |
|
| 41 |
-+ clear_tsk_thread_flag(task, TIF_RDS); |
|
| 42 |
- else |
|
| 43 |
-- set_tsk_thread_flag(current, TIF_RDS); |
|
| 44 |
-+ set_tsk_thread_flag(task, TIF_RDS); |
|
| 45 |
- |
|
| 46 |
-- if (rds != !!test_tsk_thread_flag(current, TIF_RDS)) |
|
| 47 |
-+ /* |
|
| 48 |
-+ * If being set on non-current task, delay setting the CPU |
|
| 49 |
-+ * mitigation until it is next scheduled. |
|
| 50 |
-+ */ |
|
| 51 |
-+ if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS)) |
|
| 52 |
- speculative_store_bypass_update(); |
|
| 53 |
- |
|
| 54 |
- return 0; |
|
| 55 |
- } |
|
| 56 |
- |
|
| 57 |
--static int ssb_prctl_get(void) |
|
| 58 |
-+static int ssb_prctl_get(struct task_struct *task) |
|
| 59 |
- {
|
|
| 60 |
- switch (ssb_mode) {
|
|
| 61 |
- case SPEC_STORE_BYPASS_DISABLE: |
|
| 62 |
- return PR_SPEC_DISABLE; |
|
| 63 |
- case SPEC_STORE_BYPASS_PRCTL: |
|
| 64 |
-- if (test_tsk_thread_flag(current, TIF_RDS)) |
|
| 65 |
-+ if (test_tsk_thread_flag(task, TIF_RDS)) |
|
| 66 |
- return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
|
| 67 |
- return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
|
| 68 |
- default: |
|
| 69 |
-@@ -563,24 +567,25 @@ static int ssb_prctl_get(void) |
|
| 70 |
- } |
|
| 71 |
- } |
|
| 72 |
- |
|
| 73 |
--int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl) |
|
| 74 |
-+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 75 |
-+ unsigned long ctrl) |
|
| 76 |
- {
|
|
| 77 |
- if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE) |
|
| 78 |
- return -ERANGE; |
|
| 79 |
- |
|
| 80 |
- switch (which) {
|
|
| 81 |
- case PR_SPEC_STORE_BYPASS: |
|
| 82 |
-- return ssb_prctl_set(ctrl); |
|
| 83 |
-+ return ssb_prctl_set(task, ctrl); |
|
| 84 |
- default: |
|
| 85 |
- return -ENODEV; |
|
| 86 |
- } |
|
| 87 |
- } |
|
| 88 |
- |
|
| 89 |
--int arch_prctl_spec_ctrl_get(unsigned long which) |
|
| 90 |
-+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
|
| 91 |
- {
|
|
| 92 |
- switch (which) {
|
|
| 93 |
- case PR_SPEC_STORE_BYPASS: |
|
| 94 |
-- return ssb_prctl_get(); |
|
| 95 |
-+ return ssb_prctl_get(task); |
|
| 96 |
- default: |
|
| 97 |
- return -ENODEV; |
|
| 98 |
- } |
|
| 99 |
-diff --git a/include/linux/nospec.h b/include/linux/nospec.h |
|
| 100 |
-index 700bb8a..a908c95 100644 |
|
| 101 |
-+++ b/include/linux/nospec.h |
|
| 102 |
-@@ -7,6 +7,8 @@ |
|
| 103 |
- #define _LINUX_NOSPEC_H |
|
| 104 |
- #include <asm/barrier.h> |
|
| 105 |
- |
|
| 106 |
-+struct task_struct; |
|
| 107 |
-+ |
|
| 108 |
- /** |
|
| 109 |
- * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise |
|
| 110 |
- * @index: array element index |
|
| 111 |
-@@ -57,7 +59,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, |
|
| 112 |
- }) |
|
| 113 |
- |
|
| 114 |
- /* Speculation control prctl */ |
|
| 115 |
--int arch_prctl_spec_ctrl_get(unsigned long which); |
|
| 116 |
--int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl); |
|
| 117 |
-+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); |
|
| 118 |
-+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 119 |
-+ unsigned long ctrl); |
|
| 120 |
- |
|
| 121 |
- #endif /* _LINUX_NOSPEC_H */ |
|
| 122 |
-diff --git a/kernel/sys.c b/kernel/sys.c |
|
| 123 |
-index 312c985..143cd63 100644 |
|
| 124 |
-+++ b/kernel/sys.c |
|
| 125 |
-@@ -2074,12 +2074,13 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) |
|
| 126 |
- } |
|
| 127 |
- #endif |
|
| 128 |
- |
|
| 129 |
--int __weak arch_prctl_spec_ctrl_get(unsigned long which) |
|
| 130 |
-+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which) |
|
| 131 |
- {
|
|
| 132 |
- return -EINVAL; |
|
| 133 |
- } |
|
| 134 |
- |
|
| 135 |
--int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl) |
|
| 136 |
-+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which, |
|
| 137 |
-+ unsigned long ctrl) |
|
| 138 |
- {
|
|
| 139 |
- return -EINVAL; |
|
| 140 |
- } |
|
| 141 |
-@@ -2285,12 +2286,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
|
| 142 |
- case PR_GET_SPECULATION_CTRL: |
|
| 143 |
- if (arg3 || arg4 || arg5) |
|
| 144 |
- return -EINVAL; |
|
| 145 |
-- error = arch_prctl_spec_ctrl_get(arg2); |
|
| 146 |
-+ error = arch_prctl_spec_ctrl_get(me, arg2); |
|
| 147 |
- break; |
|
| 148 |
- case PR_SET_SPECULATION_CTRL: |
|
| 149 |
- if (arg4 || arg5) |
|
| 150 |
- return -EINVAL; |
|
| 151 |
-- error = arch_prctl_spec_ctrl_set(arg2, arg3); |
|
| 152 |
-+ error = arch_prctl_spec_ctrl_set(me, arg2, arg3); |
|
| 153 |
- break; |
|
| 154 |
- default: |
|
| 155 |
- error = -EINVAL; |
|
| 156 |
-2.7.4 |
|
| 157 |
- |
deleted file mode 100644
@@ -1,64 +0,0 @@
-From 35ac7b7721fcf4ec87ddbcff4cda073bec0175e6 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kees Cook <keescook@chromium.org> |
|
| 3 |
-Date: Tue, 1 May 2018 15:31:45 -0700 |
|
| 4 |
-Subject: [PATCH 22/54] proc: Provide details on speculation flaw mitigations |
|
| 5 |
- |
|
| 6 |
-commit fae1fa0fc6cca8beee3ab8ed71d54f9a78fa3f64 upstream |
|
| 7 |
- |
|
| 8 |
-As done with seccomp and no_new_privs, also show speculation flaw |
|
| 9 |
-mitigation state in /proc/$pid/status. |
|
| 10 |
- |
|
| 11 |
-Signed-off-by: Kees Cook <keescook@chromium.org> |
|
| 12 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 13 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 14 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 15 |
- fs/proc/array.c | 24 +++++++++++++++++++++++- |
|
| 16 |
- 1 file changed, 23 insertions(+), 1 deletion(-) |
|
| 17 |
- |
|
| 18 |
-diff --git a/fs/proc/array.c b/fs/proc/array.c |
|
| 19 |
-index 794b52a..64f3f20 100644 |
|
| 20 |
-+++ b/fs/proc/array.c |
|
| 21 |
-@@ -80,6 +80,7 @@ |
|
| 22 |
- #include <linux/delayacct.h> |
|
| 23 |
- #include <linux/seq_file.h> |
|
| 24 |
- #include <linux/pid_namespace.h> |
|
| 25 |
-+#include <linux/prctl.h> |
|
| 26 |
- #include <linux/ptrace.h> |
|
| 27 |
- #include <linux/tracehook.h> |
|
| 28 |
- #include <linux/string_helpers.h> |
|
| 29 |
-@@ -345,8 +346,29 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) |
|
| 30 |
- {
|
|
| 31 |
- #ifdef CONFIG_SECCOMP |
|
| 32 |
- seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode); |
|
| 33 |
-- seq_putc(m, '\n'); |
|
| 34 |
- #endif |
|
| 35 |
-+ seq_printf(m, "\nSpeculation Store Bypass:\t"); |
|
| 36 |
-+ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
|
|
| 37 |
-+ case -EINVAL: |
|
| 38 |
-+ seq_printf(m, "unknown"); |
|
| 39 |
-+ break; |
|
| 40 |
-+ case PR_SPEC_NOT_AFFECTED: |
|
| 41 |
-+ seq_printf(m, "not vulnerable"); |
|
| 42 |
-+ break; |
|
| 43 |
-+ case PR_SPEC_PRCTL | PR_SPEC_DISABLE: |
|
| 44 |
-+ seq_printf(m, "thread mitigated"); |
|
| 45 |
-+ break; |
|
| 46 |
-+ case PR_SPEC_PRCTL | PR_SPEC_ENABLE: |
|
| 47 |
-+ seq_printf(m, "thread vulnerable"); |
|
| 48 |
-+ break; |
|
| 49 |
-+ case PR_SPEC_DISABLE: |
|
| 50 |
-+ seq_printf(m, "globally mitigated"); |
|
| 51 |
-+ break; |
|
| 52 |
-+ default: |
|
| 53 |
-+ seq_printf(m, "vulnerable"); |
|
| 54 |
-+ break; |
|
| 55 |
-+ } |
|
| 56 |
-+ seq_putc(m, '\n'); |
|
| 57 |
- } |
|
| 58 |
- |
|
| 59 |
- static inline void task_context_switch_counts(struct seq_file *m, |
|
| 60 |
-2.7.4 |
|
| 61 |
- |
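
The new /proc/$pid/status field added above can be read like any other status line. A small sketch (not from the patch) that prints it for the current process; the field name matches the seq_printf() string in the hunk:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/self/status", "r");

    if (!f) {
        perror("/proc/self/status");
        return 1;
    }
    while (fgets(line, sizeof(line), f)) {
        /* e.g. "Speculation Store Bypass:  thread vulnerable" */
        if (strncmp(line, "Speculation", strlen("Speculation")) == 0)
            fputs(line, stdout);
    }
    fclose(f);
    return 0;
}
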
| 62 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,64 +0,0 @@ |
| 1 |
-From 8f4e8b49f542bef6dbdbf8b5373d6222e524d8c9 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kees Cook <keescook@chromium.org> |
|
| 3 |
-Date: Tue, 1 May 2018 15:07:31 -0700 |
|
| 4 |
-Subject: [PATCH 23/54] seccomp: Enable speculation flaw mitigations |
|
| 5 |
- |
|
| 6 |
-commit 5c3070890d06ff82eecb808d02d2ca39169533ef upstream |
|
| 7 |
- |
|
| 8 |
-When speculation flaw mitigations are opt-in (via prctl), using seccomp |
|
| 9 |
-will automatically opt-in to these protections, since using seccomp |
|
| 10 |
-indicates at least some level of sandboxing is desired. |
|
| 11 |
- |
|
| 12 |
-Signed-off-by: Kees Cook <keescook@chromium.org> |
|
| 13 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 14 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 15 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 16 |
- kernel/seccomp.c | 17 +++++++++++++++++ |
|
| 17 |
- 1 file changed, 17 insertions(+) |
|
| 18 |
- |
|
| 19 |
-diff --git a/kernel/seccomp.c b/kernel/seccomp.c |
|
| 20 |
-index af182a6..1d3078b 100644 |
|
| 21 |
-+++ b/kernel/seccomp.c |
|
| 22 |
-@@ -16,6 +16,8 @@ |
|
| 23 |
- #include <linux/atomic.h> |
|
| 24 |
- #include <linux/audit.h> |
|
| 25 |
- #include <linux/compat.h> |
|
| 26 |
-+#include <linux/nospec.h> |
|
| 27 |
-+#include <linux/prctl.h> |
|
| 28 |
- #include <linux/sched.h> |
|
| 29 |
- #include <linux/seccomp.h> |
|
| 30 |
- #include <linux/slab.h> |
|
| 31 |
-@@ -214,6 +216,19 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode) |
|
| 32 |
- return true; |
|
| 33 |
- } |
|
| 34 |
- |
|
| 35 |
-+/* |
|
| 36 |
-+ * If a given speculation mitigation is opt-in (prctl()-controlled), |
|
| 37 |
-+ * select it, by disabling speculation (enabling mitigation). |
|
| 38 |
-+ */ |
|
| 39 |
-+static inline void spec_mitigate(struct task_struct *task, |
|
| 40 |
-+ unsigned long which) |
|
| 41 |
-+{
|
|
| 42 |
-+ int state = arch_prctl_spec_ctrl_get(task, which); |
|
| 43 |
-+ |
|
| 44 |
-+ if (state > 0 && (state & PR_SPEC_PRCTL)) |
|
| 45 |
-+ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE); |
|
| 46 |
-+} |
|
| 47 |
-+ |
|
| 48 |
- static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 49 |
- unsigned long seccomp_mode) |
|
| 50 |
- {
|
|
| 51 |
-@@ -225,6 +240,8 @@ static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 52 |
- * filter) is set. |
|
| 53 |
- */ |
|
| 54 |
- smp_mb__before_atomic(); |
|
| 55 |
-+ /* Assume seccomp processes want speculation flaw mitigation. */ |
|
| 56 |
-+ spec_mitigate(task, PR_SPEC_STORE_BYPASS); |
|
| 57 |
- set_tsk_thread_flag(task, TIF_SECCOMP); |
|
| 58 |
- } |
|
| 59 |
- |
|
| 60 |
-2.7.4 |
|
| 61 |
- |
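
To illustrate the behaviour introduced above, a userspace sketch (not part of the patch) that installs a trivial allow-all seccomp filter and then queries the speculation control state; on a kernel with this series in prctl/seccomp mode, the second query should report the task as mitigated. The BPF boilerplate and the PR_SET_NO_NEW_PRIVS step are standard seccomp usage, not something defined by this patch.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef PR_GET_SPECULATION_CTRL        /* from include/uapi/linux/prctl.h */
#define PR_GET_SPECULATION_CTRL 52
#define PR_SPEC_STORE_BYPASS    0
#endif

int main(void)
{
    /* A trivial allow-everything filter; installing any filter is enough
     * to trigger the implicit mitigation added above. */
    struct sock_filter allow = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
    struct sock_fprog prog = { .len = 1, .filter = &allow };

    printf("before seccomp: 0x%x\n",
           prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));

    prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
    if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0) != 0) {
        perror("PR_SET_SECCOMP");
        return 1;
    }

    /* The store-bypass mitigation should now be reported as engaged. */
    printf("after seccomp:  0x%x\n",
           prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
    return 0;
}
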
| 62 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,43 +0,0 @@ |
| 1 |
-From bb7394efb622c21daa838048937e0ef45674f413 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kees Cook <keescook@chromium.org> |
|
| 3 |
-Date: Thu, 3 May 2018 15:03:30 -0700 |
|
| 4 |
-Subject: [PATCH 24/54] x86/bugs: Make boot modes __ro_after_init |
|
| 5 |
- |
|
| 6 |
-commit f9544b2b076ca90d887c5ae5d74fab4c21bb7c13 upstream |
|
| 7 |
- |
|
| 8 |
-There's no reason for these to be changed after boot. |
|
| 9 |
- |
|
| 10 |
-Signed-off-by: Kees Cook <keescook@chromium.org> |
|
| 11 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 12 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 13 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 14 |
- arch/x86/kernel/cpu/bugs.c | 5 +++-- |
|
| 15 |
- 1 file changed, 3 insertions(+), 2 deletions(-) |
|
| 16 |
- |
|
| 17 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 18 |
-index 3760931..65114d2 100644 |
|
| 19 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 20 |
-@@ -128,7 +128,8 @@ static const char *spectre_v2_strings[] = {
|
|
| 21 |
- #undef pr_fmt |
|
| 22 |
- #define pr_fmt(fmt) "Spectre V2 : " fmt |
|
| 23 |
- |
|
| 24 |
--static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; |
|
| 25 |
-+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = |
|
| 26 |
-+ SPECTRE_V2_NONE; |
|
| 27 |
- |
|
| 28 |
- void x86_spec_ctrl_set(u64 val) |
|
| 29 |
- {
|
|
| 30 |
-@@ -406,7 +407,7 @@ static void __init spectre_v2_select_mitigation(void) |
|
| 31 |
- #undef pr_fmt |
|
| 32 |
- #define pr_fmt(fmt) "Speculative Store Bypass: " fmt |
|
| 33 |
- |
|
| 34 |
--static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE; |
|
| 35 |
-+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; |
|
| 36 |
- |
|
| 37 |
- /* The kernel command line selection */ |
|
| 38 |
- enum ssb_mitigation_cmd {
|
|
| 39 |
-2.7.4 |
|
| 40 |
- |
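
For readers unfamiliar with the annotation, __ro_after_init places a variable in a section that the kernel write-protects once boot has finished, so it behaves like const for the rest of the system's lifetime. A kernel-side sketch of the pattern (illustrative only; the names are made up):

#include <linux/init.h>
#include <linux/cache.h>

/* Chosen once during boot and never written again, so the page it lives in
 * can safely be made read-only after init. */
static int example_mode __ro_after_init;

static int __init example_pick_mode(void)
{
    example_mode = 1;    /* last legitimate write */
    return 0;
}
early_initcall(example_pick_mode);
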
| 41 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,218 +0,0 @@ |
| 1 |
-From 512e11d2918b279a0f895d6f9eb560d815ce2ea2 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Thu, 3 May 2018 22:09:15 +0200 |
|
| 4 |
-Subject: [PATCH 25/54] prctl: Add force disable speculation |
|
| 5 |
- |
|
| 6 |
-commit 356e4bfff2c5489e016fdb925adbf12a1e3950ee upstream |
|
| 7 |
- |
|
| 8 |
-For certain use cases it is desired to enforce mitigations so they cannot |
|
| 9 |
-be undone afterwards. That's important for loader stubs which want to |
|
| 10 |
-prevent a child from disabling the mitigation again. Will also be used for |
|
| 11 |
-seccomp(). The extra state preserving of the prctl state for SSB is a |
|
| 12 |
-preparatory step for EBPF dymanic speculation control. |
|
| 13 |
- |
|
| 14 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 15 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 16 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 17 |
- Documentation/spec_ctrl.txt | 34 +++++++++++++++++++++------------- |
|
| 18 |
- arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++++++---------- |
|
| 19 |
- fs/proc/array.c | 3 +++ |
|
| 20 |
- include/linux/sched.h | 9 +++++++++ |
|
| 21 |
- include/uapi/linux/prctl.h | 1 + |
|
| 22 |
- 5 files changed, 59 insertions(+), 23 deletions(-) |
|
| 23 |
- |
|
| 24 |
-diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt |
|
| 25 |
-index ddbebcd..1b3690d 100644 |
|
| 26 |
-+++ b/Documentation/spec_ctrl.txt |
|
| 27 |
-@@ -25,19 +25,21 @@ PR_GET_SPECULATION_CTRL |
|
| 28 |
- ----------------------- |
|
| 29 |
- |
|
| 30 |
- PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature |
|
| 31 |
--which is selected with arg2 of prctl(2). The return value uses bits 0-2 with |
|
| 32 |
-+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with |
|
| 33 |
- the following meaning: |
|
| 34 |
- |
|
| 35 |
--==== ================ =================================================== |
|
| 36 |
--Bit Define Description |
|
| 37 |
--==== ================ =================================================== |
|
| 38 |
--0 PR_SPEC_PRCTL Mitigation can be controlled per task by |
|
| 39 |
-- PR_SET_SPECULATION_CTRL |
|
| 40 |
--1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is |
|
| 41 |
-- disabled |
|
| 42 |
--2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is |
|
| 43 |
-- enabled |
|
| 44 |
--==== ================ =================================================== |
|
| 45 |
-+==== ===================== =================================================== |
|
| 46 |
-+Bit Define Description |
|
| 47 |
-+==== ===================== =================================================== |
|
| 48 |
-+0 PR_SPEC_PRCTL Mitigation can be controlled per task by |
|
| 49 |
-+ PR_SET_SPECULATION_CTRL |
|
| 50 |
-+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is |
|
| 51 |
-+ disabled |
|
| 52 |
-+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is |
|
| 53 |
-+ enabled |
|
| 54 |
-+3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A |
|
| 55 |
-+ subsequent prctl(..., PR_SPEC_ENABLE) will fail. |
|
| 56 |
-+==== ===================== =================================================== |
|
| 57 |
- |
|
| 58 |
- If all bits are 0 the CPU is not affected by the speculation misfeature. |
|
| 59 |
- |
|
| 60 |
-@@ -47,9 +49,11 @@ misfeature will fail. |
|
| 61 |
- |
|
| 62 |
- PR_SET_SPECULATION_CTRL |
|
| 63 |
- ----------------------- |
|
| 64 |
-+ |
|
| 65 |
- PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which |
|
| 66 |
- is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand |
|
| 67 |
--in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE. |
|
| 68 |
-+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or |
|
| 69 |
-+PR_SPEC_FORCE_DISABLE. |
|
| 70 |
- |
|
| 71 |
- Common error codes |
|
| 72 |
- ------------------ |
|
| 73 |
-@@ -70,10 +74,13 @@ Value Meaning |
|
| 74 |
- 0 Success |
|
| 75 |
- |
|
| 76 |
- ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor |
|
| 77 |
-- PR_SPEC_DISABLE |
|
| 78 |
-+ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE |
|
| 79 |
- |
|
| 80 |
- ENXIO Control of the selected speculation misfeature is not possible. |
|
| 81 |
- See PR_GET_SPECULATION_CTRL. |
|
| 82 |
-+ |
|
| 83 |
-+EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller |
|
| 84 |
-+ tried to enable it again. |
|
| 85 |
- ======= ================================================================= |
|
| 86 |
- |
|
| 87 |
- Speculation misfeature controls |
|
| 88 |
-@@ -84,3 +91,4 @@ Speculation misfeature controls |
|
| 89 |
- * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0); |
|
| 90 |
- * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); |
|
| 91 |
- * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); |
|
| 92 |
-+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); |
|
| 93 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 94 |
-index 65114d2..fdbd8e5 100644 |
|
| 95 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 96 |
-@@ -532,21 +532,37 @@ static void ssb_select_mitigation() |
|
| 97 |
- |
|
| 98 |
- static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
|
| 99 |
- {
|
|
| 100 |
-- bool rds = !!test_tsk_thread_flag(task, TIF_RDS); |
|
| 101 |
-+ bool update; |
|
| 102 |
- |
|
| 103 |
- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL) |
|
| 104 |
- return -ENXIO; |
|
| 105 |
- |
|
| 106 |
-- if (ctrl == PR_SPEC_ENABLE) |
|
| 107 |
-- clear_tsk_thread_flag(task, TIF_RDS); |
|
| 108 |
-- else |
|
| 109 |
-- set_tsk_thread_flag(task, TIF_RDS); |
|
| 110 |
-+ switch (ctrl) {
|
|
| 111 |
-+ case PR_SPEC_ENABLE: |
|
| 112 |
-+ /* If speculation is force disabled, enable is not allowed */ |
|
| 113 |
-+ if (task_spec_ssb_force_disable(task)) |
|
| 114 |
-+ return -EPERM; |
|
| 115 |
-+ task_clear_spec_ssb_disable(task); |
|
| 116 |
-+ update = test_and_clear_tsk_thread_flag(task, TIF_RDS); |
|
| 117 |
-+ break; |
|
| 118 |
-+ case PR_SPEC_DISABLE: |
|
| 119 |
-+ task_set_spec_ssb_disable(task); |
|
| 120 |
-+ update = !test_and_set_tsk_thread_flag(task, TIF_RDS); |
|
| 121 |
-+ break; |
|
| 122 |
-+ case PR_SPEC_FORCE_DISABLE: |
|
| 123 |
-+ task_set_spec_ssb_disable(task); |
|
| 124 |
-+ task_set_spec_ssb_force_disable(task); |
|
| 125 |
-+ update = !test_and_set_tsk_thread_flag(task, TIF_RDS); |
|
| 126 |
-+ break; |
|
| 127 |
-+ default: |
|
| 128 |
-+ return -ERANGE; |
|
| 129 |
-+ } |
|
| 130 |
- |
|
| 131 |
- /* |
|
| 132 |
- * If being set on non-current task, delay setting the CPU |
|
| 133 |
- * mitigation until it is next scheduled. |
|
| 134 |
- */ |
|
| 135 |
-- if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS)) |
|
| 136 |
-+ if (task == current && update) |
|
| 137 |
- speculative_store_bypass_update(); |
|
| 138 |
- |
|
| 139 |
- return 0; |
|
| 140 |
-@@ -558,7 +574,9 @@ static int ssb_prctl_get(struct task_struct *task) |
|
| 141 |
- case SPEC_STORE_BYPASS_DISABLE: |
|
| 142 |
- return PR_SPEC_DISABLE; |
|
| 143 |
- case SPEC_STORE_BYPASS_PRCTL: |
|
| 144 |
-- if (test_tsk_thread_flag(task, TIF_RDS)) |
|
| 145 |
-+ if (task_spec_ssb_force_disable(task)) |
|
| 146 |
-+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; |
|
| 147 |
-+ if (task_spec_ssb_disable(task)) |
|
| 148 |
- return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
|
| 149 |
- return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
|
| 150 |
- default: |
|
| 151 |
-@@ -571,9 +589,6 @@ static int ssb_prctl_get(struct task_struct *task) |
|
| 152 |
- int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 153 |
- unsigned long ctrl) |
|
| 154 |
- {
|
|
| 155 |
-- if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE) |
|
| 156 |
-- return -ERANGE; |
|
| 157 |
-- |
|
| 158 |
- switch (which) {
|
|
| 159 |
- case PR_SPEC_STORE_BYPASS: |
|
| 160 |
- return ssb_prctl_set(task, ctrl); |
|
| 161 |
-diff --git a/fs/proc/array.c b/fs/proc/array.c |
|
| 162 |
-index 64f3f20..3e37195 100644 |
|
| 163 |
-+++ b/fs/proc/array.c |
|
| 164 |
-@@ -355,6 +355,9 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) |
|
| 165 |
- case PR_SPEC_NOT_AFFECTED: |
|
| 166 |
- seq_printf(m, "not vulnerable"); |
|
| 167 |
- break; |
|
| 168 |
-+ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE: |
|
| 169 |
-+ seq_printf(m, "thread force mitigated"); |
|
| 170 |
-+ break; |
|
| 171 |
- case PR_SPEC_PRCTL | PR_SPEC_DISABLE: |
|
| 172 |
- seq_printf(m, "thread mitigated"); |
|
| 173 |
- break; |
|
| 174 |
-diff --git a/include/linux/sched.h b/include/linux/sched.h |
|
| 175 |
-index c549c8c..5ebef8c 100644 |
|
| 176 |
-+++ b/include/linux/sched.h |
|
| 177 |
-@@ -2354,6 +2354,8 @@ static inline void memalloc_noio_restore(unsigned int flags) |
|
| 178 |
- #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ |
|
| 179 |
- #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ |
|
| 180 |
- #define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ |
|
| 181 |
-+#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */ |
|
| 182 |
-+#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/ |
|
| 183 |
- |
|
| 184 |
- |
|
| 185 |
- #define TASK_PFA_TEST(name, func) \ |
|
| 186 |
-@@ -2380,6 +2382,13 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) |
|
| 187 |
- TASK_PFA_TEST(LMK_WAITING, lmk_waiting) |
|
| 188 |
- TASK_PFA_SET(LMK_WAITING, lmk_waiting) |
|
| 189 |
- |
|
| 190 |
-+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) |
|
| 191 |
-+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) |
|
| 192 |
-+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) |
|
| 193 |
-+ |
|
| 194 |
-+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
|
| 195 |
-+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
|
| 196 |
-+ |
|
| 197 |
- /* |
|
| 198 |
- * task->jobctl flags |
|
| 199 |
- */ |
|
| 200 |
-diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h |
|
| 201 |
-index 3b316be..64776b7 100644 |
|
| 202 |
-+++ b/include/uapi/linux/prctl.h |
|
| 203 |
-@@ -207,5 +207,6 @@ struct prctl_mm_map {
|
|
| 204 |
- # define PR_SPEC_PRCTL (1UL << 0) |
|
| 205 |
- # define PR_SPEC_ENABLE (1UL << 1) |
|
| 206 |
- # define PR_SPEC_DISABLE (1UL << 2) |
|
| 207 |
-+# define PR_SPEC_FORCE_DISABLE (1UL << 3) |
|
| 208 |
- |
|
| 209 |
- #endif /* _LINUX_PRCTL_H */ |
|
| 210 |
-2.7.4 |
|
| 211 |
- |
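
A userspace sketch (not from the patch) of the PR_SPEC_FORCE_DISABLE semantics documented above: once forced, a later attempt to re-enable speculation fails with EPERM. The constants are guarded in case the installed prctl.h predates this interface.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>

#ifndef PR_SPEC_FORCE_DISABLE          /* from include/uapi/linux/prctl.h */
#define PR_SET_SPECULATION_CTRL 53
#define PR_SPEC_STORE_BYPASS    0
#define PR_SPEC_ENABLE          (1UL << 1)
#define PR_SPEC_FORCE_DISABLE   (1UL << 3)
#endif

int main(void)
{
    /* Irrevocably disable speculative store bypass for this task; the
     * setting is inherited by children and cannot be relaxed later. */
    if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
              PR_SPEC_FORCE_DISABLE, 0, 0) != 0) {
        fprintf(stderr, "force disable: %s\n", strerror(errno));
        return 1;
    }

    /* Re-enabling must now be rejected with EPERM, as documented above. */
    if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
              PR_SPEC_ENABLE, 0, 0) != 0 && errno == EPERM)
        printf("re-enable rejected with EPERM\n");
    return 0;
}
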
| 212 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,33 +0,0 @@ |
| 1 |
-From 80e6b62a0da8f9769f3480b42998b4fead364e06 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Fri, 4 May 2018 09:40:03 +0200 |
|
| 4 |
-Subject: [PATCH 26/54] seccomp: Use PR_SPEC_FORCE_DISABLE |
|
| 5 |
- |
|
| 6 |
-commit b849a812f7eb92e96d1c8239b06581b2cfd8b275 upstream |
|
| 7 |
- |
|
| 8 |
-Use PR_SPEC_FORCE_DISABLE in seccomp() because seccomp does not allow to |
|
| 9 |
-widen restrictions. |
|
| 10 |
- |
|
| 11 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 12 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 13 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 14 |
- kernel/seccomp.c | 2 +- |
|
| 15 |
- 1 file changed, 1 insertion(+), 1 deletion(-) |
|
| 16 |
- |
|
| 17 |
-diff --git a/kernel/seccomp.c b/kernel/seccomp.c |
|
| 18 |
-index 1d3078b..a0bd6ea 100644 |
|
| 19 |
-+++ b/kernel/seccomp.c |
|
| 20 |
-@@ -226,7 +226,7 @@ static inline void spec_mitigate(struct task_struct *task, |
|
| 21 |
- int state = arch_prctl_spec_ctrl_get(task, which); |
|
| 22 |
- |
|
| 23 |
- if (state > 0 && (state & PR_SPEC_PRCTL)) |
|
| 24 |
-- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE); |
|
| 25 |
-+ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE); |
|
| 26 |
- } |
|
| 27 |
- |
|
| 28 |
- static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 29 |
-2.7.4 |
|
| 30 |
- |
| 31 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,222 +0,0 @@ |
| 1 |
-From 85ad3fad7eefe018f2400c609658dacb060cfeb1 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kees Cook <keescook@chromium.org> |
|
| 3 |
-Date: Thu, 3 May 2018 14:56:12 -0700 |
|
| 4 |
-Subject: [PATCH 27/54] seccomp: Add filter flag to opt-out of SSB mitigation |
|
| 5 |
- |
|
| 6 |
-commit 00a02d0c502a06d15e07b857f8ff921e3e402675 upstream |
|
| 7 |
- |
|
| 8 |
-If a seccomp user is not interested in Speculative Store Bypass mitigation |
|
| 9 |
-by default, it can set the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag when |
|
| 10 |
-adding filters. |
|
| 11 |
- |
|
| 12 |
-Signed-off-by: Kees Cook <keescook@chromium.org> |
|
| 13 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 14 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 15 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 16 |
- include/linux/seccomp.h | 3 +- |
|
| 17 |
- include/uapi/linux/seccomp.h | 4 +- |
|
| 18 |
- kernel/seccomp.c | 19 ++++--- |
|
| 19 |
- tools/testing/selftests/seccomp/seccomp_bpf.c | 78 ++++++++++++++++++++++++++- |
|
| 20 |
- 4 files changed, 93 insertions(+), 11 deletions(-) |
|
| 21 |
- |
|
| 22 |
-diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h |
|
| 23 |
-index ecc296c..50c460a 100644 |
|
| 24 |
-+++ b/include/linux/seccomp.h |
|
| 25 |
-@@ -3,7 +3,8 @@ |
|
| 26 |
- |
|
| 27 |
- #include <uapi/linux/seccomp.h> |
|
| 28 |
- |
|
| 29 |
--#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) |
|
| 30 |
-+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ |
|
| 31 |
-+ SECCOMP_FILTER_FLAG_SPEC_ALLOW) |
|
| 32 |
- |
|
| 33 |
- #ifdef CONFIG_SECCOMP |
|
| 34 |
- |
|
| 35 |
-diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h |
|
| 36 |
-index 0f238a4..e4acb61 100644 |
|
| 37 |
-+++ b/include/uapi/linux/seccomp.h |
|
| 38 |
-@@ -15,7 +15,9 @@ |
|
| 39 |
- #define SECCOMP_SET_MODE_FILTER 1 |
|
| 40 |
- |
|
| 41 |
- /* Valid flags for SECCOMP_SET_MODE_FILTER */ |
|
| 42 |
--#define SECCOMP_FILTER_FLAG_TSYNC 1 |
|
| 43 |
-+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) |
|
| 44 |
-+/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */ |
|
| 45 |
-+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) |
|
| 46 |
- |
|
| 47 |
- /* |
|
| 48 |
- * All BPF programs must return a 32-bit value. |
|
| 49 |
-diff --git a/kernel/seccomp.c b/kernel/seccomp.c |
|
| 50 |
-index a0bd6ea..62a60e7 100644 |
|
| 51 |
-+++ b/kernel/seccomp.c |
|
| 52 |
-@@ -230,7 +230,8 @@ static inline void spec_mitigate(struct task_struct *task, |
|
| 53 |
- } |
|
| 54 |
- |
|
| 55 |
- static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 56 |
-- unsigned long seccomp_mode) |
|
| 57 |
-+ unsigned long seccomp_mode, |
|
| 58 |
-+ unsigned long flags) |
|
| 59 |
- {
|
|
| 60 |
- assert_spin_locked(&task->sighand->siglock); |
|
| 61 |
- |
|
| 62 |
-@@ -240,8 +241,9 @@ static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 63 |
- * filter) is set. |
|
| 64 |
- */ |
|
| 65 |
- smp_mb__before_atomic(); |
|
| 66 |
-- /* Assume seccomp processes want speculation flaw mitigation. */ |
|
| 67 |
-- spec_mitigate(task, PR_SPEC_STORE_BYPASS); |
|
| 68 |
-+ /* Assume default seccomp processes want spec flaw mitigation. */ |
|
| 69 |
-+ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0) |
|
| 70 |
-+ spec_mitigate(task, PR_SPEC_STORE_BYPASS); |
|
| 71 |
- set_tsk_thread_flag(task, TIF_SECCOMP); |
|
| 72 |
- } |
|
| 73 |
- |
|
| 74 |
-@@ -309,7 +311,7 @@ static inline pid_t seccomp_can_sync_threads(void) |
|
| 75 |
- * without dropping the locks. |
|
| 76 |
- * |
|
| 77 |
- */ |
|
| 78 |
--static inline void seccomp_sync_threads(void) |
|
| 79 |
-+static inline void seccomp_sync_threads(unsigned long flags) |
|
| 80 |
- {
|
|
| 81 |
- struct task_struct *thread, *caller; |
|
| 82 |
- |
|
| 83 |
-@@ -350,7 +352,8 @@ static inline void seccomp_sync_threads(void) |
|
| 84 |
- * allow one thread to transition the other. |
|
| 85 |
- */ |
|
| 86 |
- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) |
|
| 87 |
-- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); |
|
| 88 |
-+ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER, |
|
| 89 |
-+ flags); |
|
| 90 |
- } |
|
| 91 |
- } |
|
| 92 |
- |
|
| 93 |
-@@ -469,7 +472,7 @@ static long seccomp_attach_filter(unsigned int flags, |
|
| 94 |
- |
|
| 95 |
- /* Now that the new filter is in place, synchronize to all threads. */ |
|
| 96 |
- if (flags & SECCOMP_FILTER_FLAG_TSYNC) |
|
| 97 |
-- seccomp_sync_threads(); |
|
| 98 |
-+ seccomp_sync_threads(flags); |
|
| 99 |
- |
|
| 100 |
- return 0; |
|
| 101 |
- } |
|
| 102 |
-@@ -729,7 +732,7 @@ static long seccomp_set_mode_strict(void) |
|
| 103 |
- #ifdef TIF_NOTSC |
|
| 104 |
- disable_TSC(); |
|
| 105 |
- #endif |
|
| 106 |
-- seccomp_assign_mode(current, seccomp_mode); |
|
| 107 |
-+ seccomp_assign_mode(current, seccomp_mode, 0); |
|
| 108 |
- ret = 0; |
|
| 109 |
- |
|
| 110 |
- out: |
|
| 111 |
-@@ -787,7 +790,7 @@ static long seccomp_set_mode_filter(unsigned int flags, |
|
| 112 |
- /* Do not free the successfully attached filter. */ |
|
| 113 |
- prepared = NULL; |
|
| 114 |
- |
|
| 115 |
-- seccomp_assign_mode(current, seccomp_mode); |
|
| 116 |
-+ seccomp_assign_mode(current, seccomp_mode, flags); |
|
| 117 |
- out: |
|
| 118 |
- spin_unlock_irq(¤t->sighand->siglock); |
|
| 119 |
- if (flags & SECCOMP_FILTER_FLAG_TSYNC) |
|
| 120 |
-diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c |
|
| 121 |
-index f689981..d5be7b5 100644 |
|
| 122 |
-+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c |
|
| 123 |
-@@ -1692,7 +1692,11 @@ TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS) |
|
| 124 |
- #endif |
|
| 125 |
- |
|
| 126 |
- #ifndef SECCOMP_FILTER_FLAG_TSYNC |
|
| 127 |
--#define SECCOMP_FILTER_FLAG_TSYNC 1 |
|
| 128 |
-+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) |
|
| 129 |
-+#endif |
|
| 130 |
-+ |
|
| 131 |
-+#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW |
|
| 132 |
-+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) |
|
| 133 |
- #endif |
|
| 134 |
- |
|
| 135 |
- #ifndef seccomp |
|
| 136 |
-@@ -1791,6 +1795,78 @@ TEST(seccomp_syscall_mode_lock) |
|
| 137 |
- } |
|
| 138 |
- } |
|
| 139 |
- |
|
| 140 |
-+/* |
|
| 141 |
-+ * Test detection of known and unknown filter flags. Userspace needs to be able |
|
| 142 |
-+ * to check if a filter flag is supported by the current kernel and a good way |
|
| 143 |
-+ * of doing that is by attempting to enter filter mode, with the flag bit in |
|
| 144 |
-+ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates |
|
| 145 |
-+ * that the flag is valid and EINVAL indicates that the flag is invalid. |
|
| 146 |
-+ */ |
|
| 147 |
-+TEST(detect_seccomp_filter_flags) |
|
| 148 |
-+{
|
|
| 149 |
-+ unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
|
|
| 150 |
-+ SECCOMP_FILTER_FLAG_SPEC_ALLOW }; |
|
| 151 |
-+ unsigned int flag, all_flags; |
|
| 152 |
-+ int i; |
|
| 153 |
-+ long ret; |
|
| 154 |
-+ |
|
| 155 |
-+ /* Test detection of known-good filter flags */ |
|
| 156 |
-+ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
|
|
| 157 |
-+ int bits = 0; |
|
| 158 |
-+ |
|
| 159 |
-+ flag = flags[i]; |
|
| 160 |
-+ /* Make sure the flag is a single bit! */ |
|
| 161 |
-+ while (flag) {
|
|
| 162 |
-+ if (flag & 0x1) |
|
| 163 |
-+ bits ++; |
|
| 164 |
-+ flag >>= 1; |
|
| 165 |
-+ } |
|
| 166 |
-+ ASSERT_EQ(1, bits); |
|
| 167 |
-+ flag = flags[i]; |
|
| 168 |
-+ |
|
| 169 |
-+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); |
|
| 170 |
-+ ASSERT_NE(ENOSYS, errno) {
|
|
| 171 |
-+ TH_LOG("Kernel does not support seccomp syscall!");
|
|
| 172 |
-+ } |
|
| 173 |
-+ EXPECT_EQ(-1, ret); |
|
| 174 |
-+ EXPECT_EQ(EFAULT, errno) {
|
|
| 175 |
-+ TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
|
|
| 176 |
-+ flag); |
|
| 177 |
-+ } |
|
| 178 |
-+ |
|
| 179 |
-+ all_flags |= flag; |
|
| 180 |
-+ } |
|
| 181 |
-+ |
|
| 182 |
-+ /* Test detection of all known-good filter flags */ |
|
| 183 |
-+ ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL); |
|
| 184 |
-+ EXPECT_EQ(-1, ret); |
|
| 185 |
-+ EXPECT_EQ(EFAULT, errno) {
|
|
| 186 |
-+ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
|
|
| 187 |
-+ all_flags); |
|
| 188 |
-+ } |
|
| 189 |
-+ |
|
| 190 |
-+ /* Test detection of an unknown filter flag */ |
|
| 191 |
-+ flag = -1; |
|
| 192 |
-+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); |
|
| 193 |
-+ EXPECT_EQ(-1, ret); |
|
| 194 |
-+ EXPECT_EQ(EINVAL, errno) {
|
|
| 195 |
-+ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
|
|
| 196 |
-+ flag); |
|
| 197 |
-+ } |
|
| 198 |
-+ |
|
| 199 |
-+ /* |
|
| 200 |
-+ * Test detection of an unknown filter flag that may simply need to be |
|
| 201 |
-+ * added to this test |
|
| 202 |
-+ */ |
|
| 203 |
-+ flag = flags[ARRAY_SIZE(flags) - 1] << 1; |
|
| 204 |
-+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); |
|
| 205 |
-+ EXPECT_EQ(-1, ret); |
|
| 206 |
-+ EXPECT_EQ(EINVAL, errno) {
|
|
| 207 |
-+ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
|
|
| 208 |
-+ flag); |
|
| 209 |
-+ } |
|
| 210 |
-+} |
|
| 211 |
-+ |
|
| 212 |
- TEST(TSYNC_first) |
|
| 213 |
- {
|
|
| 214 |
- struct sock_filter filter[] = {
|
|
| 215 |
-2.7.4 |
|
| 216 |
- |
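
A userspace sketch (not part of the patch) of the new flag: it first probes for SECCOMP_FILTER_FLAG_SPEC_ALLOW using the NULL-args trick described in the selftest above (EFAULT means the flag is known, EINVAL means it is not), then installs an allow-all filter while opting out of the automatic SSB mitigation. glibc has no seccomp(2) wrapper, so the raw syscall is used.

#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW /* added by the patch above */
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

static int sys_seccomp(unsigned int op, unsigned int flags, void *args)
{
    return syscall(__NR_seccomp, op, flags, args);
}

int main(void)
{
    struct sock_filter allow = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
    struct sock_fprog prog = { .len = 1, .filter = &allow };

    /* Probe: a NULL filter pointer yields EFAULT when the flag is
     * recognised and EINVAL when it is not. */
    errno = 0;
    sys_seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_SPEC_ALLOW, NULL);
    if (errno == EINVAL) {
        fprintf(stderr, "kernel does not know SECCOMP_FILTER_FLAG_SPEC_ALLOW\n");
        return 1;
    }

    /* Install the filter while opting out of the automatic mitigation. */
    prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
    if (sys_seccomp(SECCOMP_SET_MODE_FILTER,
                    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog) != 0) {
        perror("seccomp");
        return 1;
    }
    printf("filter installed without forcing the SSB mitigation\n");
    return 0;
}
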
| 217 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,121 +0,0 @@ |
| 1 |
-From 2dfae1442193d33c0a0de1fa83d37afe6b000fb8 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Fri, 4 May 2018 15:12:06 +0200 |
|
| 4 |
-Subject: [PATCH 28/54] seccomp: Move speculation migitation control to arch |
|
| 5 |
- code |
|
| 6 |
- |
|
| 7 |
-commit 8bf37d8c067bb7eb8e7c381bdadf9bd89182b6bc upstream |
|
| 8 |
- |
|
| 9 |
-The migitation control is simpler to implement in architecture code as it |
|
| 10 |
-avoids the extra function call to check the mode. Aside of that having an |
|
| 11 |
-explicit seccomp enabled mode in the architecture mitigations would require |
|
| 12 |
-even more workarounds. |
|
| 13 |
- |
|
| 14 |
-Move it into architecture code and provide a weak function in the seccomp |
|
| 15 |
-code. Remove the 'which' argument as this allows the architecture to decide |
|
| 16 |
-which mitigations are relevant for seccomp. |
|
| 17 |
- |
|
| 18 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 19 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 20 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 21 |
- arch/x86/kernel/cpu/bugs.c | 29 ++++++++++++++++++----------- |
|
| 22 |
- include/linux/nospec.h | 2 ++ |
|
| 23 |
- kernel/seccomp.c | 15 ++------------- |
|
| 24 |
- 3 files changed, 22 insertions(+), 24 deletions(-) |
|
| 25 |
- |
|
| 26 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 27 |
-index fdbd8e5..131617d 100644 |
|
| 28 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 29 |
-@@ -568,6 +568,24 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
|
| 30 |
- return 0; |
|
| 31 |
- } |
|
| 32 |
- |
|
| 33 |
-+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 34 |
-+ unsigned long ctrl) |
|
| 35 |
-+{
|
|
| 36 |
-+ switch (which) {
|
|
| 37 |
-+ case PR_SPEC_STORE_BYPASS: |
|
| 38 |
-+ return ssb_prctl_set(task, ctrl); |
|
| 39 |
-+ default: |
|
| 40 |
-+ return -ENODEV; |
|
| 41 |
-+ } |
|
| 42 |
-+} |
|
| 43 |
-+ |
|
| 44 |
-+#ifdef CONFIG_SECCOMP |
|
| 45 |
-+void arch_seccomp_spec_mitigate(struct task_struct *task) |
|
| 46 |
-+{
|
|
| 47 |
-+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
|
| 48 |
-+} |
|
| 49 |
-+#endif |
|
| 50 |
-+ |
|
| 51 |
- static int ssb_prctl_get(struct task_struct *task) |
|
| 52 |
- {
|
|
| 53 |
- switch (ssb_mode) {
|
|
| 54 |
-@@ -586,17 +604,6 @@ static int ssb_prctl_get(struct task_struct *task) |
|
| 55 |
- } |
|
| 56 |
- } |
|
| 57 |
- |
|
| 58 |
--int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 59 |
-- unsigned long ctrl) |
|
| 60 |
--{
|
|
| 61 |
-- switch (which) {
|
|
| 62 |
-- case PR_SPEC_STORE_BYPASS: |
|
| 63 |
-- return ssb_prctl_set(task, ctrl); |
|
| 64 |
-- default: |
|
| 65 |
-- return -ENODEV; |
|
| 66 |
-- } |
|
| 67 |
--} |
|
| 68 |
-- |
|
| 69 |
- int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
|
| 70 |
- {
|
|
| 71 |
- switch (which) {
|
|
| 72 |
-diff --git a/include/linux/nospec.h b/include/linux/nospec.h |
|
| 73 |
-index a908c95..0c5ef54 100644 |
|
| 74 |
-+++ b/include/linux/nospec.h |
|
| 75 |
-@@ -62,5 +62,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, |
|
| 76 |
- int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); |
|
| 77 |
- int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 78 |
- unsigned long ctrl); |
|
| 79 |
-+/* Speculation control for seccomp enforced mitigation */ |
|
| 80 |
-+void arch_seccomp_spec_mitigate(struct task_struct *task); |
|
| 81 |
- |
|
| 82 |
- #endif /* _LINUX_NOSPEC_H */ |
|
| 83 |
-diff --git a/kernel/seccomp.c b/kernel/seccomp.c |
|
| 84 |
-index 62a60e7..3975856 100644 |
|
| 85 |
-+++ b/kernel/seccomp.c |
|
| 86 |
-@@ -216,18 +216,7 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode) |
|
| 87 |
- return true; |
|
| 88 |
- } |
|
| 89 |
- |
|
| 90 |
--/* |
|
| 91 |
-- * If a given speculation mitigation is opt-in (prctl()-controlled), |
|
| 92 |
-- * select it, by disabling speculation (enabling mitigation). |
|
| 93 |
-- */ |
|
| 94 |
--static inline void spec_mitigate(struct task_struct *task, |
|
| 95 |
-- unsigned long which) |
|
| 96 |
--{
|
|
| 97 |
-- int state = arch_prctl_spec_ctrl_get(task, which); |
|
| 98 |
-- |
|
| 99 |
-- if (state > 0 && (state & PR_SPEC_PRCTL)) |
|
| 100 |
-- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE); |
|
| 101 |
--} |
|
| 102 |
-+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
|
|
| 103 |
- |
|
| 104 |
- static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 105 |
- unsigned long seccomp_mode, |
|
| 106 |
-@@ -243,7 +232,7 @@ static inline void seccomp_assign_mode(struct task_struct *task, |
|
| 107 |
- smp_mb__before_atomic(); |
|
| 108 |
- /* Assume default seccomp processes want spec flaw mitigation. */ |
|
| 109 |
- if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0) |
|
| 110 |
-- spec_mitigate(task, PR_SPEC_STORE_BYPASS); |
|
| 111 |
-+ arch_seccomp_spec_mitigate(task); |
|
| 112 |
- set_tsk_thread_flag(task, TIF_SECCOMP); |
|
| 113 |
- } |
|
| 114 |
- |
|
| 115 |
-2.7.4 |
|
| 116 |
- |
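
The hunk above leans on the usual kernel pattern of a __weak default that architecture code overrides. A generic sketch of that pattern with made-up names (two separate translation units, illustrative only):

/* common.c: weak no-op fallback, used only when no architecture supplies
 * its own implementation. */
void __attribute__((weak)) arch_example_spec_mitigate(void)
{
}

/* arch.c (a separate object file): an ordinary, strong definition with the
 * same name replaces the weak fallback at link time. */
void arch_example_spec_mitigate(void)
{
    /* architecture-specific mitigation work would go here */
}
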
| 117 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,166 +0,0 @@ |
| 1 |
-From e72eb1d28d45418efd77444d3b5935212f6e7e6c Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Kees Cook <keescook@chromium.org> |
|
| 3 |
-Date: Thu, 3 May 2018 14:37:54 -0700 |
|
| 4 |
-Subject: [PATCH 29/54] x86/speculation: Make "seccomp" the default mode for |
|
| 5 |
- Speculative Store Bypass |
|
| 6 |
- |
|
| 7 |
-commit f21b53b20c754021935ea43364dbf53778eeba32 upstream |
|
| 8 |
- |
|
| 9 |
-Unless explicitly opted out of, anything running under seccomp will have |
|
| 10 |
-SSB mitigations enabled. Choosing the "prctl" mode will disable this. |
|
| 11 |
- |
|
| 12 |
-[ tglx: Adjusted it to the new arch_seccomp_spec_mitigate() mechanism ] |
|
| 13 |
- |
|
| 14 |
-Signed-off-by: Kees Cook <keescook@chromium.org> |
|
| 15 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 16 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 17 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 18 |
- Documentation/kernel-parameters.txt | 26 +++++++++++++++++--------- |
|
| 19 |
- arch/x86/include/asm/nospec-branch.h | 1 + |
|
| 20 |
- arch/x86/kernel/cpu/bugs.c | 32 +++++++++++++++++++++++--------- |
|
| 21 |
- 3 files changed, 41 insertions(+), 18 deletions(-) |
|
| 22 |
- |
|
| 23 |
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt |
|
| 24 |
-index 543923b..52240a6 100644 |
|
| 25 |
-+++ b/Documentation/kernel-parameters.txt |
|
| 26 |
-@@ -3997,19 +3997,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
|
| 27 |
- This parameter controls whether the Speculative Store |
|
| 28 |
- Bypass optimization is used. |
|
| 29 |
- |
|
| 30 |
-- on - Unconditionally disable Speculative Store Bypass |
|
| 31 |
-- off - Unconditionally enable Speculative Store Bypass |
|
| 32 |
-- auto - Kernel detects whether the CPU model contains an |
|
| 33 |
-- implementation of Speculative Store Bypass and |
|
| 34 |
-- picks the most appropriate mitigation. |
|
| 35 |
-- prctl - Control Speculative Store Bypass per thread |
|
| 36 |
-- via prctl. Speculative Store Bypass is enabled |
|
| 37 |
-- for a process by default. The state of the control |
|
| 38 |
-- is inherited on fork. |
|
| 39 |
-+ on - Unconditionally disable Speculative Store Bypass |
|
| 40 |
-+ off - Unconditionally enable Speculative Store Bypass |
|
| 41 |
-+ auto - Kernel detects whether the CPU model contains an |
|
| 42 |
-+ implementation of Speculative Store Bypass and |
|
| 43 |
-+ picks the most appropriate mitigation. If the |
|
| 44 |
-+ CPU is not vulnerable, "off" is selected. If the |
|
| 45 |
-+ CPU is vulnerable the default mitigation is |
|
| 46 |
-+ architecture and Kconfig dependent. See below. |
|
| 47 |
-+ prctl - Control Speculative Store Bypass per thread |
|
| 48 |
-+ via prctl. Speculative Store Bypass is enabled |
|
| 49 |
-+ for a process by default. The state of the control |
|
| 50 |
-+ is inherited on fork. |
|
| 51 |
-+ seccomp - Same as "prctl" above, but all seccomp threads |
|
| 52 |
-+ will disable SSB unless they explicitly opt out. |
|
| 53 |
- |
|
| 54 |
- Not specifying this option is equivalent to |
|
| 55 |
- spec_store_bypass_disable=auto. |
|
| 56 |
- |
|
| 57 |
-+ Default mitigations: |
|
| 58 |
-+ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl" |
|
| 59 |
-+ |
|
| 60 |
- spia_io_base= [HW,MTD] |
|
| 61 |
- spia_fio_base= |
|
| 62 |
- spia_pedr= |
|
| 63 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 64 |
-index 71ad014..328ea3c 100644 |
|
| 65 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 66 |
-@@ -233,6 +233,7 @@ enum ssb_mitigation {
|
|
| 67 |
- SPEC_STORE_BYPASS_NONE, |
|
| 68 |
- SPEC_STORE_BYPASS_DISABLE, |
|
| 69 |
- SPEC_STORE_BYPASS_PRCTL, |
|
| 70 |
-+ SPEC_STORE_BYPASS_SECCOMP, |
|
| 71 |
- }; |
|
| 72 |
- |
|
| 73 |
- extern char __indirect_thunk_start[]; |
|
| 74 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 75 |
-index 131617d..9a3bb65 100644 |
|
| 76 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 77 |
-@@ -415,22 +415,25 @@ enum ssb_mitigation_cmd {
|
|
| 78 |
- SPEC_STORE_BYPASS_CMD_AUTO, |
|
| 79 |
- SPEC_STORE_BYPASS_CMD_ON, |
|
| 80 |
- SPEC_STORE_BYPASS_CMD_PRCTL, |
|
| 81 |
-+ SPEC_STORE_BYPASS_CMD_SECCOMP, |
|
| 82 |
- }; |
|
| 83 |
- |
|
| 84 |
- static const char *ssb_strings[] = {
|
|
| 85 |
- [SPEC_STORE_BYPASS_NONE] = "Vulnerable", |
|
| 86 |
- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", |
|
| 87 |
-- [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl" |
|
| 88 |
-+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", |
|
| 89 |
-+ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", |
|
| 90 |
- }; |
|
| 91 |
- |
|
| 92 |
- static const struct {
|
|
| 93 |
- const char *option; |
|
| 94 |
- enum ssb_mitigation_cmd cmd; |
|
| 95 |
- } ssb_mitigation_options[] = {
|
|
| 96 |
-- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
| 97 |
-- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
| 98 |
-- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
| 99 |
-- { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
|
|
| 100 |
-+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
| 101 |
-+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
| 102 |
-+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
| 103 |
-+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
|
|
| 104 |
-+ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
|
|
| 105 |
- }; |
|
| 106 |
- |
|
| 107 |
- static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) |
|
| 108 |
-@@ -480,8 +483,15 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 109 |
- |
|
| 110 |
- switch (cmd) {
|
|
| 111 |
- case SPEC_STORE_BYPASS_CMD_AUTO: |
|
| 112 |
-- /* Choose prctl as the default mode */ |
|
| 113 |
-- mode = SPEC_STORE_BYPASS_PRCTL; |
|
| 114 |
-+ case SPEC_STORE_BYPASS_CMD_SECCOMP: |
|
| 115 |
-+ /* |
|
| 116 |
-+ * Choose prctl+seccomp as the default mode if seccomp is |
|
| 117 |
-+ * enabled. |
|
| 118 |
-+ */ |
|
| 119 |
-+ if (IS_ENABLED(CONFIG_SECCOMP)) |
|
| 120 |
-+ mode = SPEC_STORE_BYPASS_SECCOMP; |
|
| 121 |
-+ else |
|
| 122 |
-+ mode = SPEC_STORE_BYPASS_PRCTL; |
|
| 123 |
- break; |
|
| 124 |
- case SPEC_STORE_BYPASS_CMD_ON: |
|
| 125 |
- mode = SPEC_STORE_BYPASS_DISABLE; |
|
| 126 |
-@@ -529,12 +539,14 @@ static void ssb_select_mitigation() |
|
| 127 |
- } |
|
| 128 |
- |
|
| 129 |
- #undef pr_fmt |
|
| 130 |
-+#define pr_fmt(fmt) "Speculation prctl: " fmt |
|
| 131 |
- |
|
| 132 |
- static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
|
| 133 |
- {
|
|
| 134 |
- bool update; |
|
| 135 |
- |
|
| 136 |
-- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL) |
|
| 137 |
-+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && |
|
| 138 |
-+ ssb_mode != SPEC_STORE_BYPASS_SECCOMP) |
|
| 139 |
- return -ENXIO; |
|
| 140 |
- |
|
| 141 |
- switch (ctrl) {
|
|
| 142 |
-@@ -582,7 +594,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
|
| 143 |
- #ifdef CONFIG_SECCOMP |
|
| 144 |
- void arch_seccomp_spec_mitigate(struct task_struct *task) |
|
| 145 |
- {
|
|
| 146 |
-- ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
|
| 147 |
-+ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) |
|
| 148 |
-+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
|
| 149 |
- } |
|
| 150 |
- #endif |
|
| 151 |
- |
|
| 152 |
-@@ -591,6 +604,7 @@ static int ssb_prctl_get(struct task_struct *task) |
|
| 153 |
- switch (ssb_mode) {
|
|
| 154 |
- case SPEC_STORE_BYPASS_DISABLE: |
|
| 155 |
- return PR_SPEC_DISABLE; |
|
| 156 |
-+ case SPEC_STORE_BYPASS_SECCOMP: |
|
| 157 |
- case SPEC_STORE_BYPASS_PRCTL: |
|
| 158 |
- if (task_spec_ssb_force_disable(task)) |
|
| 159 |
- return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; |
|
| 160 |
-2.7.4 |
|
| 161 |
- |
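
The ssb_strings[] table extended above is what the kernel reports through the CPU vulnerabilities sysfs interface (the sysfs plumbing itself is added elsewhere in this same upstream series). A small sketch (not from the patch) that prints the active mode:

#include <stdio.h>

int main(void)
{
    char buf[128];
    FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

    if (!f) {
        perror("spec_store_bypass");
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        /* e.g. "Mitigation: Speculative Store Bypass disabled via prctl and seccomp" */
        fputs(buf, stdout);
    fclose(f);
    return 0;
}
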
| 162 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,405 +0,0 @@ |
| 1 |
-From 3418b83309406d775ef700aefaedd0665e7f7855 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 9 May 2018 21:41:38 +0200 |
|
| 4 |
-Subject: [PATCH 30/54] x86/bugs: Rename _RDS to _SSBD |
|
| 5 |
- |
|
| 6 |
-commit 9f65fb29374ee37856dbad847b4e121aab72b510 upstream |
|
| 7 |
- |
|
| 8 |
-Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTL[2] |
|
| 9 |
-as SSBD (Speculative Store Bypass Disable). |
|
| 10 |
- |
|
| 11 |
-Hence changing it. |
|
| 12 |
- |
|
| 13 |
-It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name |
|
| 14 |
-is going to be. Following the rename it would be SSBD_NO but that rolls out |
|
| 15 |
-to Speculative Store Bypass Disable No. |
|
| 16 |
- |
|
| 17 |
-Also fixed the missing space in X86_FEATURE_AMD_SSBD. |
|
| 18 |
- |
|
| 19 |
-[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ] |
|
| 20 |
- |
|
| 21 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 22 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 23 |
- |
|
| 24 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 25 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 26 |
- arch/x86/include/asm/cpufeatures.h | 4 ++-- |
|
| 27 |
- arch/x86/include/asm/msr-index.h | 10 +++++----- |
|
| 28 |
- arch/x86/include/asm/spec-ctrl.h | 12 ++++++------ |
|
| 29 |
- arch/x86/include/asm/thread_info.h | 6 +++--- |
|
| 30 |
- arch/x86/kernel/cpu/amd.c | 14 +++++++------- |
|
| 31 |
- arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++------------------ |
|
| 32 |
- arch/x86/kernel/cpu/common.c | 2 +- |
|
| 33 |
- arch/x86/kernel/cpu/intel.c | 2 +- |
|
| 34 |
- arch/x86/kernel/process.c | 8 ++++---- |
|
| 35 |
- arch/x86/kvm/cpuid.c | 2 +- |
|
| 36 |
- arch/x86/kvm/cpuid.h | 2 +- |
|
| 37 |
- arch/x86/kvm/vmx.c | 2 +- |
|
| 38 |
- 12 files changed, 50 insertions(+), 50 deletions(-) |
|
| 39 |
- |
|
| 40 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 41 |
-index 8797069..0ed8ea5 100644 |
|
| 42 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 43 |
-@@ -205,7 +205,7 @@ |
|
| 44 |
- #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
|
| 45 |
- #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ |
|
| 46 |
- #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ |
|
| 47 |
--#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */ |
|
| 48 |
-+#define X86_FEATURE_AMD_SSBD (7*32+24) /* "" AMD SSBD implementation */ |
|
| 49 |
- |
|
| 50 |
- /* Virtualization flags: Linux defined, word 8 */ |
|
| 51 |
- #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
|
| 52 |
-@@ -308,7 +308,7 @@ |
|
| 53 |
- #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
|
| 54 |
- #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
|
| 55 |
- #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
|
| 56 |
--#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */ |
|
| 57 |
-+#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */ |
|
| 58 |
- |
|
| 59 |
- /* |
|
| 60 |
- * BUG word(s) |
|
| 61 |
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
|
| 62 |
-index 5dd28d0..b67d57e 100644 |
|
| 63 |
-+++ b/arch/x86/include/asm/msr-index.h |
|
| 64 |
-@@ -40,8 +40,8 @@ |
|
| 65 |
- #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ |
|
| 66 |
- #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ |
|
| 67 |
- #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ |
|
| 68 |
--#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */ |
|
| 69 |
--#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */ |
|
| 70 |
-+#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ |
|
| 71 |
-+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ |
|
| 72 |
- |
|
| 73 |
- #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ |
|
| 74 |
- #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ |
|
| 75 |
-@@ -63,10 +63,10 @@ |
|
| 76 |
- #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a |
|
| 77 |
- #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ |
|
| 78 |
- #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ |
|
| 79 |
--#define ARCH_CAP_RDS_NO (1 << 4) /* |
|
| 80 |
-+#define ARCH_CAP_SSBD_NO (1 << 4) /* |
|
| 81 |
- * Not susceptible to Speculative Store Bypass |
|
| 82 |
-- * attack, so no Reduced Data Speculation control |
|
| 83 |
-- * required. |
|
| 84 |
-+ * attack, so no Speculative Store Bypass |
|
| 85 |
-+ * control required. |
|
| 86 |
- */ |
|
| 87 |
- |
|
| 88 |
- #define MSR_IA32_BBL_CR_CTL 0x00000119 |
|
| 89 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 90 |
-index 45ef00a..dc21209 100644 |
|
| 91 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 92 |
-@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u64); |
|
| 93 |
- |
|
| 94 |
- /* AMD specific Speculative Store Bypass MSR data */ |
|
| 95 |
- extern u64 x86_amd_ls_cfg_base; |
|
| 96 |
--extern u64 x86_amd_ls_cfg_rds_mask; |
|
| 97 |
-+extern u64 x86_amd_ls_cfg_ssbd_mask; |
|
| 98 |
- |
|
| 99 |
- /* The Intel SPEC CTRL MSR base value cache */ |
|
| 100 |
- extern u64 x86_spec_ctrl_base; |
|
| 101 |
- |
|
| 102 |
--static inline u64 rds_tif_to_spec_ctrl(u64 tifn) |
|
| 103 |
-+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) |
|
| 104 |
- {
|
|
| 105 |
-- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT); |
|
| 106 |
-- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT); |
|
| 107 |
-+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); |
|
| 108 |
-+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); |
|
| 109 |
- } |
|
| 110 |
- |
|
| 111 |
--static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn) |
|
| 112 |
-+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) |
|
| 113 |
- {
|
|
| 114 |
-- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL; |
|
| 115 |
-+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; |
|
| 116 |
- } |
|
| 117 |
- |
|
| 118 |
- extern void speculative_store_bypass_update(void); |
|
| 119 |
-diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h |
|
| 120 |
-index 661afac..2d8788a 100644 |
|
| 121 |
-+++ b/arch/x86/include/asm/thread_info.h |
|
| 122 |
-@@ -83,7 +83,7 @@ struct thread_info {
|
|
| 123 |
- #define TIF_SIGPENDING 2 /* signal pending */ |
|
| 124 |
- #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
|
| 125 |
- #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
|
| 126 |
--#define TIF_RDS 5 /* Reduced data speculation */ |
|
| 127 |
-+#define TIF_SSBD 5 /* Reduced data speculation */ |
|
| 128 |
- #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
|
| 129 |
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
|
| 130 |
- #define TIF_SECCOMP 8 /* secure computing */ |
|
| 131 |
-@@ -107,7 +107,7 @@ struct thread_info {
|
|
| 132 |
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
|
| 133 |
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
|
| 134 |
- #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
|
| 135 |
--#define _TIF_RDS (1 << TIF_RDS) |
|
| 136 |
-+#define _TIF_SSBD (1 << TIF_SSBD) |
|
| 137 |
- #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
|
| 138 |
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
|
| 139 |
- #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
|
| 140 |
-@@ -141,7 +141,7 @@ struct thread_info {
|
|
| 141 |
- |
|
| 142 |
- /* flags to check in __switch_to() */ |
|
| 143 |
- #define _TIF_WORK_CTXSW \ |
|
| 144 |
-- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS) |
|
| 145 |
-+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) |
|
| 146 |
- |
|
| 147 |
- #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
|
| 148 |
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) |
|
| 149 |
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
|
| 150 |
-index a176c81..acb2fcc 100644 |
|
| 151 |
-+++ b/arch/x86/kernel/cpu/amd.c |
|
| 152 |
-@@ -555,12 +555,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) |
|
| 153 |
- } |
|
| 154 |
- /* |
|
| 155 |
- * Try to cache the base value so further operations can |
|
| 156 |
-- * avoid RMW. If that faults, do not enable RDS. |
|
| 157 |
-+ * avoid RMW. If that faults, do not enable SSBD. |
|
| 158 |
- */ |
|
| 159 |
- if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
|
|
| 160 |
-- setup_force_cpu_cap(X86_FEATURE_RDS); |
|
| 161 |
-- setup_force_cpu_cap(X86_FEATURE_AMD_RDS); |
|
| 162 |
-- x86_amd_ls_cfg_rds_mask = 1ULL << bit; |
|
| 163 |
-+ setup_force_cpu_cap(X86_FEATURE_SSBD); |
|
| 164 |
-+ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD); |
|
| 165 |
-+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; |
|
| 166 |
- } |
|
| 167 |
- } |
|
| 168 |
- } |
|
| 169 |
-@@ -849,9 +849,9 @@ static void init_amd(struct cpuinfo_x86 *c) |
|
| 170 |
- if (!cpu_has(c, X86_FEATURE_XENPV)) |
|
| 171 |
- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); |
|
| 172 |
- |
|
| 173 |
-- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
|
|
| 174 |
-- set_cpu_cap(c, X86_FEATURE_RDS); |
|
| 175 |
-- set_cpu_cap(c, X86_FEATURE_AMD_RDS); |
|
| 176 |
-+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
|
|
| 177 |
-+ set_cpu_cap(c, X86_FEATURE_SSBD); |
|
| 178 |
-+ set_cpu_cap(c, X86_FEATURE_AMD_SSBD); |
|
| 179 |
- } |
|
| 180 |
- } |
|
| 181 |
- |
|
| 182 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 183 |
-index 9a3bb65..ae6f9ba 100644 |
|
| 184 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 185 |
-@@ -44,10 +44,10 @@ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS; |
|
| 186 |
- |
|
| 187 |
- /* |
|
| 188 |
- * AMD specific MSR info for Speculative Store Bypass control. |
|
| 189 |
-- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu(). |
|
| 190 |
-+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). |
|
| 191 |
- */ |
|
| 192 |
- u64 __ro_after_init x86_amd_ls_cfg_base; |
|
| 193 |
--u64 __ro_after_init x86_amd_ls_cfg_rds_mask; |
|
| 194 |
-+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; |
|
| 195 |
- |
|
| 196 |
- void __init check_bugs(void) |
|
| 197 |
- {
|
|
| 198 |
-@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void) |
|
| 199 |
- u64 msrval = x86_spec_ctrl_base; |
|
| 200 |
- |
|
| 201 |
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 202 |
-- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 203 |
-+ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 204 |
- return msrval; |
|
| 205 |
- } |
|
| 206 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); |
|
| 207 |
-@@ -158,7 +158,7 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 208 |
- return; |
|
| 209 |
- |
|
| 210 |
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 211 |
-- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 212 |
-+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 213 |
- |
|
| 214 |
- if (host != guest_spec_ctrl) |
|
| 215 |
- wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); |
|
| 216 |
-@@ -173,18 +173,18 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 217 |
- return; |
|
| 218 |
- |
|
| 219 |
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 220 |
-- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 221 |
-+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 222 |
- |
|
| 223 |
- if (host != guest_spec_ctrl) |
|
| 224 |
- wrmsrl(MSR_IA32_SPEC_CTRL, host); |
|
| 225 |
- } |
|
| 226 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); |
|
| 227 |
- |
|
| 228 |
--static void x86_amd_rds_enable(void) |
|
| 229 |
-+static void x86_amd_ssb_disable(void) |
|
| 230 |
- {
|
|
| 231 |
-- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask; |
|
| 232 |
-+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; |
|
| 233 |
- |
|
| 234 |
-- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) |
|
| 235 |
-+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
|
| 236 |
- wrmsrl(MSR_AMD64_LS_CFG, msrval); |
|
| 237 |
- } |
|
| 238 |
- |
|
| 239 |
-@@ -472,7 +472,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 240 |
- enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; |
|
| 241 |
- enum ssb_mitigation_cmd cmd; |
|
| 242 |
- |
|
| 243 |
-- if (!boot_cpu_has(X86_FEATURE_RDS)) |
|
| 244 |
-+ if (!boot_cpu_has(X86_FEATURE_SSBD)) |
|
| 245 |
- return mode; |
|
| 246 |
- |
|
| 247 |
- cmd = ssb_parse_cmdline(); |
|
| 248 |
-@@ -506,7 +506,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 249 |
- /* |
|
| 250 |
- * We have three CPU feature flags that are in play here: |
|
| 251 |
- * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. |
|
| 252 |
-- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass |
|
| 253 |
-+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass |
|
| 254 |
- * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation |
|
| 255 |
- */ |
|
| 256 |
- if (mode == SPEC_STORE_BYPASS_DISABLE) {
|
|
| 257 |
-@@ -517,12 +517,12 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) |
|
| 258 |
- */ |
|
| 259 |
- switch (boot_cpu_data.x86_vendor) {
|
|
| 260 |
- case X86_VENDOR_INTEL: |
|
| 261 |
-- x86_spec_ctrl_base |= SPEC_CTRL_RDS; |
|
| 262 |
-- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS; |
|
| 263 |
-- x86_spec_ctrl_set(SPEC_CTRL_RDS); |
|
| 264 |
-+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
|
| 265 |
-+ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD; |
|
| 266 |
-+ x86_spec_ctrl_set(SPEC_CTRL_SSBD); |
|
| 267 |
- break; |
|
| 268 |
- case X86_VENDOR_AMD: |
|
| 269 |
-- x86_amd_rds_enable(); |
|
| 270 |
-+ x86_amd_ssb_disable(); |
|
| 271 |
- break; |
|
| 272 |
- } |
|
| 273 |
- } |
|
| 274 |
-@@ -555,16 +555,16 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
|
| 275 |
- if (task_spec_ssb_force_disable(task)) |
|
| 276 |
- return -EPERM; |
|
| 277 |
- task_clear_spec_ssb_disable(task); |
|
| 278 |
-- update = test_and_clear_tsk_thread_flag(task, TIF_RDS); |
|
| 279 |
-+ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); |
|
| 280 |
- break; |
|
| 281 |
- case PR_SPEC_DISABLE: |
|
| 282 |
- task_set_spec_ssb_disable(task); |
|
| 283 |
-- update = !test_and_set_tsk_thread_flag(task, TIF_RDS); |
|
| 284 |
-+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); |
|
| 285 |
- break; |
|
| 286 |
- case PR_SPEC_FORCE_DISABLE: |
|
| 287 |
- task_set_spec_ssb_disable(task); |
|
| 288 |
- task_set_spec_ssb_force_disable(task); |
|
| 289 |
-- update = !test_and_set_tsk_thread_flag(task, TIF_RDS); |
|
| 290 |
-+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); |
|
| 291 |
- break; |
|
| 292 |
- default: |
|
| 293 |
- return -ERANGE; |
|
| 294 |
-@@ -634,7 +634,7 @@ void x86_spec_ctrl_setup_ap(void) |
|
| 295 |
- x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask); |
|
| 296 |
- |
|
| 297 |
- if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) |
|
| 298 |
-- x86_amd_rds_enable(); |
|
| 299 |
-+ x86_amd_ssb_disable(); |
|
| 300 |
- } |
|
| 301 |
- |
|
| 302 |
- #ifdef CONFIG_SYSFS |
|
| 303 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 304 |
-index beb1da8..d0dd736 100644 |
|
| 305 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 306 |
-@@ -911,7 +911,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
|
| 307 |
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
|
| 308 |
- |
|
| 309 |
- if (!x86_match_cpu(cpu_no_spec_store_bypass) && |
|
| 310 |
-- !(ia32_cap & ARCH_CAP_RDS_NO)) |
|
| 311 |
-+ !(ia32_cap & ARCH_CAP_SSBD_NO)) |
|
| 312 |
- setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); |
|
| 313 |
- |
|
| 314 |
- if (x86_match_cpu(cpu_no_speculation)) |
|
| 315 |
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c |
|
| 316 |
-index f15aea6..047adaa 100644 |
|
| 317 |
-+++ b/arch/x86/kernel/cpu/intel.c |
|
| 318 |
-@@ -154,7 +154,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
|
| 319 |
- setup_clear_cpu_cap(X86_FEATURE_STIBP); |
|
| 320 |
- setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); |
|
| 321 |
- setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); |
|
| 322 |
-- setup_clear_cpu_cap(X86_FEATURE_RDS); |
|
| 323 |
-+ setup_clear_cpu_cap(X86_FEATURE_SSBD); |
|
| 324 |
- } |
|
| 325 |
- |
|
| 326 |
- /* |
|
| 327 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 328 |
-index 9c48e18..c344230 100644 |
|
| 329 |
-+++ b/arch/x86/kernel/process.c |
|
| 330 |
-@@ -207,11 +207,11 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn |
|
| 331 |
- {
|
|
| 332 |
- u64 msr; |
|
| 333 |
- |
|
| 334 |
-- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
|
|
| 335 |
-- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn); |
|
| 336 |
-+ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
|
|
| 337 |
-+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); |
|
| 338 |
- wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 339 |
- } else {
|
|
| 340 |
-- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn); |
|
| 341 |
-+ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); |
|
| 342 |
- wrmsrl(MSR_IA32_SPEC_CTRL, msr); |
|
| 343 |
- } |
|
| 344 |
- } |
|
| 345 |
-@@ -250,7 +250,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 346 |
- if ((tifp ^ tifn) & _TIF_NOTSC) |
|
| 347 |
- cr4_toggle_bits(X86_CR4_TSD); |
|
| 348 |
- |
|
| 349 |
-- if ((tifp ^ tifn) & _TIF_RDS) |
|
| 350 |
-+ if ((tifp ^ tifn) & _TIF_SSBD) |
|
| 351 |
- __speculative_store_bypass_update(tifn); |
|
| 352 |
- } |
|
| 353 |
- |
|
| 354 |
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
|
| 355 |
-index a9409f0..18e6db5 100644 |
|
| 356 |
-+++ b/arch/x86/kvm/cpuid.c |
|
| 357 |
-@@ -382,7 +382,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
|
| 358 |
- |
|
| 359 |
- /* cpuid 7.0.edx*/ |
|
| 360 |
- const u32 kvm_cpuid_7_0_edx_x86_features = |
|
| 361 |
-- F(SPEC_CTRL) | F(RDS) | F(ARCH_CAPABILITIES); |
|
| 362 |
-+ F(SPEC_CTRL) | F(SSBD) | F(ARCH_CAPABILITIES); |
|
| 363 |
- |
|
| 364 |
- /* all calls to cpuid_count() should be made on the same cpu */ |
|
| 365 |
- get_cpu(); |
|
| 366 |
-diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h |
|
| 367 |
-index 24187d0..aedeb6c 100644 |
|
| 368 |
-+++ b/arch/x86/kvm/cpuid.h |
|
| 369 |
-@@ -179,7 +179,7 @@ static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu) |
|
| 370 |
- if (best && (best->ebx & bit(X86_FEATURE_IBRS))) |
|
| 371 |
- return true; |
|
| 372 |
- best = kvm_find_cpuid_entry(vcpu, 7, 0); |
|
| 373 |
-- return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_RDS))); |
|
| 374 |
-+ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD))); |
|
| 375 |
- } |
|
| 376 |
- |
|
| 377 |
- static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu) |
|
| 378 |
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
|
| 379 |
-index 0eb3863..874b661 100644 |
|
| 380 |
-+++ b/arch/x86/kvm/vmx.c |
|
| 381 |
-@@ -3141,7 +3141,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
|
| 382 |
- return 1; |
|
| 383 |
- |
|
| 384 |
- /* The STIBP bit doesn't fault even if it's not advertised */ |
|
| 385 |
-- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS)) |
|
| 386 |
-+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) |
|
| 387 |
- return 1; |
|
| 388 |
- |
|
| 389 |
- vmx->spec_ctrl = data; |
|
| 390 |
-2.7.4 |
|
| 391 |
- |
| 392 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,34 +0,0 @@
-From c33e51ed9fc094af5b2469428a609b5977d783c6 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Wed, 9 May 2018 21:41:38 +0200
-Subject: [PATCH 31/54] proc: Use underscores for SSBD in 'status'
-
-commit e96f46ee8587607a828f783daa6eb5b44d25004d upstream
-
-The style for the 'status' file is CamelCase or this. _.
-
-Fixes: fae1fa0fc ("proc: Provide details on speculation flaw mitigations")
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- fs/proc/array.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 3e37195..94f83e7 100644
-+++ b/fs/proc/array.c
-@@ -347,7 +347,7 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
- #ifdef CONFIG_SECCOMP
- seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
- #endif
-- seq_printf(m, "\nSpeculation Store Bypass:\t");
-+ seq_printf(m, "\nSpeculation_Store_Bypass:\t");
- switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
- case -EINVAL:
- seq_printf(m, "unknown");
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,87 +0,0 @@
-From 4d01ab5d2407e797f7dbac38e3adfc5f5169073f Mon Sep 17 00:00:00 2001
-From: Borislav Petkov <bp@suse.de>
-Date: Tue, 8 May 2018 15:43:45 +0200
-Subject: [PATCH 32/54] Documentation/spec_ctrl: Do some minor cleanups
-
-commit dd0792699c4058e63c0715d9a7c2d40226fcdddc upstream
-
-Fix some typos, improve formulations, end sentences with a fullstop.
-
-Signed-off-by: Borislav Petkov <bp@suse.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Documentation/spec_ctrl.txt | 24 ++++++++++++------------
- 1 file changed, 12 insertions(+), 12 deletions(-)
-
-diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
-index 1b3690d..32f3d55 100644
-+++ b/Documentation/spec_ctrl.txt
-@@ -2,13 +2,13 @@
- Speculation Control
- ===================
-
--Quite some CPUs have speculation related misfeatures which are in fact
--vulnerabilites causing data leaks in various forms even accross privilege
--domains.
-+Quite some CPUs have speculation-related misfeatures which are in
-+fact vulnerabilities causing data leaks in various forms even across
-+privilege domains.
-
- The kernel provides mitigation for such vulnerabilities in various
--forms. Some of these mitigations are compile time configurable and some on
--the kernel command line.
-+forms. Some of these mitigations are compile-time configurable and some
-+can be supplied on the kernel command line.
-
- There is also a class of mitigations which are very expensive, but they can
- be restricted to a certain set of processes or tasks in controlled
-@@ -32,18 +32,18 @@ the following meaning:
- Bit Define Description
- ==== ===================== ===================================================
- 0 PR_SPEC_PRCTL Mitigation can be controlled per task by
-- PR_SET_SPECULATION_CTRL
-+ PR_SET_SPECULATION_CTRL.
- 1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
-- disabled
-+ disabled.
- 2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
-- enabled
-+ enabled.
- 3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
- subsequent prctl(..., PR_SPEC_ENABLE) will fail.
- ==== ===================== ===================================================
-
- If all bits are 0 the CPU is not affected by the speculation misfeature.
-
--If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
-+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
- available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
- misfeature will fail.
-
-@@ -61,9 +61,9 @@ Common error codes
- Value Meaning
- ======= =================================================================
- EINVAL The prctl is not implemented by the architecture or unused
-- prctl(2) arguments are not 0
-+ prctl(2) arguments are not 0.
-
--ENODEV arg2 is selecting a not supported speculation misfeature
-+ENODEV arg2 is selecting a not supported speculation misfeature.
- ======= =================================================================
-
- PR_SET_SPECULATION_CTRL error codes
-@@ -74,7 +74,7 @@ Value Meaning
- 0 Success
-
- ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
-- PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
-+ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
-
- ENXIO Control of the selected speculation misfeature is not possible.
- See PR_GET_SPECULATION_CTRL.
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,35 +0,0 @@
-From 721940f2f2f86c7d0303681a6729d67c52276436 Mon Sep 17 00:00:00 2001
-From: Jiri Kosina <jkosina@suse.cz>
-Date: Thu, 10 May 2018 22:47:18 +0200
-Subject: [PATCH 33/54] x86/bugs: Fix __ssb_select_mitigation() return type
-
-commit d66d8ff3d21667b41eddbe86b35ab411e40d8c5f upstream
-
-__ssb_select_mitigation() returns one of the members of enum ssb_mitigation,
-not ssb_mitigation_cmd; fix the prototype to reflect that.
-
-Fixes: 24f7fc83b9204 ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/bugs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index ae6f9ba..c7b4d11 100644
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -467,7 +467,7 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
- return cmd;
- }
-
--static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
-+static enum ssb_mitigation __init __ssb_select_mitigation(void)
- {
- enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
- enum ssb_mitigation_cmd cmd;
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,34 +0,0 @@
-From 91c8d38470acf7b6b4a26087add879f512305237 Mon Sep 17 00:00:00 2001
-From: Jiri Kosina <jkosina@suse.cz>
-Date: Thu, 10 May 2018 22:47:32 +0200
-Subject: [PATCH 34/54] x86/bugs: Make cpu_show_common() static
-
-commit 7bb4d366cba992904bffa4820d24e70a3de93e76 upstream
-
-cpu_show_common() is not used outside of arch/x86/kernel/cpu/bugs.c, so
-make it static.
-
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/bugs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index c7b4d11..8187642 100644
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -639,7 +639,7 @@ void x86_spec_ctrl_setup_ap(void)
-
- #ifdef CONFIG_SYSFS
-
--ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
-+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
- char *buf, unsigned int bug)
- {
- if (!boot_cpu_has_bug(bug))
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,42 +0,0 @@
-From a972acb35aa7282fc471ec9ce4274547f5791460 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 11 May 2018 16:50:35 -0400
-Subject: [PATCH 35/54] x86/bugs: Fix the parameters alignment and missing void
-
-commit ffed645e3be0e32f8e9ab068d257aee8d0fe8eec upstream
-
-Fixes: 7bb4d366c ("x86/bugs: Make cpu_show_common() static")
-Fixes: 24f7fc83b ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/bugs.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 8187642..4f8c88e 100644
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -530,7 +530,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
- return mode;
- }
-
--static void ssb_select_mitigation()
-+static void ssb_select_mitigation(void)
- {
- ssb_mode = __ssb_select_mitigation();
-
-@@ -640,7 +640,7 @@ void x86_spec_ctrl_setup_ap(void)
- #ifdef CONFIG_SYSFS
-
- static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
-- char *buf, unsigned int bug)
-+ char *buf, unsigned int bug)
- {
- if (!boot_cpu_has_bug(bug))
- return sprintf(buf, "Not affected\n");
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,42 +0,0 @@
-From de18dfe897c77e37a955b82998ea832d2d1273ab Mon Sep 17 00:00:00 2001
-From: Jim Mattson <jmattson@google.com>
-Date: Sun, 13 May 2018 17:33:57 -0400
-Subject: [PATCH 36/54] x86/cpu: Make alternative_msr_write work for 32-bit
- code
-
-commit 5f2b745f5e1304f438f9b2cd03ebc8120b6e0d3b upstream
-
-Cast val and (val >> 32) to (u32), so that they fit in a
-general-purpose register in both 32-bit and 64-bit code.
-
-[ tglx: Made it u32 instead of uintptr_t ]
-
-Fixes: c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
-Signed-off-by: Jim Mattson <jmattson@google.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/include/asm/nospec-branch.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
-index 328ea3c..bc258e6 100644
-+++ b/arch/x86/include/asm/nospec-branch.h
-@@ -265,8 +265,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
- {
- asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
- : : "c" (msr),
-- "a" (val),
-- "d" (val >> 32),
-+ "a" ((u32)val),
-+ "d" ((u32)(val >> 32)),
- [feature] "i" (feature)
- : "memory");
- }
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,70 +0,0 @@
-From f30da25e77ba42e685d370e695759910625a885c Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 11 May 2018 15:21:01 +0200
-Subject: [PATCH 37/54] KVM: SVM: Move spec control call after restore of GS
-
-commit 15e6c22fd8e5a42c5ed6d487b7c9fe44c2517765 upstream
-
-svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
-before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
-to determine the host SSBD state of the thread. 'current' is GS based, but
-host GS is not yet restored and the access causes a triple fault.
-
-Move the call after the host GS restore.
-
-Fixes: 885f82bfbc6f x86/process: Allow runtime control of Speculative Store Bypass
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kvm/svm.c | 24 ++++++++++++------------
- 1 file changed, 12 insertions(+), 12 deletions(-)
-
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 516ddff..d1a4321 100644
-+++ b/arch/x86/kvm/svm.c
-@@ -5011,6 +5011,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
- #endif
- );
-
-+ /* Eliminate branch target predictions from guest mode */
-+ vmexit_fill_RSB();
-+
-+#ifdef CONFIG_X86_64
-+ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-+#else
-+ loadsegment(fs, svm->host.fs);
-+#ifndef CONFIG_X86_32_LAZY_GS
-+ loadsegment(gs, svm->host.gs);
-+#endif
-+#endif
-+
- /*
- * We do not use IBRS in the kernel. If this vCPU has used the
- * SPEC_CTRL MSR it may have left it on; save the value and
-@@ -5031,18 +5043,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
-
- x86_spec_ctrl_restore_host(svm->spec_ctrl);
-
-- /* Eliminate branch target predictions from guest mode */
-- vmexit_fill_RSB();
--
--#ifdef CONFIG_X86_64
-- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
--#else
-- loadsegment(fs, svm->host.fs);
--#ifndef CONFIG_X86_32_LAZY_GS
-- loadsegment(gs, svm->host.gs);
--#endif
--#endif
--
- reload_tss(vcpu);
-
- local_irq_disable();
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,156 +0,0 @@ |
| 1 |
-From d359a8adfc5e7974969b45eaeb8569621b1dbe12 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Borislav Petkov <bp@suse.de> |
|
| 3 |
-Date: Wed, 2 May 2018 18:15:14 +0200 |
|
| 4 |
-Subject: [PATCH 38/54] x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP |
|
| 5 |
-MIME-Version: 1.0 |
|
| 6 |
-Content-Type: text/plain; charset=UTF-8 |
|
| 7 |
-Content-Transfer-Encoding: 8bit |
|
| 8 |
- |
|
| 9 |
-commit e7c587da125291db39ddf1f49b18e5970adbac17 upstream |
|
| 10 |
- |
|
| 11 |
-Intel and AMD have different CPUID bits hence for those use synthetic bits |
|
| 12 |
-which get set on the respective vendor's in init_speculation_control(). So |
|
| 13 |
-that debacles like what the commit message of |
|
| 14 |
- |
|
| 15 |
- c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
|
|
| 16 |
- |
|
| 17 |
-talks about don't happen anymore. |
|
| 18 |
- |
|
| 19 |
-Signed-off-by: Borislav Petkov <bp@suse.de> |
|
| 20 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 21 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 22 |
-Tested-by: Jörg Otte <jrg.otte@gmail.com> |
|
| 23 |
-Cc: Linus Torvalds <torvalds@linux-foundation.org> |
|
| 24 |
-Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> |
|
| 25 |
-Link: https://lkml.kernel.org/r/20180504161815.GG9257@pd.tnic |
|
| 26 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 27 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 28 |
- arch/x86/include/asm/cpufeatures.h | 12 ++++++++---- |
|
| 29 |
- arch/x86/kernel/cpu/common.c | 14 ++++++++++---- |
|
| 30 |
- arch/x86/kvm/cpuid.c | 10 +++++----- |
|
| 31 |
- arch/x86/kvm/cpuid.h | 4 ++-- |
|
| 32 |
- 4 files changed, 25 insertions(+), 15 deletions(-) |
|
| 33 |
- |
|
| 34 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 35 |
-index 0ed8ea5..059437a 100644 |
|
| 36 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 37 |
-@@ -205,7 +205,10 @@ |
|
| 38 |
- #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
|
| 39 |
- #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ |
|
| 40 |
- #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ |
|
| 41 |
--#define X86_FEATURE_AMD_SSBD (7*32+24) /* "" AMD SSBD implementation */ |
|
| 42 |
-+#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */ |
|
| 43 |
-+#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ |
|
| 44 |
-+#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ |
|
| 45 |
-+#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ |
|
| 46 |
- |
|
| 47 |
- /* Virtualization flags: Linux defined, word 8 */ |
|
| 48 |
- #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
|
| 49 |
-@@ -263,9 +266,9 @@ |
|
| 50 |
- /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ |
|
| 51 |
- #define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ |
|
| 52 |
- #define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ |
|
| 53 |
--#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ |
|
| 54 |
--#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ |
|
| 55 |
--#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ |
|
| 56 |
-+#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ |
|
| 57 |
-+#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ |
|
| 58 |
-+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ |
|
| 59 |
- |
|
| 60 |
- /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ |
|
| 61 |
- #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ |
|
| 62 |
-@@ -301,6 +304,7 @@ |
|
| 63 |
- #define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ |
|
| 64 |
- #define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ |
|
| 65 |
- |
|
| 66 |
-+ |
|
| 67 |
- /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ |
|
| 68 |
- #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ |
|
| 69 |
- #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ |
|
| 70 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 71 |
-index d0dd736..67bfa3c 100644 |
|
| 72 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 73 |
-@@ -725,17 +725,23 @@ static void init_speculation_control(struct cpuinfo_x86 *c) |
|
| 74 |
- * and they also have a different bit for STIBP support. Also, |
|
| 75 |
- * a hypervisor might have set the individual AMD bits even on |
|
| 76 |
- * Intel CPUs, for finer-grained selection of what's available. |
|
| 77 |
-- * |
|
| 78 |
-- * We use the AMD bits in 0x8000_0008 EBX as the generic hardware |
|
| 79 |
-- * features, which are visible in /proc/cpuinfo and used by the |
|
| 80 |
-- * kernel. So set those accordingly from the Intel bits. |
|
| 81 |
- */ |
|
| 82 |
- if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
|
|
| 83 |
- set_cpu_cap(c, X86_FEATURE_IBRS); |
|
| 84 |
- set_cpu_cap(c, X86_FEATURE_IBPB); |
|
| 85 |
- } |
|
| 86 |
-+ |
|
| 87 |
- if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) |
|
| 88 |
- set_cpu_cap(c, X86_FEATURE_STIBP); |
|
| 89 |
-+ |
|
| 90 |
-+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) |
|
| 91 |
-+ set_cpu_cap(c, X86_FEATURE_IBRS); |
|
| 92 |
-+ |
|
| 93 |
-+ if (cpu_has(c, X86_FEATURE_AMD_IBPB)) |
|
| 94 |
-+ set_cpu_cap(c, X86_FEATURE_IBPB); |
|
| 95 |
-+ |
|
| 96 |
-+ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) |
|
| 97 |
-+ set_cpu_cap(c, X86_FEATURE_STIBP); |
|
| 98 |
- } |
|
| 99 |
- |
|
| 100 |
- void get_cpu_cap(struct cpuinfo_x86 *c) |
|
| 101 |
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
|
| 102 |
-index 18e6db5..910c2db 100644 |
|
| 103 |
-+++ b/arch/x86/kvm/cpuid.c |
|
| 104 |
-@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
|
| 105 |
- |
|
| 106 |
- /* cpuid 0x80000008.ebx */ |
|
| 107 |
- const u32 kvm_cpuid_8000_0008_ebx_x86_features = |
|
| 108 |
-- F(IBPB) | F(IBRS); |
|
| 109 |
-+ F(AMD_IBPB) | F(AMD_IBRS); |
|
| 110 |
- |
|
| 111 |
- /* cpuid 0xC0000001.edx */ |
|
| 112 |
- const u32 kvm_cpuid_C000_0001_edx_x86_features = |
|
| 113 |
-@@ -619,10 +619,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
|
| 114 |
- entry->eax = g_phys_as | (virt_as << 8); |
|
| 115 |
- entry->edx = 0; |
|
| 116 |
- /* IBRS and IBPB aren't necessarily present in hardware cpuid */ |
|
| 117 |
-- if (boot_cpu_has(X86_FEATURE_IBPB)) |
|
| 118 |
-- entry->ebx |= F(IBPB); |
|
| 119 |
-- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 120 |
-- entry->ebx |= F(IBRS); |
|
| 121 |
-+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB)) |
|
| 122 |
-+ entry->ebx |= F(AMD_IBPB); |
|
| 123 |
-+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS)) |
|
| 124 |
-+ entry->ebx |= F(AMD_IBRS); |
|
| 125 |
- entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; |
|
| 126 |
- cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); |
|
| 127 |
- break; |
|
| 128 |
-diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h |
|
| 129 |
-index aedeb6c..eb47c37 100644 |
|
| 130 |
-+++ b/arch/x86/kvm/cpuid.h |
|
| 131 |
-@@ -165,7 +165,7 @@ static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu) |
|
| 132 |
- struct kvm_cpuid_entry2 *best; |
|
| 133 |
- |
|
| 134 |
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); |
|
| 135 |
-- if (best && (best->ebx & bit(X86_FEATURE_IBPB))) |
|
| 136 |
-+ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB))) |
|
| 137 |
- return true; |
|
| 138 |
- best = kvm_find_cpuid_entry(vcpu, 7, 0); |
|
| 139 |
- return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL)); |
|
| 140 |
-@@ -176,7 +176,7 @@ static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu) |
|
| 141 |
- struct kvm_cpuid_entry2 *best; |
|
| 142 |
- |
|
| 143 |
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); |
|
| 144 |
-- if (best && (best->ebx & bit(X86_FEATURE_IBRS))) |
|
| 145 |
-+ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS))) |
|
| 146 |
- return true; |
|
| 147 |
- best = kvm_find_cpuid_entry(vcpu, 7, 0); |
|
| 148 |
- return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD))); |
|
| 149 |
-2.7.4 |
|
| 150 |
- |
| 151 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,155 +0,0 @@ |
| 1 |
-From 0197f009f7ddca5784d0b4a00529dad84c09cbc6 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Thu, 10 May 2018 19:13:18 +0200 |
|
| 4 |
-Subject: [PATCH 39/54] x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration |
|
| 5 |
- from IBRS |
|
| 6 |
- |
|
| 7 |
-commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961 upstream |
|
| 8 |
- |
|
| 9 |
-The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on |
|
| 10 |
-Intel and implied by IBRS or STIBP support on AMD. That's just confusing |
|
| 11 |
-and in case an AMD CPU has IBRS not supported because the underlying |
|
| 12 |
-problem has been fixed but has another bit valid in the SPEC_CTRL MSR, |
|
| 13 |
-the thing falls apart. |
|
| 14 |
- |
|
| 15 |
-Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the |
|
| 16 |
-availability on both Intel and AMD. |
|
| 17 |
- |
|
| 18 |
-While at it replace the boot_cpu_has() checks with static_cpu_has() where |
|
| 19 |
-possible. This prevents late microcode loading from exposing SPEC_CTRL, but |
|
| 20 |
-late loading is already very limited as it does not reevaluate the |
|
| 21 |
-mitigation options and other bits and pieces. Having static_cpu_has() is |
|
| 22 |
-the simplest and least fragile solution. |
|
| 23 |
- |
|
| 24 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 25 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 26 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 27 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 28 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 29 |
- arch/x86/include/asm/cpufeatures.h | 2 ++ |
|
| 30 |
- arch/x86/kernel/cpu/bugs.c | 18 +++++++++++------- |
|
| 31 |
- arch/x86/kernel/cpu/common.c | 9 +++++++-- |
|
| 32 |
- arch/x86/kernel/cpu/intel.c | 1 + |
|
| 33 |
- 4 files changed, 21 insertions(+), 9 deletions(-) |
|
| 34 |
- |
|
| 35 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 36 |
-index 059437a..ca0f33f 100644 |
|
| 37 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 38 |
-@@ -197,6 +197,8 @@ |
|
| 39 |
- #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ |
|
| 40 |
- #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ |
|
| 41 |
- |
|
| 42 |
-+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ |
|
| 43 |
-+ |
|
| 44 |
- #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ |
|
| 45 |
- |
|
| 46 |
- /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */ |
|
| 47 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 48 |
-index 4f8c88e..59649310 100644 |
|
| 49 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 50 |
-@@ -63,7 +63,7 @@ void __init check_bugs(void) |
|
| 51 |
- * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD |
|
| 52 |
- * init code as it is not enumerated and depends on the family. |
|
| 53 |
- */ |
|
| 54 |
-- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 55 |
-+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 56 |
- rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 57 |
- |
|
| 58 |
- /* Select the proper spectre mitigation before patching alternatives */ |
|
| 59 |
-@@ -144,7 +144,7 @@ u64 x86_spec_ctrl_get_default(void) |
|
| 60 |
- {
|
|
| 61 |
- u64 msrval = x86_spec_ctrl_base; |
|
| 62 |
- |
|
| 63 |
-- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 64 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL)) |
|
| 65 |
- msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 66 |
- return msrval; |
|
| 67 |
- } |
|
| 68 |
-@@ -154,10 +154,12 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 69 |
- {
|
|
| 70 |
- u64 host = x86_spec_ctrl_base; |
|
| 71 |
- |
|
| 72 |
-- if (!boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 73 |
-+ /* Is MSR_SPEC_CTRL implemented ? */ |
|
| 74 |
-+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 75 |
- return; |
|
| 76 |
- |
|
| 77 |
-- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 78 |
-+ /* Intel controls SSB in MSR_SPEC_CTRL */ |
|
| 79 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL)) |
|
| 80 |
- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 81 |
- |
|
| 82 |
- if (host != guest_spec_ctrl) |
|
| 83 |
-@@ -169,10 +171,12 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 84 |
- {
|
|
| 85 |
- u64 host = x86_spec_ctrl_base; |
|
| 86 |
- |
|
| 87 |
-- if (!boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 88 |
-+ /* Is MSR_SPEC_CTRL implemented ? */ |
|
| 89 |
-+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 90 |
- return; |
|
| 91 |
- |
|
| 92 |
-- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
|
| 93 |
-+ /* Intel controls SSB in MSR_SPEC_CTRL */ |
|
| 94 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL)) |
|
| 95 |
- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 96 |
- |
|
| 97 |
- if (host != guest_spec_ctrl) |
|
| 98 |
-@@ -630,7 +634,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
|
| 99 |
- |
|
| 100 |
- void x86_spec_ctrl_setup_ap(void) |
|
| 101 |
- {
|
|
| 102 |
-- if (boot_cpu_has(X86_FEATURE_IBRS)) |
|
| 103 |
-+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 104 |
- x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask); |
|
| 105 |
- |
|
| 106 |
- if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) |
|
| 107 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 108 |
-index 67bfa3c..04362282 100644 |
|
| 109 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 110 |
-@@ -729,19 +729,24 @@ static void init_speculation_control(struct cpuinfo_x86 *c) |
|
| 111 |
- if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
|
|
| 112 |
- set_cpu_cap(c, X86_FEATURE_IBRS); |
|
| 113 |
- set_cpu_cap(c, X86_FEATURE_IBPB); |
|
| 114 |
-+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
|
| 115 |
- } |
|
| 116 |
- |
|
| 117 |
- if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) |
|
| 118 |
- set_cpu_cap(c, X86_FEATURE_STIBP); |
|
| 119 |
- |
|
| 120 |
-- if (cpu_has(c, X86_FEATURE_AMD_IBRS)) |
|
| 121 |
-+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
|
|
| 122 |
- set_cpu_cap(c, X86_FEATURE_IBRS); |
|
| 123 |
-+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
|
| 124 |
-+ } |
|
| 125 |
- |
|
| 126 |
- if (cpu_has(c, X86_FEATURE_AMD_IBPB)) |
|
| 127 |
- set_cpu_cap(c, X86_FEATURE_IBPB); |
|
| 128 |
- |
|
| 129 |
-- if (cpu_has(c, X86_FEATURE_AMD_STIBP)) |
|
| 130 |
-+ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
|
|
| 131 |
- set_cpu_cap(c, X86_FEATURE_STIBP); |
|
| 132 |
-+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
|
| 133 |
-+ } |
|
| 134 |
- } |
|
| 135 |
- |
|
| 136 |
- void get_cpu_cap(struct cpuinfo_x86 *c) |
|
| 137 |
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c |
|
| 138 |
-index 047adaa..7f495e8 100644 |
|
| 139 |
-+++ b/arch/x86/kernel/cpu/intel.c |
|
| 140 |
-@@ -153,6 +153,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
|
| 141 |
- setup_clear_cpu_cap(X86_FEATURE_IBPB); |
|
| 142 |
- setup_clear_cpu_cap(X86_FEATURE_STIBP); |
|
| 143 |
- setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); |
|
| 144 |
-+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); |
|
| 145 |
- setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); |
|
| 146 |
- setup_clear_cpu_cap(X86_FEATURE_SSBD); |
|
| 147 |
- } |
|
| 148 |
-2.7.4 |
|
| 149 |
- |
| 150 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,163 +0,0 @@ |
| 1 |
-From c0374a5d3dcefc7e21ae1ebe2343c88f8e0ff121 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Thu, 10 May 2018 20:21:36 +0200 |
|
| 4 |
-Subject: [PATCH 40/54] x86/cpufeatures: Disentangle SSBD enumeration |
|
| 5 |
- |
|
| 6 |
-commit 52817587e706686fcdb27f14c1b000c92f266c96 upstream |
|
| 7 |
- |
|
| 8 |
-The SSBD enumeration is similarly to the other bits magically shared |
|
| 9 |
-between Intel and AMD though the mechanisms are different. |
|
| 10 |
- |
|
| 11 |
-Make X86_FEATURE_SSBD synthetic and set it depending on the vendor specific |
|
| 12 |
-features or family dependent setup. |
|
| 13 |
- |
|
| 14 |
-Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is |
|
| 15 |
-controlled via MSR_SPEC_CTRL and fix up the usage sites. |
|
| 16 |
- |
|
| 17 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 18 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 19 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 20 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 21 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 22 |
- arch/x86/include/asm/cpufeatures.h | 5 +++-- |
|
| 23 |
- arch/x86/kernel/cpu/amd.c | 7 +------ |
|
| 24 |
- arch/x86/kernel/cpu/bugs.c | 10 +++++----- |
|
| 25 |
- arch/x86/kernel/cpu/common.c | 3 +++ |
|
| 26 |
- arch/x86/kernel/cpu/intel.c | 1 + |
|
| 27 |
- arch/x86/kernel/process.c | 2 +- |
|
| 28 |
- 6 files changed, 14 insertions(+), 14 deletions(-) |
|
| 29 |
- |
|
| 30 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 31 |
-index ca0f33f..d071767 100644 |
|
| 32 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 33 |
-@@ -198,6 +198,7 @@ |
|
| 34 |
- #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ |
|
| 35 |
- |
|
| 36 |
- #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ |
|
| 37 |
-+#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ |
|
| 38 |
- |
|
| 39 |
- #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ |
|
| 40 |
- |
|
| 41 |
-@@ -207,7 +208,7 @@ |
|
| 42 |
- #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
|
| 43 |
- #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ |
|
| 44 |
- #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ |
|
| 45 |
--#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */ |
|
| 46 |
-+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */ |
|
| 47 |
- #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ |
|
| 48 |
- #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ |
|
| 49 |
- #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ |
|
| 50 |
-@@ -314,7 +315,7 @@ |
|
| 51 |
- #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
|
| 52 |
- #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
|
| 53 |
- #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
|
| 54 |
--#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */ |
|
| 55 |
-+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ |
|
| 56 |
- |
|
| 57 |
- /* |
|
| 58 |
- * BUG word(s) |
|
| 59 |
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
|
| 60 |
-index acb2fcc..179d572 100644 |
|
| 61 |
-+++ b/arch/x86/kernel/cpu/amd.c |
|
| 62 |
-@@ -558,8 +558,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) |
|
| 63 |
- * avoid RMW. If that faults, do not enable SSBD. |
|
| 64 |
- */ |
|
| 65 |
- if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
|
|
| 66 |
-+ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); |
|
| 67 |
- setup_force_cpu_cap(X86_FEATURE_SSBD); |
|
| 68 |
-- setup_force_cpu_cap(X86_FEATURE_AMD_SSBD); |
|
| 69 |
- x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; |
|
| 70 |
- } |
|
| 71 |
- } |
|
| 72 |
-@@ -848,11 +848,6 @@ static void init_amd(struct cpuinfo_x86 *c) |
|
| 73 |
- /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ |
|
| 74 |
- if (!cpu_has(c, X86_FEATURE_XENPV)) |
|
| 75 |
- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); |
|
| 76 |
-- |
|
| 77 |
-- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
|
|
| 78 |
-- set_cpu_cap(c, X86_FEATURE_SSBD); |
|
| 79 |
-- set_cpu_cap(c, X86_FEATURE_AMD_SSBD); |
|
| 80 |
-- } |
|
| 81 |
- } |
|
| 82 |
- |
|
| 83 |
- #ifdef CONFIG_X86_32 |
|
| 84 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 85 |
-index 59649310..15a6c58 100644 |
|
| 86 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 87 |
-@@ -158,8 +158,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 88 |
- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 89 |
- return; |
|
| 90 |
- |
|
| 91 |
-- /* Intel controls SSB in MSR_SPEC_CTRL */ |
|
| 92 |
-- if (static_cpu_has(X86_FEATURE_SPEC_CTRL)) |
|
| 93 |
-+ /* SSBD controlled in MSR_SPEC_CTRL */ |
|
| 94 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 95 |
- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 96 |
- |
|
| 97 |
- if (host != guest_spec_ctrl) |
|
| 98 |
-@@ -175,8 +175,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 99 |
- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 100 |
- return; |
|
| 101 |
- |
|
| 102 |
-- /* Intel controls SSB in MSR_SPEC_CTRL */ |
|
| 103 |
-- if (static_cpu_has(X86_FEATURE_SPEC_CTRL)) |
|
| 104 |
-+ /* SSBD controlled in MSR_SPEC_CTRL */ |
|
| 105 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 106 |
- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 107 |
- |
|
| 108 |
- if (host != guest_spec_ctrl) |
|
| 109 |
-@@ -188,7 +188,7 @@ static void x86_amd_ssb_disable(void) |
|
| 110 |
- {
|
|
| 111 |
- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; |
|
| 112 |
- |
|
| 113 |
-- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
|
| 114 |
-+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 115 |
- wrmsrl(MSR_AMD64_LS_CFG, msrval); |
|
| 116 |
- } |
|
| 117 |
- |
|
| 118 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 119 |
-index 04362282..945e841 100644 |
|
| 120 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 121 |
-@@ -735,6 +735,9 @@ static void init_speculation_control(struct cpuinfo_x86 *c) |
|
| 122 |
- if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) |
|
| 123 |
- set_cpu_cap(c, X86_FEATURE_STIBP); |
|
| 124 |
- |
|
| 125 |
-+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 126 |
-+ set_cpu_cap(c, X86_FEATURE_SSBD); |
|
| 127 |
-+ |
|
| 128 |
- if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
|
|
| 129 |
- set_cpu_cap(c, X86_FEATURE_IBRS); |
|
| 130 |
- set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
|
| 131 |
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c |
|
| 132 |
-index 7f495e8..93781e3 100644 |
|
| 133 |
-+++ b/arch/x86/kernel/cpu/intel.c |
|
| 134 |
-@@ -156,6 +156,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
|
| 135 |
- setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); |
|
| 136 |
- setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); |
|
| 137 |
- setup_clear_cpu_cap(X86_FEATURE_SSBD); |
|
| 138 |
-+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD); |
|
| 139 |
- } |
|
| 140 |
- |
|
| 141 |
- /* |
|
| 142 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 143 |
-index c344230..b3cd08e 100644 |
|
| 144 |
-+++ b/arch/x86/kernel/process.c |
|
| 145 |
-@@ -207,7 +207,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn |
|
| 146 |
- {
|
|
| 147 |
- u64 msr; |
|
| 148 |
- |
|
| 149 |
-- if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
|
|
| 150 |
-+ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
|
|
| 151 |
- msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); |
|
| 152 |
- wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 153 |
- } else {
|
|
| 154 |
-2.7.4 |
|
| 155 |
- |
| 156 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,55 +0,0 @@
-From be3e692ab1eb72a96b4b46a63384fbe2e5bac2f8 Mon Sep 17 00:00:00 2001
-From: Borislav Petkov <bp@suse.de>
-Date: Thu, 7 Sep 2017 19:08:21 +0200
-Subject: [PATCH 41/54] x86/cpu/AMD: Fix erratum 1076 (CPB bit)
-
-commit f7f3dc00f61261cdc9ccd8b886f21bc4dffd6fd9 upstream
-
-CPUID Fn8000_0007_EDX[CPB] is wrongly 0 on models up to B1. But they do
-support CPB (AMD's Core Performance Boosting cpufreq CPU feature), so fix that.
-
-Signed-off-by: Borislav Petkov <bp@suse.de>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Link: http://lkml.kernel.org/r/20170907170821.16021-1-bp@alien8.de
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/kernel/cpu/amd.c | 11 +++++++++++
- 1 file changed, 11 insertions(+)
-
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index 179d572..21367b5 100644
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -749,6 +749,16 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
- }
- }
-
-+static void init_amd_zn(struct cpuinfo_x86 *c)
-+{
-+ /*
-+ * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
-+ * all up to and including B1.
-+ */
-+ if (c->x86_model <= 1 && c->x86_stepping <= 1)
-+ set_cpu_cap(c, X86_FEATURE_CPB);
-+}
-+
- static void init_amd(struct cpuinfo_x86 *c)
- {
- u32 dummy;
-@@ -779,6 +789,7 @@ static void init_amd(struct cpuinfo_x86 *c)
- case 0x10: init_amd_gh(c); break;
- case 0x12: init_amd_ln(c); break;
- case 0x15: init_amd_bd(c); break;
-+ case 0x17: init_amd_zn(c); break;
- }
-
- /* Enable workaround for FXSAVE leak */
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,48 +0,0 @@
-From 3532efa1474efa07d13c3b8022bc84e07f94b55b Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 10 May 2018 16:26:00 +0200
-Subject: [PATCH 42/54] x86/cpufeatures: Add FEATURE_ZEN
-
-commit d1035d971829dcf80e8686ccde26f94b0a069472 upstream
-
-Add a ZEN feature bit so family-dependent static_cpu_has() optimizations
-can be built for ZEN.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- arch/x86/include/asm/cpufeatures.h | 2 ++
- arch/x86/kernel/cpu/amd.c | 1 +
- 2 files changed, 3 insertions(+)
-
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
-index d071767..ec87b8c 100644
-+++ b/arch/x86/include/asm/cpufeatures.h
-@@ -212,6 +212,8 @@
- #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
- #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
- #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
-+
-
- /* Virtualization flags: Linux defined, word 8 */
- #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index 21367b5..4c2be99 100644
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -751,6 +751,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
-
- static void init_amd_zn(struct cpuinfo_x86 *c)
- {
-+ set_cpu_cap(c, X86_FEATURE_ZEN);
- /*
- * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
- * all up to and including B1.
-2.7.4
-
deleted file mode 100644 |
| ... | ... |
@@ -1,240 +0,0 @@ |
| 1 |
-From dd42e8f6dc439a771e8864502f1404ecec2b031b Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Wed, 9 May 2018 21:53:09 +0200 |
|
| 4 |
-Subject: [PATCH 43/54] x86/speculation: Handle HT correctly on AMD |
|
| 5 |
- |
|
| 6 |
-commit 1f50ddb4f4189243c05926b842dc1a0332195f31 upstream |
|
| 7 |
- |
|
| 8 |
-The AMD64_LS_CFG MSR is a per core MSR on Family 17H CPUs. That means when |
|
| 9 |
-hyperthreading is enabled the SSBD bit toggle needs to take both cores into |
|
| 10 |
-account. Otherwise the following situation can happen: |
|
| 11 |
- |
|
| 12 |
-CPU0 CPU1 |
|
| 13 |
- |
|
| 14 |
-disable SSB |
|
| 15 |
- disable SSB |
|
| 16 |
- enable SSB <- Enables it for the Core, i.e. for CPU0 as well |
|
| 17 |
- |
|
| 18 |
-So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled |
|
| 19 |
-again. |
|
| 20 |
- |
|
| 21 |
-On Intel the SSBD control is per core as well, but the synchronization |
|
| 22 |
-logic is implemented behind the per thread SPEC_CTRL MSR. It works like |
|
| 23 |
-this: |
|
| 24 |
- |
|
| 25 |
- CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL |
|
| 26 |
- |
|
| 27 |
-i.e. if one of the threads enables a mitigation then this affects both and |
|
| 28 |
-the mitigation is only disabled in the core when both threads disabled it. |
|
| 29 |
- |
|
| 30 |
-Add the necessary synchronization logic for AMD family 17H. Unfortunately |
|
| 31 |
-that requires a spinlock to serialize the access to the MSR, but the locks |
|
| 32 |
-are only shared between siblings. |
|
| 33 |
- |
|
| 34 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 35 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 36 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 37 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 38 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 39 |
- arch/x86/include/asm/spec-ctrl.h | 6 ++ |
|
| 40 |
- arch/x86/kernel/process.c | 125 +++++++++++++++++++++++++++++++++++++-- |
|
| 41 |
- arch/x86/kernel/smpboot.c | 5 ++ |
|
| 42 |
- 3 files changed, 130 insertions(+), 6 deletions(-) |
|
| 43 |
- |
|
| 44 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 45 |
-index dc21209..0cb49c4 100644 |
|
| 46 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 47 |
-@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) |
|
| 48 |
- return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; |
|
| 49 |
- } |
|
| 50 |
- |
|
| 51 |
-+#ifdef CONFIG_SMP |
|
| 52 |
-+extern void speculative_store_bypass_ht_init(void); |
|
| 53 |
-+#else |
|
| 54 |
-+static inline void speculative_store_bypass_ht_init(void) { }
|
|
| 55 |
-+#endif |
|
| 56 |
-+ |
|
| 57 |
- extern void speculative_store_bypass_update(void); |
|
| 58 |
- |
|
| 59 |
- #endif |
|
| 60 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 61 |
-index b3cd08e..1e9d155 100644 |
|
| 62 |
-+++ b/arch/x86/kernel/process.c |
|
| 63 |
-@@ -203,22 +203,135 @@ static inline void switch_to_bitmap(struct tss_struct *tss, |
|
| 64 |
- } |
|
| 65 |
- } |
|
| 66 |
- |
|
| 67 |
--static __always_inline void __speculative_store_bypass_update(unsigned long tifn) |
|
| 68 |
-+#ifdef CONFIG_SMP |
|
| 69 |
-+ |
|
| 70 |
-+struct ssb_state {
|
|
| 71 |
-+ struct ssb_state *shared_state; |
|
| 72 |
-+ raw_spinlock_t lock; |
|
| 73 |
-+ unsigned int disable_state; |
|
| 74 |
-+ unsigned long local_state; |
|
| 75 |
-+}; |
|
| 76 |
-+ |
|
| 77 |
-+#define LSTATE_SSB 0 |
|
| 78 |
-+ |
|
| 79 |
-+static DEFINE_PER_CPU(struct ssb_state, ssb_state); |
|
| 80 |
-+ |
|
| 81 |
-+void speculative_store_bypass_ht_init(void) |
|
| 82 |
- {
|
|
| 83 |
-- u64 msr; |
|
| 84 |
-+ struct ssb_state *st = this_cpu_ptr(&ssb_state); |
|
| 85 |
-+ unsigned int this_cpu = smp_processor_id(); |
|
| 86 |
-+ unsigned int cpu; |
|
| 87 |
-+ |
|
| 88 |
-+ st->local_state = 0; |
|
| 89 |
-+ |
|
| 90 |
-+ /* |
|
| 91 |
-+ * Shared state setup happens once on the first bringup |
|
| 92 |
-+ * of the CPU. It's not destroyed on CPU hotunplug. |
|
| 93 |
-+ */ |
|
| 94 |
-+ if (st->shared_state) |
|
| 95 |
-+ return; |
|
| 96 |
-+ |
|
| 97 |
-+ raw_spin_lock_init(&st->lock); |
|
| 98 |
- |
|
| 99 |
-- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
|
|
| 100 |
-- msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); |
|
| 101 |
-+ /* |
|
| 102 |
-+ * Go over HT siblings and check whether one of them has set up the |
|
| 103 |
-+ * shared state pointer already. |
|
| 104 |
-+ */ |
|
| 105 |
-+ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
|
|
| 106 |
-+ if (cpu == this_cpu) |
|
| 107 |
-+ continue; |
|
| 108 |
-+ |
|
| 109 |
-+ if (!per_cpu(ssb_state, cpu).shared_state) |
|
| 110 |
-+ continue; |
|
| 111 |
-+ |
|
| 112 |
-+ /* Link it to the state of the sibling: */ |
|
| 113 |
-+ st->shared_state = per_cpu(ssb_state, cpu).shared_state; |
|
| 114 |
-+ return; |
|
| 115 |
-+ } |
|
| 116 |
-+ |
|
| 117 |
-+ /* |
|
| 118 |
-+ * First HT sibling to come up on the core. Link shared state of |
|
| 119 |
-+ * the first HT sibling to itself. The siblings on the same core |
|
| 120 |
-+ * which come up later will see the shared state pointer and link |
|
| 121 |
-+ * themself to the state of this CPU. |
|
| 122 |
-+ */ |
|
| 123 |
-+ st->shared_state = st; |
|
| 124 |
-+} |
|
| 125 |
-+ |
|
| 126 |
-+/* |
|
| 127 |
-+ * Logic is: First HT sibling enables SSBD for both siblings in the core |
|
| 128 |
-+ * and last sibling to disable it, disables it for the whole core. This how |
|
| 129 |
-+ * MSR_SPEC_CTRL works in "hardware": |
|
| 130 |
-+ * |
|
| 131 |
-+ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL |
|
| 132 |
-+ */ |
|
| 133 |
-+static __always_inline void amd_set_core_ssb_state(unsigned long tifn) |
|
| 134 |
-+{ |
|
| 135 |
-+ struct ssb_state *st = this_cpu_ptr(&ssb_state); |
|
| 136 |
-+ u64 msr = x86_amd_ls_cfg_base; |
|
| 137 |
-+ |
|
| 138 |
-+ if (!static_cpu_has(X86_FEATURE_ZEN)) { |
|
| 139 |
-+ msr |= ssbd_tif_to_amd_ls_cfg(tifn); |
|
| 140 |
- wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 141 |
-+ return; |
|
| 142 |
-+ } |
|
| 143 |
-+ |
|
| 144 |
-+ if (tifn & _TIF_SSBD) { |
|
| 145 |
-+ /* |
|
| 146 |
-+ * Since this can race with prctl(), block reentry on the |
|
| 147 |
-+ * same CPU. |
|
| 148 |
-+ */ |
|
| 149 |
-+ if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) |
|
| 150 |
-+ return; |
|
| 151 |
-+ |
|
| 152 |
-+ msr |= x86_amd_ls_cfg_ssbd_mask; |
|
| 153 |
-+ |
|
| 154 |
-+ raw_spin_lock(&st->shared_state->lock); |
|
| 155 |
-+ /* First sibling enables SSBD: */ |
|
| 156 |
-+ if (!st->shared_state->disable_state) |
|
| 157 |
-+ wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 158 |
-+ st->shared_state->disable_state++; |
|
| 159 |
-+ raw_spin_unlock(&st->shared_state->lock); |
|
| 160 |
- } else { |
|
| 161 |
-- msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); |
|
| 162 |
-- wrmsrl(MSR_IA32_SPEC_CTRL, msr); |
|
| 163 |
-+ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) |
|
| 164 |
-+ return; |
|
| 165 |
-+ |
|
| 166 |
-+ raw_spin_lock(&st->shared_state->lock); |
|
| 167 |
-+ st->shared_state->disable_state--; |
|
| 168 |
-+ if (!st->shared_state->disable_state) |
|
| 169 |
-+ wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 170 |
-+ raw_spin_unlock(&st->shared_state->lock); |
|
| 171 |
- } |
|
| 172 |
- } |
|
| 173 |
-+#else |
|
| 174 |
-+static __always_inline void amd_set_core_ssb_state(unsigned long tifn) |
|
| 175 |
-+{ |
|
| 176 |
-+ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); |
|
| 177 |
-+ |
|
| 178 |
-+ wrmsrl(MSR_AMD64_LS_CFG, msr); |
|
| 179 |
-+} |
|
| 180 |
-+#endif |
|
| 181 |
-+ |
|
| 182 |
-+static __always_inline void intel_set_ssb_state(unsigned long tifn) |
|
| 183 |
-+{ |
|
| 184 |
-+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); |
|
| 185 |
-+ |
|
| 186 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, msr); |
|
| 187 |
-+} |
|
| 188 |
-+ |
|
| 189 |
-+static __always_inline void __speculative_store_bypass_update(unsigned long tifn) |
|
| 190 |
-+{ |
|
| 191 |
-+ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 192 |
-+ amd_set_core_ssb_state(tifn); |
|
| 193 |
-+ else |
|
| 194 |
-+ intel_set_ssb_state(tifn); |
|
| 195 |
-+} |
|
| 196 |
- |
|
| 197 |
- void speculative_store_bypass_update(void) |
|
| 198 |
- { |
|
| 199 |
-+ preempt_disable(); |
|
| 200 |
- __speculative_store_bypass_update(current_thread_info()->flags); |
|
| 201 |
-+ preempt_enable(); |
|
| 202 |
- } |
|
| 203 |
- |
|
| 204 |
- void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
|
| 205 |
-diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c |
|
| 206 |
-index 83929cc4..cb94514 100644 |
|
| 207 |
-+++ b/arch/x86/kernel/smpboot.c |
|
| 208 |
-@@ -75,6 +75,7 @@ |
|
| 209 |
- #include <asm/i8259.h> |
|
| 210 |
- #include <asm/realmode.h> |
|
| 211 |
- #include <asm/misc.h> |
|
| 212 |
-+#include <asm/spec-ctrl.h> |
|
| 213 |
- |
|
| 214 |
- /* Number of siblings per CPU package */ |
|
| 215 |
- int smp_num_siblings = 1; |
|
| 216 |
-@@ -229,6 +230,8 @@ static void notrace start_secondary(void *unused) |
|
| 217 |
- */ |
|
| 218 |
- check_tsc_sync_target(); |
|
| 219 |
- |
|
| 220 |
-+ speculative_store_bypass_ht_init(); |
|
| 221 |
-+ |
|
| 222 |
- /* |
|
| 223 |
- * Lock vector_lock and initialize the vectors on this cpu |
|
| 224 |
- * before setting the cpu online. We must set it online with |
|
| 225 |
-@@ -1325,6 +1328,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) |
|
| 226 |
- set_mtrr_aps_delayed_init(); |
|
| 227 |
- |
|
| 228 |
- smp_quirk_init_udelay(); |
|
| 229 |
-+ |
|
| 230 |
-+ speculative_store_bypass_ht_init(); |
|
| 231 |
- } |
|
| 232 |
- |
|
| 233 |
- void arch_enable_nonboot_cpus_begin(void) |
|
| 234 |
-2.7.4 |
|
| 235 |
- |
| 236 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,163 +0,0 @@ |
| 1 |
-From 56e877154f4bae42342edf644f77d446e102690d Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Wed, 9 May 2018 23:01:01 +0200 |
|
| 4 |
-Subject: [PATCH 44/54] x86/bugs, KVM: Extend speculation control for |
|
| 5 |
- VIRT_SPEC_CTRL |
|
| 6 |
- |
|
| 7 |
-commit ccbcd2674472a978b48c91c1fbfb66c0ff959f24 upstream |
|
| 8 |
- |
|
| 9 |
-AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store |
|
| 10 |
-Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care |
|
| 11 |
-about the bit position of the SSBD bit and thus facilitate migration. |
|
| 12 |
-Also, the sibling coordination on Family 17H CPUs can only be done on |
|
| 13 |
-the host. |
|
| 14 |
- |
|
| 15 |
-Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an |
|
| 16 |
-extra argument for the VIRT_SPEC_CTRL MSR. |
|
| 17 |
- |
|
| 18 |
-Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU |
|
| 19 |
-data structure which is going to be used in later patches for the actual |
|
| 20 |
-implementation. |
|
| 21 |
- |
|
| 22 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 23 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 24 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 25 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 26 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 27 |
- arch/x86/include/asm/spec-ctrl.h | 9 ++++++--- |
|
| 28 |
- arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++++++++-- |
|
| 29 |
- arch/x86/kvm/svm.c | 11 +++++++++-- |
|
| 30 |
- arch/x86/kvm/vmx.c | 5 +++-- |
|
| 31 |
- 4 files changed, 36 insertions(+), 9 deletions(-) |
|
| 32 |
- |
|
| 33 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 34 |
-index 0cb49c4..6e28740 100644 |
|
| 35 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 36 |
-@@ -10,10 +10,13 @@ |
|
| 37 |
- * the guest has, while on VMEXIT we restore the host view. This |
|
| 38 |
- * would be easier if SPEC_CTRL were architecturally maskable or |
|
| 39 |
- * shadowable for guests but this is not (currently) the case. |
|
| 40 |
-- * Takes the guest view of SPEC_CTRL MSR as a parameter. |
|
| 41 |
-+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also |
|
| 42 |
-+ * the guest's version of VIRT_SPEC_CTRL, if emulated. |
|
| 43 |
- */ |
|
| 44 |
--extern void x86_spec_ctrl_set_guest(u64); |
|
| 45 |
--extern void x86_spec_ctrl_restore_host(u64); |
|
| 46 |
-+extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, |
|
| 47 |
-+ u64 guest_virt_spec_ctrl); |
|
| 48 |
-+extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, |
|
| 49 |
-+ u64 guest_virt_spec_ctrl); |
|
| 50 |
- |
|
| 51 |
- /* AMD specific Speculative Store Bypass MSR data */ |
|
| 52 |
- extern u64 x86_amd_ls_cfg_base; |
|
| 53 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 54 |
-index 15a6c58..d00e246 100644 |
|
| 55 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 56 |
-@@ -150,7 +150,15 @@ u64 x86_spec_ctrl_get_default(void) |
|
| 57 |
- } |
|
| 58 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); |
|
| 59 |
- |
|
| 60 |
--void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 61 |
-+/** |
|
| 62 |
-+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest |
|
| 63 |
-+ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL |
|
| 64 |
-+ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL |
|
| 65 |
-+ * (may get translated to MSR_AMD64_LS_CFG bits) |
|
| 66 |
-+ * |
|
| 67 |
-+ * Avoids writing to the MSR if the content/bits are the same |
|
| 68 |
-+ */ |
|
| 69 |
-+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 70 |
- { |
|
| 71 |
- u64 host = x86_spec_ctrl_base; |
|
| 72 |
- |
|
| 73 |
-@@ -167,7 +175,15 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) |
|
| 74 |
- } |
|
| 75 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); |
|
| 76 |
- |
|
| 77 |
--void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) |
|
| 78 |
-+/** |
|
| 79 |
-+ * x86_spec_ctrl_restore_host - Restore host speculation control registers |
|
| 80 |
-+ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL |
|
| 81 |
-+ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL |
|
| 82 |
-+ * (may get translated to MSR_AMD64_LS_CFG bits) |
|
| 83 |
-+ * |
|
| 84 |
-+ * Avoids writing to the MSR if the content/bits are the same |
|
| 85 |
-+ */ |
|
| 86 |
-+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 87 |
- { |
|
| 88 |
- u64 host = x86_spec_ctrl_base; |
|
| 89 |
- |
|
| 90 |
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
|
| 91 |
-index d1a4321..57c96f1 100644 |
|
| 92 |
-+++ b/arch/x86/kvm/svm.c |
|
| 93 |
-@@ -185,6 +185,12 @@ struct vcpu_svm { |
|
| 94 |
- } host; |
|
| 95 |
- |
|
| 96 |
- u64 spec_ctrl; |
|
| 97 |
-+ /* |
|
| 98 |
-+ * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be |
|
| 99 |
-+ * translated into the appropriate L2_CFG bits on the host to |
|
| 100 |
-+ * perform speculative control. |
|
| 101 |
-+ */ |
|
| 102 |
-+ u64 virt_spec_ctrl; |
|
| 103 |
- |
|
| 104 |
- u32 *msrpm; |
|
| 105 |
- |
|
| 106 |
-@@ -1561,6 +1567,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
|
| 107 |
- u32 eax = 1; |
|
| 108 |
- |
|
| 109 |
- svm->spec_ctrl = 0; |
|
| 110 |
-+ svm->virt_spec_ctrl = 0; |
|
| 111 |
- |
|
| 112 |
- if (!init_event) { |
|
| 113 |
- svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | |
|
| 114 |
-@@ -4917,7 +4924,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 115 |
- * is no need to worry about the conditional branch over the wrmsr |
|
| 116 |
- * being speculatively taken. |
|
| 117 |
- */ |
|
| 118 |
-- x86_spec_ctrl_set_guest(svm->spec_ctrl); |
|
| 119 |
-+ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); |
|
| 120 |
- |
|
| 121 |
- asm volatile ( |
|
| 122 |
- "push %%" _ASM_BP "; \n\t" |
|
| 123 |
-@@ -5041,7 +5048,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 124 |
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
|
| 125 |
- svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
|
| 126 |
- |
|
| 127 |
-- x86_spec_ctrl_restore_host(svm->spec_ctrl); |
|
| 128 |
-+ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); |
|
| 129 |
- |
|
| 130 |
- reload_tss(vcpu); |
|
| 131 |
- |
|
| 132 |
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
|
| 133 |
-index 874b661..f1d158a 100644 |
|
| 134 |
-+++ b/arch/x86/kvm/vmx.c |
|
| 135 |
-@@ -8916,9 +8916,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 136 |
- * is no need to worry about the conditional branch over the wrmsr |
|
| 137 |
- * being speculatively taken. |
|
| 138 |
- */ |
|
| 139 |
-- x86_spec_ctrl_set_guest(vmx->spec_ctrl); |
|
| 140 |
-+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); |
|
| 141 |
- |
|
| 142 |
- vmx->__launched = vmx->loaded_vmcs->launched; |
|
| 143 |
-+ |
|
| 144 |
- asm( |
|
| 145 |
- /* Store host registers */ |
|
| 146 |
- "push %%" _ASM_DX "; push %%" _ASM_BP ";" |
|
| 147 |
-@@ -9054,7 +9055,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
|
| 148 |
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
|
| 149 |
- vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
|
| 150 |
- |
|
| 151 |
-- x86_spec_ctrl_restore_host(vmx->spec_ctrl); |
|
| 152 |
-+ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); |
|
| 153 |
- |
|
| 154 |
- /* Eliminate branch target predictions from guest mode */ |
|
| 155 |
- vmexit_fill_RSB(); |
|
| 156 |
-2.7.4 |
|
| 157 |
- |
| 158 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,104 +0,0 @@ |
| 1 |
-From 1aa0d4cbe3810b3161bad906163e198bb6f3f753 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Tom Lendacky <thomas.lendacky@amd.com> |
|
| 3 |
-Date: Thu, 17 May 2018 17:09:18 +0200 |
|
| 4 |
-Subject: [PATCH 45/54] x86/speculation: Add virtualized speculative store |
|
| 5 |
- bypass disable support |
|
| 6 |
- |
|
| 7 |
-commit 11fb0683493b2da112cd64c9dada221b52463bf7 upstream |
|
| 8 |
- |
|
| 9 |
-Some AMD processors only support a non-architectural means of enabling |
|
| 10 |
-speculative store bypass disable (SSBD). To allow a simplified view of |
|
| 11 |
-this to a guest, an architectural definition has been created through a new |
|
| 12 |
-CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a |
|
| 13 |
-hypervisor can virtualize the existence of this definition and provide an |
|
| 14 |
-architectural method for using SSBD to a guest. |
|
| 15 |
- |
|
| 16 |
-Add the new CPUID feature, the new MSR and update the existing SSBD |
|
| 17 |
-support to use this MSR when present. |
|
| 18 |
- |
|
| 19 |
-Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> |
|
| 20 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 21 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 22 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 23 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 24 |
- arch/x86/include/asm/cpufeatures.h | 1 + |
|
| 25 |
- arch/x86/include/asm/msr-index.h | 2 ++ |
|
| 26 |
- arch/x86/kernel/cpu/bugs.c | 4 +++- |
|
| 27 |
- arch/x86/kernel/process.c | 13 ++++++++++++- |
|
| 28 |
- 4 files changed, 18 insertions(+), 2 deletions(-) |
|
| 29 |
- |
|
| 30 |
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
|
| 31 |
-index ec87b8c..c278f27 100644 |
|
| 32 |
-+++ b/arch/x86/include/asm/cpufeatures.h |
|
| 33 |
-@@ -274,6 +274,7 @@ |
|
| 34 |
- #define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ |
|
| 35 |
- #define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ |
|
| 36 |
- #define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ |
|
| 37 |
-+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ |
|
| 38 |
- |
|
| 39 |
- /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ |
|
| 40 |
- #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ |
|
| 41 |
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
|
| 42 |
-index b67d57e..9fd9dcf 100644 |
|
| 43 |
-+++ b/arch/x86/include/asm/msr-index.h |
|
| 44 |
-@@ -323,6 +323,8 @@ |
|
| 45 |
- #define MSR_AMD64_IBSOPDATA4 0xc001103d |
|
| 46 |
- #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ |
|
| 47 |
- |
|
| 48 |
-+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f |
|
| 49 |
-+ |
|
| 50 |
- /* Fam 17h MSRs */ |
|
| 51 |
- #define MSR_F17H_IRPERF 0xc00000e9 |
|
| 52 |
- |
|
| 53 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 54 |
-index d00e246..97987b5 100644 |
|
| 55 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 56 |
-@@ -204,7 +204,9 @@ static void x86_amd_ssb_disable(void) |
|
| 57 |
- { |
|
| 58 |
- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; |
|
| 59 |
- |
|
| 60 |
-- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 61 |
-+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) |
|
| 62 |
-+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); |
|
| 63 |
-+ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 64 |
- wrmsrl(MSR_AMD64_LS_CFG, msrval); |
|
| 65 |
- } |
|
| 66 |
- |
|
| 67 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 68 |
-index 1e9d155..6d9e1ee 100644 |
|
| 69 |
-+++ b/arch/x86/kernel/process.c |
|
| 70 |
-@@ -312,6 +312,15 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn) |
|
| 71 |
- } |
|
| 72 |
- #endif |
|
| 73 |
- |
|
| 74 |
-+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) |
|
| 75 |
-+{ |
|
| 76 |
-+ /* |
|
| 77 |
-+ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, |
|
| 78 |
-+ * so ssbd_tif_to_spec_ctrl() just works. |
|
| 79 |
-+ */ |
|
| 80 |
-+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); |
|
| 81 |
-+} |
|
| 82 |
-+ |
|
| 83 |
- static __always_inline void intel_set_ssb_state(unsigned long tifn) |
|
| 84 |
- { |
|
| 85 |
- u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); |
|
| 86 |
-@@ -321,7 +330,9 @@ static __always_inline void intel_set_ssb_state(unsigned long tifn) |
|
| 87 |
- |
|
| 88 |
- static __always_inline void __speculative_store_bypass_update(unsigned long tifn) |
|
| 89 |
- { |
|
| 90 |
-- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 91 |
-+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) |
|
| 92 |
-+ amd_set_ssb_virt_state(tifn); |
|
| 93 |
-+ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 94 |
- amd_set_core_ssb_state(tifn); |
|
| 95 |
- else |
|
| 96 |
- intel_set_ssb_state(tifn); |
|
| 97 |
-2.7.4 |
|
| 98 |
- |
| 99 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,75 +0,0 @@ |
| 1 |
-From 5be915f638a3c90d5b8ee435f586377438bd4c05 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Thu, 10 May 2018 20:31:44 +0200 |
|
| 4 |
-Subject: [PATCH 46/54] x86/speculation: Rework |
|
| 5 |
- speculative_store_bypass_update() |
|
| 6 |
- |
|
| 7 |
-commit 0270be3e34efb05a88bc4c422572ece038ef3608 upstream |
|
| 8 |
- |
|
| 9 |
-The upcoming support for the virtual SPEC_CTRL MSR on AMD needs to reuse |
|
| 10 |
-speculative_store_bypass_update() to avoid code duplication. Add an |
|
| 11 |
-argument for supplying a thread info (TIF) value and create a wrapper |
|
| 12 |
-speculative_store_bypass_update_current() which is used at the existing |
|
| 13 |
-call site. |
|
| 14 |
- |
|
| 15 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 16 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 17 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 18 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 19 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 20 |
- arch/x86/include/asm/spec-ctrl.h | 7 ++++++- |
|
| 21 |
- arch/x86/kernel/cpu/bugs.c | 2 +- |
|
| 22 |
- arch/x86/kernel/process.c | 4 ++-- |
|
| 23 |
- 3 files changed, 9 insertions(+), 4 deletions(-) |
|
| 24 |
- |
|
| 25 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 26 |
-index 6e28740..82b6c5a 100644 |
|
| 27 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 28 |
-@@ -42,6 +42,11 @@ extern void speculative_store_bypass_ht_init(void); |
|
| 29 |
- static inline void speculative_store_bypass_ht_init(void) { } |
|
| 30 |
- #endif |
|
| 31 |
- |
|
| 32 |
--extern void speculative_store_bypass_update(void); |
|
| 33 |
-+extern void speculative_store_bypass_update(unsigned long tif); |
|
| 34 |
-+ |
|
| 35 |
-+static inline void speculative_store_bypass_update_current(void) |
|
| 36 |
-+{ |
|
| 37 |
-+ speculative_store_bypass_update(current_thread_info()->flags); |
|
| 38 |
-+} |
|
| 39 |
- |
|
| 40 |
- #endif |
|
| 41 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 42 |
-index 97987b5..eddbdc8 100644 |
|
| 43 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 44 |
-@@ -597,7 +597,7 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
|
| 45 |
- * mitigation until it is next scheduled. |
|
| 46 |
- */ |
|
| 47 |
- if (task == current && update) |
|
| 48 |
-- speculative_store_bypass_update(); |
|
| 49 |
-+ speculative_store_bypass_update_current(); |
|
| 50 |
- |
|
| 51 |
- return 0; |
|
| 52 |
- } |
|
| 53 |
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
|
| 54 |
-index 6d9e1ee..00a9047 100644 |
|
| 55 |
-+++ b/arch/x86/kernel/process.c |
|
| 56 |
-@@ -338,10 +338,10 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn |
|
| 57 |
- intel_set_ssb_state(tifn); |
|
| 58 |
- } |
|
| 59 |
- |
|
| 60 |
--void speculative_store_bypass_update(void) |
|
| 61 |
-+void speculative_store_bypass_update(unsigned long tif) |
|
| 62 |
- { |
|
| 63 |
- preempt_disable(); |
|
| 64 |
-- __speculative_store_bypass_update(current_thread_info()->flags); |
|
| 65 |
-+ __speculative_store_bypass_update(tif); |
|
| 66 |
- preempt_enable(); |
|
| 67 |
- } |
|
| 68 |
- |
|
| 69 |
-2.7.4 |
|
| 70 |
- |
| 71 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,145 +0,0 @@ |
| 1 |
-From 7b74e5e60aab9eb69f6e7df757292da84bdbee36 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Borislav Petkov <bp@suse.de> |
|
| 3 |
-Date: Sat, 12 May 2018 00:14:51 +0200 |
|
| 4 |
-Subject: [PATCH 47/54] x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host} |
|
| 5 |
- |
|
| 6 |
-commit cc69b34989210f067b2c51d5539b5f96ebcc3a01 upstream |
|
| 7 |
- |
|
| 8 |
-Function bodies are very similar and are going to grow more almost |
|
| 9 |
-identical code. Add a bool arg to determine whether SPEC_CTRL is being set |
|
| 10 |
-for the guest or restored to the host. |
|
| 11 |
- |
|
| 12 |
-No functional changes. |
|
| 13 |
- |
|
| 14 |
-Signed-off-by: Borislav Petkov <bp@suse.de> |
|
| 15 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 16 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 17 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 18 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 19 |
- arch/x86/include/asm/spec-ctrl.h | 33 +++++++++++++++++++--- |
|
| 20 |
- arch/x86/kernel/cpu/bugs.c | 60 ++++++++++------------------------------ |
|
| 21 |
- 2 files changed, 44 insertions(+), 49 deletions(-) |
|
| 22 |
- |
|
| 23 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 24 |
-index 82b6c5a..9cecbe5 100644 |
|
| 25 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 26 |
-@@ -13,10 +13,35 @@ |
|
| 27 |
- * Takes the guest view of SPEC_CTRL MSR as a parameter and also |
|
| 28 |
- * the guest's version of VIRT_SPEC_CTRL, if emulated. |
|
| 29 |
- */ |
|
| 30 |
--extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, |
|
| 31 |
-- u64 guest_virt_spec_ctrl); |
|
| 32 |
--extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, |
|
| 33 |
-- u64 guest_virt_spec_ctrl); |
|
| 34 |
-+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest); |
|
| 35 |
-+ |
|
| 36 |
-+/** |
|
| 37 |
-+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest |
|
| 38 |
-+ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL |
|
| 39 |
-+ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL |
|
| 40 |
-+ * (may get translated to MSR_AMD64_LS_CFG bits) |
|
| 41 |
-+ * |
|
| 42 |
-+ * Avoids writing to the MSR if the content/bits are the same |
|
| 43 |
-+ */ |
|
| 44 |
-+static inline |
|
| 45 |
-+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 46 |
-+{ |
|
| 47 |
-+ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true); |
|
| 48 |
-+} |
|
| 49 |
-+ |
|
| 50 |
-+/** |
|
| 51 |
-+ * x86_spec_ctrl_restore_host - Restore host speculation control registers |
|
| 52 |
-+ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL |
|
| 53 |
-+ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL |
|
| 54 |
-+ * (may get translated to MSR_AMD64_LS_CFG bits) |
|
| 55 |
-+ * |
|
| 56 |
-+ * Avoids writing to the MSR if the content/bits are the same |
|
| 57 |
-+ */ |
|
| 58 |
-+static inline |
|
| 59 |
-+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 60 |
-+{ |
|
| 61 |
-+ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false); |
|
| 62 |
-+} |
|
| 63 |
- |
|
| 64 |
- /* AMD specific Speculative Store Bypass MSR data */ |
|
| 65 |
- extern u64 x86_amd_ls_cfg_base; |
|
| 66 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 67 |
-index eddbdc8..9203150 100644 |
|
| 68 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 69 |
-@@ -150,55 +150,25 @@ u64 x86_spec_ctrl_get_default(void) |
|
| 70 |
- } |
|
| 71 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); |
|
| 72 |
- |
|
| 73 |
--/** |
|
| 74 |
-- * x86_spec_ctrl_set_guest - Set speculation control registers for the guest |
|
| 75 |
-- * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL |
|
| 76 |
-- * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL |
|
| 77 |
-- * (may get translated to MSR_AMD64_LS_CFG bits) |
|
| 78 |
-- * |
|
| 79 |
-- * Avoids writing to the MSR if the content/bits are the same |
|
| 80 |
-- */ |
|
| 81 |
--void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 82 |
-+void |
|
| 83 |
-+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) |
|
| 84 |
- { |
|
| 85 |
-- u64 host = x86_spec_ctrl_base; |
|
| 86 |
-+ struct thread_info *ti = current_thread_info(); |
|
| 87 |
-+ u64 msr, host = x86_spec_ctrl_base; |
|
| 88 |
- |
|
| 89 |
- /* Is MSR_SPEC_CTRL implemented ? */ |
|
| 90 |
-- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 91 |
-- return; |
|
| 92 |
-- |
|
| 93 |
-- /* SSBD controlled in MSR_SPEC_CTRL */ |
|
| 94 |
-- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 95 |
-- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 96 |
-- |
|
| 97 |
-- if (host != guest_spec_ctrl) |
|
| 98 |
-- wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); |
|
| 99 |
--} |
|
| 100 |
--EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); |
|
| 101 |
-- |
|
| 102 |
--/** |
|
| 103 |
-- * x86_spec_ctrl_restore_host - Restore host speculation control registers |
|
| 104 |
-- * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL |
|
| 105 |
-- * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL |
|
| 106 |
-- * (may get translated to MSR_AMD64_LS_CFG bits) |
|
| 107 |
-- * |
|
| 108 |
-- * Avoids writing to the MSR if the content/bits are the same |
|
| 109 |
-- */ |
|
| 110 |
--void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 111 |
--{ |
|
| 112 |
-- u64 host = x86_spec_ctrl_base; |
|
| 113 |
-- |
|
| 114 |
-- /* Is MSR_SPEC_CTRL implemented ? */ |
|
| 115 |
-- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 116 |
-- return; |
|
| 117 |
-- |
|
| 118 |
-- /* SSBD controlled in MSR_SPEC_CTRL */ |
|
| 119 |
-- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 120 |
-- host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 121 |
-- |
|
| 122 |
-- if (host != guest_spec_ctrl) |
|
| 123 |
-- wrmsrl(MSR_IA32_SPEC_CTRL, host); |
|
| 124 |
-+ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { |
|
| 125 |
-+ /* SSBD controlled in MSR_SPEC_CTRL */ |
|
| 126 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 127 |
-+ host |= ssbd_tif_to_spec_ctrl(ti->flags); |
|
| 128 |
-+ |
|
| 129 |
-+ if (host != guest_spec_ctrl) { |
|
| 130 |
-+ msr = setguest ? guest_spec_ctrl : host; |
|
| 131 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, msr); |
|
| 132 |
-+ } |
|
| 133 |
-+ } |
|
| 134 |
- } |
|
| 135 |
--EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); |
|
| 136 |
-+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); |
|
| 137 |
- |
|
| 138 |
- static void x86_amd_ssb_disable(void) |
|
| 139 |
- { |
|
| 140 |
-2.7.4 |
|
| 141 |
- |
| 142 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,120 +0,0 @@ |
| 1 |
-From deeee7b974828f923efeb93946efc701dd668f78 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sat, 12 May 2018 20:49:16 +0200 |
|
| 4 |
-Subject: [PATCH 48/54] x86/bugs: Expose x86_spec_ctrl_base directly |
|
| 5 |
- |
|
| 6 |
-commit fa8ac4988249c38476f6ad678a4848a736373403 upstream |
|
| 7 |
- |
|
| 8 |
-x86_spec_ctrl_base is the system wide default value for the SPEC_CTRL MSR. |
|
| 9 |
-x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to |
|
| 10 |
-prevent modification to that variable. Though the variable is read only |
|
| 11 |
-after init and globaly visible already. |
|
| 12 |
- |
|
| 13 |
-Remove the function and export the variable instead. |
|
| 14 |
- |
|
| 15 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 16 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 17 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 18 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 19 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 20 |
- arch/x86/include/asm/nospec-branch.h | 16 +++++----------- |
|
| 21 |
- arch/x86/include/asm/spec-ctrl.h | 3 --- |
|
| 22 |
- arch/x86/kernel/cpu/bugs.c | 11 +---------- |
|
| 23 |
- 3 files changed, 6 insertions(+), 24 deletions(-) |
|
| 24 |
- |
|
| 25 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 26 |
-index bc258e6..8d9deec 100644 |
|
| 27 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 28 |
-@@ -217,16 +217,7 @@ enum spectre_v2_mitigation { |
|
| 29 |
- SPECTRE_V2_IBRS, |
|
| 30 |
- }; |
|
| 31 |
- |
|
| 32 |
--/* |
|
| 33 |
-- * The Intel specification for the SPEC_CTRL MSR requires that we |
|
| 34 |
-- * preserve any already set reserved bits at boot time (e.g. for |
|
| 35 |
-- * future additions that this kernel is not currently aware of). |
|
| 36 |
-- * We then set any additional mitigation bits that we want |
|
| 37 |
-- * ourselves and always use this as the base for SPEC_CTRL. |
|
| 38 |
-- * We also use this when handling guest entry/exit as below. |
|
| 39 |
-- */ |
|
| 40 |
- extern void x86_spec_ctrl_set(u64); |
|
| 41 |
--extern u64 x86_spec_ctrl_get_default(void); |
|
| 42 |
- |
|
| 43 |
- /* The Speculative Store Bypass disable variants */ |
|
| 44 |
- enum ssb_mitigation { |
|
| 45 |
-@@ -278,6 +269,9 @@ static inline void indirect_branch_prediction_barrier(void) |
|
| 46 |
- alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); |
|
| 47 |
- } |
|
| 48 |
- |
|
| 49 |
-+/* The Intel SPEC CTRL MSR base value cache */ |
|
| 50 |
-+extern u64 x86_spec_ctrl_base; |
|
| 51 |
-+ |
|
| 52 |
- /* |
|
| 53 |
- * With retpoline, we must use IBRS to restrict branch prediction |
|
| 54 |
- * before calling into firmware. |
|
| 55 |
-@@ -286,7 +280,7 @@ static inline void indirect_branch_prediction_barrier(void) |
|
| 56 |
- */ |
|
| 57 |
- #define firmware_restrict_branch_speculation_start() \ |
|
| 58 |
- do { \ |
|
| 59 |
-- u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \ |
|
| 60 |
-+ u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ |
|
| 61 |
- \ |
|
| 62 |
- preempt_disable(); \ |
|
| 63 |
- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ |
|
| 64 |
-@@ -295,7 +289,7 @@ do { \ |
|
| 65 |
- |
|
| 66 |
- #define firmware_restrict_branch_speculation_end() \ |
|
| 67 |
- do { \ |
|
| 68 |
-- u64 val = x86_spec_ctrl_get_default(); \ |
|
| 69 |
-+ u64 val = x86_spec_ctrl_base; \ |
|
| 70 |
- \ |
|
| 71 |
- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ |
|
| 72 |
- X86_FEATURE_USE_IBRS_FW); \ |
|
| 73 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 74 |
-index 9cecbe5..763d497 100644 |
|
| 75 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 76 |
-@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) |
|
| 77 |
- extern u64 x86_amd_ls_cfg_base; |
|
| 78 |
- extern u64 x86_amd_ls_cfg_ssbd_mask; |
|
| 79 |
- |
|
| 80 |
--/* The Intel SPEC CTRL MSR base value cache */ |
|
| 81 |
--extern u64 x86_spec_ctrl_base; |
|
| 82 |
-- |
|
| 83 |
- static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) |
|
| 84 |
- { |
|
| 85 |
- BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); |
|
| 86 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 87 |
-index 9203150..47b7f4f 100644 |
|
| 88 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 89 |
-@@ -35,6 +35,7 @@ static void __init ssb_select_mitigation(void); |
|
| 90 |
- * writes to SPEC_CTRL contain whatever reserved bits have been set. |
|
| 91 |
- */ |
|
| 92 |
- u64 __ro_after_init x86_spec_ctrl_base; |
|
| 93 |
-+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); |
|
| 94 |
- |
|
| 95 |
- /* |
|
| 96 |
- * The vendor and possibly platform specific bits which can be modified in |
|
| 97 |
-@@ -140,16 +141,6 @@ void x86_spec_ctrl_set(u64 val) |
|
| 98 |
- } |
|
| 99 |
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_set); |
|
| 100 |
- |
|
| 101 |
--u64 x86_spec_ctrl_get_default(void) |
|
| 102 |
--{ |
|
| 103 |
-- u64 msrval = x86_spec_ctrl_base; |
|
| 104 |
-- |
|
| 105 |
-- if (static_cpu_has(X86_FEATURE_SPEC_CTRL)) |
|
| 106 |
-- msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags); |
|
| 107 |
-- return msrval; |
|
| 108 |
--} |
|
| 109 |
--EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); |
|
| 110 |
-- |
|
| 111 |
- void |
|
| 112 |
- x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) |
|
| 113 |
- { |
|
| 114 |
-2.7.4 |
|
| 115 |
- |
| 116 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,76 +0,0 @@ |
| 1 |
-From 5eb348cfda0c0efd41e44ca4ba6e267eb5877a3a Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sat, 12 May 2018 20:53:14 +0200 |
|
| 4 |
-Subject: [PATCH 49/54] x86/bugs: Remove x86_spec_ctrl_set() |
|
| 5 |
- |
|
| 6 |
-commit 4b59bdb569453a60b752b274ca61f009e37f4dae upstream |
|
| 7 |
- |
|
| 8 |
-x86_spec_ctrl_set() is only used in bugs.c and the extra mask checks there |
|
| 9 |
-provide no real value as both call sites can just write x86_spec_ctrl_base |
|
| 10 |
-to MSR_SPEC_CTRL. x86_spec_ctrl_base is valid and does not need any extra |
|
| 11 |
-masking or checking. |
|
| 12 |
- |
|
| 13 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 14 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 15 |
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 16 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 17 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 18 |
- arch/x86/include/asm/nospec-branch.h | 2 -- |
|
| 19 |
- arch/x86/kernel/cpu/bugs.c | 13 ++----------- |
|
| 20 |
- 2 files changed, 2 insertions(+), 13 deletions(-) |
|
| 21 |
- |
|
| 22 |
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
|
| 23 |
-index 8d9deec..8b38df9 100644 |
|
| 24 |
-+++ b/arch/x86/include/asm/nospec-branch.h |
|
| 25 |
-@@ -217,8 +217,6 @@ enum spectre_v2_mitigation { |
|
| 26 |
- SPECTRE_V2_IBRS, |
|
| 27 |
- }; |
|
| 28 |
- |
|
| 29 |
--extern void x86_spec_ctrl_set(u64); |
|
| 30 |
-- |
|
| 31 |
- /* The Speculative Store Bypass disable variants */ |
|
| 32 |
- enum ssb_mitigation { |
|
| 33 |
- SPEC_STORE_BYPASS_NONE, |
|
| 34 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 35 |
-index 47b7f4f..82a99d0 100644 |
|
| 36 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 37 |
-@@ -132,15 +132,6 @@ static const char *spectre_v2_strings[] = { |
|
| 38 |
- static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = |
|
| 39 |
- SPECTRE_V2_NONE; |
|
| 40 |
- |
|
| 41 |
--void x86_spec_ctrl_set(u64 val) |
|
| 42 |
--{ |
|
| 43 |
-- if (val & x86_spec_ctrl_mask) |
|
| 44 |
-- WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val); |
|
| 45 |
-- else |
|
| 46 |
-- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val); |
|
| 47 |
--} |
|
| 48 |
--EXPORT_SYMBOL_GPL(x86_spec_ctrl_set); |
|
| 49 |
-- |
|
| 50 |
- void |
|
| 51 |
- x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) |
|
| 52 |
- { |
|
| 53 |
-@@ -502,7 +493,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) |
|
| 54 |
- case X86_VENDOR_INTEL: |
|
| 55 |
- x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
|
| 56 |
- x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD; |
|
| 57 |
-- x86_spec_ctrl_set(SPEC_CTRL_SSBD); |
|
| 58 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 59 |
- break; |
|
| 60 |
- case X86_VENDOR_AMD: |
|
| 61 |
- x86_amd_ssb_disable(); |
|
| 62 |
-@@ -614,7 +605,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
|
| 63 |
- void x86_spec_ctrl_setup_ap(void) |
|
| 64 |
- { |
|
| 65 |
- if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 66 |
-- x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask); |
|
| 67 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 68 |
- |
|
| 69 |
- if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) |
|
| 70 |
- x86_amd_ssb_disable(); |
|
| 71 |
-2.7.4 |
|
| 72 |
- |
| 73 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,95 +0,0 @@ |
| 1 |
-From 899b5d7cb4192d50163bb45e4957df11b4557696 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Sat, 12 May 2018 20:10:00 +0200 |
|
| 4 |
-Subject: [PATCH 50/54] x86/bugs: Rework spec_ctrl base and mask logic |
|
| 5 |
- |
|
| 6 |
-commit be6fcb5478e95bb1c91f489121238deb3abca46a upstream |
|
| 7 |
- |
|
| 8 |
-x86_spec_ctrL_mask is intended to mask out bits from a MSR_SPEC_CTRL value |
|
| 9 |
-which are not to be modified. However the implementation is not really used |
|
| 10 |
-and the bitmask was inverted to make a check easier, which was removed in |
|
| 11 |
-"x86/bugs: Remove x86_spec_ctrl_set()" |
|
| 12 |
- |
|
| 13 |
-Aside of that it is missing the STIBP bit if it is supported by the |
|
| 14 |
-platform, so if the mask would be used in x86_virt_spec_ctrl() then it |
|
| 15 |
-would prevent a guest from setting STIBP. |
|
| 16 |
- |
|
| 17 |
-Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to |
|
| 18 |
-sanitize the value which is supplied by the guest. |
|
| 19 |
- |
|
| 20 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 21 |
-Reviewed-by: Borislav Petkov <bp@suse.de> |
|
| 22 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 23 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 24 |
- arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++------- |
|
| 25 |
- 1 file changed, 19 insertions(+), 7 deletions(-) |
|
| 26 |
- |
|
| 27 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 28 |
-index 82a99d0..2ae3586 100644 |
|
| 29 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 30 |
-@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); |
|
| 31 |
- * The vendor and possibly platform specific bits which can be modified in |
|
| 32 |
- * x86_spec_ctrl_base. |
|
| 33 |
- */ |
|
| 34 |
--static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS; |
|
| 35 |
-+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; |
|
| 36 |
- |
|
| 37 |
- /* |
|
| 38 |
- * AMD specific MSR info for Speculative Store Bypass control. |
|
| 39 |
-@@ -67,6 +67,10 @@ void __init check_bugs(void) |
|
| 40 |
- if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
|
| 41 |
- rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 42 |
- |
|
| 43 |
-+ /* Allow STIBP in MSR_SPEC_CTRL if supported */ |
|
| 44 |
-+ if (boot_cpu_has(X86_FEATURE_STIBP)) |
|
| 45 |
-+ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; |
|
| 46 |
-+ |
|
| 47 |
- /* Select the proper spectre mitigation before patching alternatives */ |
|
| 48 |
- spectre_v2_select_mitigation(); |
|
| 49 |
- |
|
| 50 |
-@@ -135,18 +139,26 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = |
|
| 51 |
- void |
|
| 52 |
- x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) |
|
| 53 |
- { |
|
| 54 |
-+ u64 msrval, guestval, hostval = x86_spec_ctrl_base; |
|
| 55 |
- struct thread_info *ti = current_thread_info(); |
|
| 56 |
-- u64 msr, host = x86_spec_ctrl_base; |
|
| 57 |
- |
|
| 58 |
- /* Is MSR_SPEC_CTRL implemented ? */ |
|
| 59 |
- if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { |
|
| 60 |
-+ /* |
|
| 61 |
-+ * Restrict guest_spec_ctrl to supported values. Clear the |
|
| 62 |
-+ * modifiable bits in the host base value and or the |
|
| 63 |
-+ * modifiable bits from the guest value. |
|
| 64 |
-+ */ |
|
| 65 |
-+ guestval = hostval & ~x86_spec_ctrl_mask; |
|
| 66 |
-+ guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; |
|
| 67 |
-+ |
|
| 68 |
- /* SSBD controlled in MSR_SPEC_CTRL */ |
|
| 69 |
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 70 |
-- host |= ssbd_tif_to_spec_ctrl(ti->flags); |
|
| 71 |
-+ hostval |= ssbd_tif_to_spec_ctrl(ti->flags); |
|
| 72 |
- |
|
| 73 |
-- if (host != guest_spec_ctrl) { |
|
| 74 |
-- msr = setguest ? guest_spec_ctrl : host; |
|
| 75 |
-- wrmsrl(MSR_IA32_SPEC_CTRL, msr); |
|
| 76 |
-+ if (hostval != guestval) { |
|
| 77 |
-+ msrval = setguest ? guestval : hostval; |
|
| 78 |
-+ wrmsrl(MSR_IA32_SPEC_CTRL, msrval); |
|
| 79 |
- } |
|
| 80 |
- } |
|
| 81 |
- } |
|
| 82 |
-@@ -492,7 +504,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) |
|
| 83 |
- switch (boot_cpu_data.x86_vendor) { |
|
| 84 |
- case X86_VENDOR_INTEL: |
|
| 85 |
- x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
|
| 86 |
-- x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD; |
|
| 87 |
-+ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; |
|
| 88 |
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
|
| 89 |
- break; |
|
| 90 |
- case X86_VENDOR_AMD: |
|
| 91 |
-2.7.4 |
|
| 92 |
- |
| 93 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,84 +0,0 @@ |
| 1 |
-From ea1f82069f5733039a4a535c3f8260c61b0ef793 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Thomas Gleixner <tglx@linutronix.de> |
|
| 3 |
-Date: Thu, 10 May 2018 20:42:48 +0200 |
|
| 4 |
-Subject: [PATCH 51/54] x86/speculation, KVM: Implement support for |
|
| 5 |
- VIRT_SPEC_CTRL/LS_CFG |
|
| 6 |
- |
|
| 7 |
-commit 47c61b3955cf712cadfc25635bf9bc174af030ea upstream |
|
| 8 |
- |
|
| 9 |
-Add the necessary logic for supporting the emulated VIRT_SPEC_CTRL MSR to |
|
| 10 |
-x86_virt_spec_ctrl(). If either X86_FEATURE_LS_CFG_SSBD or |
|
| 11 |
-X86_FEATURE_VIRT_SPEC_CTRL is set then use the new guest_virt_spec_ctrl |
|
| 12 |
-argument to check whether the state must be modified on the host. The |
|
| 13 |
-update reuses speculative_store_bypass_update() so the ZEN-specific sibling |
|
| 14 |
-coordination can be reused. |
|
| 15 |
- |
|
| 16 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 17 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 18 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 19 |
- arch/x86/include/asm/spec-ctrl.h | 6 ++++++ |
|
| 20 |
- arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++++ |
|
| 21 |
- 2 files changed, 36 insertions(+) |
|
| 22 |
- |
|
| 23 |
-diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h |
|
| 24 |
-index 763d497..ae7c2c5 100644 |
|
| 25 |
-+++ b/arch/x86/include/asm/spec-ctrl.h |
|
| 26 |
-@@ -53,6 +53,12 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) |
|
| 27 |
- return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); |
|
| 28 |
- } |
|
| 29 |
- |
|
| 30 |
-+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) |
|
| 31 |
-+{ |
|
| 32 |
-+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); |
|
| 33 |
-+ return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); |
|
| 34 |
-+} |
|
| 35 |
-+ |
|
| 36 |
- static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) |
|
| 37 |
- { |
|
| 38 |
- return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; |
|
| 39 |
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
|
| 40 |
-index 2ae3586..86af9b1 100644 |
|
| 41 |
-+++ b/arch/x86/kernel/cpu/bugs.c |
|
| 42 |
-@@ -161,6 +161,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) |
|
| 43 |
- wrmsrl(MSR_IA32_SPEC_CTRL, msrval); |
|
| 44 |
- } |
|
| 45 |
- } |
|
| 46 |
-+ |
|
| 47 |
-+ /* |
|
| 48 |
-+ * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update |
|
| 49 |
-+ * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. |
|
| 50 |
-+ */ |
|
| 51 |
-+ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && |
|
| 52 |
-+ !static_cpu_has(X86_FEATURE_VIRT_SSBD)) |
|
| 53 |
-+ return; |
|
| 54 |
-+ |
|
| 55 |
-+ /* |
|
| 56 |
-+ * If the host has SSBD mitigation enabled, force it in the host's |
|
| 57 |
-+ * virtual MSR value. If its not permanently enabled, evaluate |
|
| 58 |
-+ * current's TIF_SSBD thread flag. |
|
| 59 |
-+ */ |
|
| 60 |
-+ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) |
|
| 61 |
-+ hostval = SPEC_CTRL_SSBD; |
|
| 62 |
-+ else |
|
| 63 |
-+ hostval = ssbd_tif_to_spec_ctrl(ti->flags); |
|
| 64 |
-+ |
|
| 65 |
-+ /* Sanitize the guest value */ |
|
| 66 |
-+ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; |
|
| 67 |
-+ |
|
| 68 |
-+ if (hostval != guestval) { |
|
| 69 |
-+ unsigned long tif; |
|
| 70 |
-+ |
|
| 71 |
-+ tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : |
|
| 72 |
-+ ssbd_spec_ctrl_to_tif(hostval); |
|
| 73 |
-+ |
|
| 74 |
-+ speculative_store_bypass_update(tif); |
|
| 75 |
-+ } |
|
| 76 |
- } |
|
| 77 |
- EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); |
|
| 78 |
- |
|
| 79 |
-2.7.4 |
|
| 80 |
- |
| 81 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,241 +0,0 @@ |
| 1 |
-From 2bf8ebaf99e44d838272a02b32501511d716cb64 Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Tom Lendacky <thomas.lendacky@amd.com> |
|
| 3 |
-Date: Thu, 10 May 2018 22:06:39 +0200 |
|
| 4 |
-Subject: [PATCH 52/54] KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD |
|
| 5 |
- |
|
| 6 |
-commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream |
|
| 7 |
- |
|
| 8 |
-Expose the new virtualized architectural mechanism, VIRT_SSBD, for using |
|
| 9 |
-speculative store bypass disable (SSBD) under SVM. This will allow guests |
|
| 10 |
-to use SSBD on hardware that uses non-architectural mechanisms for enabling |
|
| 11 |
-SSBD. |
|
| 12 |
- |
|
| 13 |
-[ tglx: Folded the migration fixup from Paolo Bonzini ] |
|
| 14 |
- |
|
| 15 |
-Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> |
|
| 16 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 17 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 18 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 19 |
- arch/x86/include/asm/kvm_host.h | 2 +- |
|
| 20 |
- arch/x86/kernel/cpu/common.c | 3 ++- |
|
| 21 |
- arch/x86/kvm/cpuid.c | 11 +++++++++-- |
|
| 22 |
- arch/x86/kvm/cpuid.h | 9 +++++++++ |
|
| 23 |
- arch/x86/kvm/svm.c | 21 +++++++++++++++++++-- |
|
| 24 |
- arch/x86/kvm/vmx.c | 18 +++++++++++++++--- |
|
| 25 |
- arch/x86/kvm/x86.c | 13 ++++--------- |
|
| 26 |
- 7 files changed, 59 insertions(+), 18 deletions(-) |
|
| 27 |
- |
|
| 28 |
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
|
| 29 |
-index 20cfeeb..7598a6c 100644 |
|
| 30 |
-+++ b/arch/x86/include/asm/kvm_host.h |
|
| 31 |
-@@ -864,7 +864,7 @@ struct kvm_x86_ops { |
|
| 32 |
- int (*hardware_setup)(void); /* __init */ |
|
| 33 |
- void (*hardware_unsetup)(void); /* __exit */ |
|
| 34 |
- bool (*cpu_has_accelerated_tpr)(void); |
|
| 35 |
-- bool (*cpu_has_high_real_mode_segbase)(void); |
|
| 36 |
-+ bool (*has_emulated_msr)(int index); |
|
| 37 |
- void (*cpuid_update)(struct kvm_vcpu *vcpu); |
|
| 38 |
- |
|
| 39 |
- int (*vm_init)(struct kvm *kvm); |
|
| 40 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 41 |
-index 945e841..40fc748 100644 |
|
| 42 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 43 |
-@@ -735,7 +735,8 @@ static void init_speculation_control(struct cpuinfo_x86 *c) |
|
| 44 |
- if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) |
|
| 45 |
- set_cpu_cap(c, X86_FEATURE_STIBP); |
|
| 46 |
- |
|
| 47 |
-- if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD)) |
|
| 48 |
-+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || |
|
| 49 |
-+ cpu_has(c, X86_FEATURE_VIRT_SSBD)) |
|
| 50 |
- set_cpu_cap(c, X86_FEATURE_SSBD); |
|
| 51 |
- |
|
| 52 |
- if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { |
|
| 53 |
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
|
| 54 |
-index 910c2db..a69f18d 100644 |
|
| 55 |
-+++ b/arch/x86/kvm/cpuid.c |
|
| 56 |
-@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
|
| 57 |
- |
|
| 58 |
- /* cpuid 0x80000008.ebx */ |
|
| 59 |
- const u32 kvm_cpuid_8000_0008_ebx_x86_features = |
|
| 60 |
-- F(AMD_IBPB) | F(AMD_IBRS); |
|
| 61 |
-+ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD); |
|
| 62 |
- |
|
| 63 |
- /* cpuid 0xC0000001.edx */ |
|
| 64 |
- const u32 kvm_cpuid_C000_0001_edx_x86_features = |
|
| 65 |
-@@ -618,13 +618,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
|
| 66 |
- g_phys_as = phys_as; |
|
| 67 |
- entry->eax = g_phys_as | (virt_as << 8); |
|
| 68 |
- entry->edx = 0; |
|
| 69 |
-- /* IBRS and IBPB aren't necessarily present in hardware cpuid */ |
|
| 70 |
-+ /* |
|
| 71 |
-+ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in |
|
| 72 |
-+ * hardware cpuid |
|
| 73 |
-+ */ |
|
| 74 |
- if (boot_cpu_has(X86_FEATURE_AMD_IBPB)) |
|
| 75 |
- entry->ebx |= F(AMD_IBPB); |
|
| 76 |
- if (boot_cpu_has(X86_FEATURE_AMD_IBRS)) |
|
| 77 |
- entry->ebx |= F(AMD_IBRS); |
|
| 78 |
-+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) |
|
| 79 |
-+ entry->ebx |= F(VIRT_SSBD); |
|
| 80 |
- entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; |
|
| 81 |
- cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); |
|
| 82 |
-+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) |
|
| 83 |
-+ entry->ebx |= F(VIRT_SSBD); |
|
| 84 |
- break; |
|
| 85 |
- } |
|
| 86 |
- case 0x80000019: |
|
| 87 |
-diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h |
|
| 88 |
-index eb47c37..c383697 100644 |
|
| 89 |
-+++ b/arch/x86/kvm/cpuid.h |
|
| 90 |
-@@ -190,6 +190,15 @@ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu) |
|
| 91 |
- return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES)); |
|
| 92 |
- } |
|
| 93 |
- |
|
| 94 |
-+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu) |
|
| 95 |
-+{ |
|
| 96 |
-+ struct kvm_cpuid_entry2 *best; |
|
| 97 |
-+ |
|
| 98 |
-+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); |
|
| 99 |
-+ return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD)); |
|
| 100 |
-+} |
|
| 101 |
-+ |
|
| 102 |
-+ |
|
| 103 |
- |
|
| 104 |
- /* |
|
| 105 |
- * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3 |
|
| 106 |
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
|
| 107 |
-index 57c96f1..a27f9e4 100644 |
|
| 108 |
-+++ b/arch/x86/kvm/svm.c |
|
| 109 |
-@@ -3557,6 +3557,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
|
| 110 |
- |
|
| 111 |
- msr_info->data = svm->spec_ctrl; |
|
| 112 |
- break; |
|
| 113 |
-+ case MSR_AMD64_VIRT_SPEC_CTRL: |
|
| 114 |
-+ if (!msr_info->host_initiated && |
|
| 115 |
-+ !guest_cpuid_has_virt_ssbd(vcpu)) |
|
| 116 |
-+ return 1; |
|
| 117 |
-+ |
|
| 118 |
-+ msr_info->data = svm->virt_spec_ctrl; |
|
| 119 |
-+ break; |
|
| 120 |
- case MSR_IA32_UCODE_REV: |
|
| 121 |
- msr_info->data = 0x01000065; |
|
| 122 |
- break; |
|
| 123 |
-@@ -3691,6 +3698,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
|
| 124 |
- break; |
|
| 125 |
- set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); |
|
| 126 |
- break; |
|
| 127 |
-+ case MSR_AMD64_VIRT_SPEC_CTRL: |
|
| 128 |
-+ if (!msr->host_initiated && |
|
| 129 |
-+ !guest_cpuid_has_virt_ssbd(vcpu)) |
|
| 130 |
-+ return 1; |
|
| 131 |
-+ |
|
| 132 |
-+ if (data & ~SPEC_CTRL_SSBD) |
|
| 133 |
-+ return 1; |
|
| 134 |
-+ |
|
| 135 |
-+ svm->virt_spec_ctrl = data; |
|
| 136 |
-+ break; |
|
| 137 |
- case MSR_STAR: |
|
| 138 |
- svm->vmcb->save.star = data; |
|
| 139 |
- break; |
|
| 140 |
-@@ -5150,7 +5167,7 @@ static bool svm_cpu_has_accelerated_tpr(void) |
|
| 141 |
- return false; |
|
| 142 |
- } |
|
| 143 |
- |
|
| 144 |
--static bool svm_has_high_real_mode_segbase(void) |
|
| 145 |
-+static bool svm_has_emulated_msr(int index) |
|
| 146 |
- { |
|
| 147 |
- return true; |
|
| 148 |
- } |
|
| 149 |
-@@ -5467,7 +5484,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { |
|
| 150 |
- .hardware_enable = svm_hardware_enable, |
|
| 151 |
- .hardware_disable = svm_hardware_disable, |
|
| 152 |
- .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, |
|
| 153 |
-- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase, |
|
| 154 |
-+ .has_emulated_msr = svm_has_emulated_msr, |
|
| 155 |
- |
|
| 156 |
- .vcpu_create = svm_create_vcpu, |
|
| 157 |
- .vcpu_free = svm_free_vcpu, |
|
| 158 |
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
|
| 159 |
-index f1d158a..d92523a 100644 |
|
| 160 |
-+++ b/arch/x86/kvm/vmx.c |
|
| 161 |
-@@ -8691,9 +8691,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) |
|
| 162 |
- } |
|
| 163 |
- } |
|
| 164 |
- |
|
| 165 |
--static bool vmx_has_high_real_mode_segbase(void) |
|
| 166 |
-+static bool vmx_has_emulated_msr(int index) |
|
| 167 |
- { |
|
| 168 |
-- return enable_unrestricted_guest || emulate_invalid_guest_state; |
|
| 169 |
-+ switch (index) { |
|
| 170 |
-+ case MSR_IA32_SMBASE: |
|
| 171 |
-+ /* |
|
| 172 |
-+ * We cannot do SMM unless we can run the guest in big |
|
| 173 |
-+ * real mode. |
|
| 174 |
-+ */ |
|
| 175 |
-+ return enable_unrestricted_guest || emulate_invalid_guest_state; |
|
| 176 |
-+ case MSR_AMD64_VIRT_SPEC_CTRL: |
|
| 177 |
-+ /* This is AMD only. */ |
|
| 178 |
-+ return false; |
|
| 179 |
-+ default: |
|
| 180 |
-+ return true; |
|
| 181 |
-+ } |
|
| 182 |
- } |
|
| 183 |
- |
|
| 184 |
- static bool vmx_mpx_supported(void) |
|
| 185 |
-@@ -11346,7 +11358,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { |
|
| 186 |
- .hardware_enable = hardware_enable, |
|
| 187 |
- .hardware_disable = hardware_disable, |
|
| 188 |
- .cpu_has_accelerated_tpr = report_flexpriority, |
|
| 189 |
-- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, |
|
| 190 |
-+ .has_emulated_msr = vmx_has_emulated_msr, |
|
| 191 |
- |
|
| 192 |
- .vcpu_create = vmx_create_vcpu, |
|
| 193 |
- .vcpu_free = vmx_free_vcpu, |
|
| 194 |
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
|
| 195 |
-index 3aaaf30..a0cb85f 100644 |
|
| 196 |
-+++ b/arch/x86/kvm/x86.c |
|
| 197 |
-@@ -1002,6 +1002,7 @@ static u32 emulated_msrs[] = { |
|
| 198 |
- MSR_IA32_MCG_CTL, |
|
| 199 |
- MSR_IA32_MCG_EXT_CTL, |
|
| 200 |
- MSR_IA32_SMBASE, |
|
| 201 |
-+ MSR_AMD64_VIRT_SPEC_CTRL, |
|
| 202 |
- }; |
|
| 203 |
- |
|
| 204 |
- static unsigned num_emulated_msrs; |
|
| 205 |
-@@ -2664,7 +2665,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
|
| 206 |
- * fringe case that is not enabled except via specific settings |
|
| 207 |
- * of the module parameters. |
|
| 208 |
- */ |
|
| 209 |
-- r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); |
|
| 210 |
-+ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE); |
|
| 211 |
- break; |
|
| 212 |
- case KVM_CAP_COALESCED_MMIO: |
|
| 213 |
- r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
|
| 214 |
-@@ -4226,14 +4227,8 @@ static void kvm_init_msr_list(void) |
|
| 215 |
- num_msrs_to_save = j; |
|
| 216 |
- |
|
| 217 |
- for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { |
|
| 218 |
-- switch (emulated_msrs[i]) { |
|
| 219 |
-- case MSR_IA32_SMBASE: |
|
| 220 |
-- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase()) |
|
| 221 |
-- continue; |
|
| 222 |
-- break; |
|
| 223 |
-- default: |
|
| 224 |
-- break; |
|
| 225 |
-- } |
|
| 226 |
-+ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i])) |
|
| 227 |
-+ continue; |
|
| 228 |
- |
|
| 229 |
- if (j < i) |
|
| 230 |
- emulated_msrs[j] = emulated_msrs[i]; |
|
| 231 |
-2.7.4 |
|
| 232 |
- |
| 233 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,48 +0,0 @@ |
| 1 |
-From 3c8411083c45a3eb9f9b9b381ed553d9fea7e05b Mon Sep 17 00:00:00 2001 |
|
| 2 |
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 3 |
-Date: Wed, 16 May 2018 23:18:09 -0400 |
|
| 4 |
-Subject: [PATCH 53/54] x86/bugs: Rename SSBD_NO to SSB_NO |
|
| 5 |
- |
|
| 6 |
-commit 240da953fcc6a9008c92fae5b1f727ee5ed167ab upstream |
|
| 7 |
- |
|
| 8 |
-The "336996 Speculative Execution Side Channel Mitigations" from |
|
| 9 |
-May defines this as SSB_NO, hence lets sync-up. |
|
| 10 |
- |
|
| 11 |
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
|
| 12 |
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
|
| 13 |
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
|
| 14 |
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
|
| 15 |
- arch/x86/include/asm/msr-index.h | 2 +- |
|
| 16 |
- arch/x86/kernel/cpu/common.c | 2 +- |
|
| 17 |
- 2 files changed, 2 insertions(+), 2 deletions(-) |
|
| 18 |
- |
|
| 19 |
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
|
| 20 |
-index 9fd9dcf..1ec13e2 100644 |
|
| 21 |
-+++ b/arch/x86/include/asm/msr-index.h |
|
| 22 |
-@@ -63,7 +63,7 @@ |
|
| 23 |
- #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a |
|
| 24 |
- #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ |
|
| 25 |
- #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ |
|
| 26 |
--#define ARCH_CAP_SSBD_NO (1 << 4) /* |
|
| 27 |
-+#define ARCH_CAP_SSB_NO (1 << 4) /* |
|
| 28 |
- * Not susceptible to Speculative Store Bypass |
|
| 29 |
- * attack, so no Speculative Store Bypass |
|
| 30 |
- * control required. |
|
| 31 |
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
|
| 32 |
-index 40fc748..b0fd028 100644 |
|
| 33 |
-+++ b/arch/x86/kernel/cpu/common.c |
|
| 34 |
-@@ -926,7 +926,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
|
| 35 |
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
|
| 36 |
- |
|
| 37 |
- if (!x86_match_cpu(cpu_no_spec_store_bypass) && |
|
| 38 |
-- !(ia32_cap & ARCH_CAP_SSBD_NO)) |
|
| 39 |
-+ !(ia32_cap & ARCH_CAP_SSB_NO)) |
|
| 40 |
- setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); |
|
| 41 |
- |
|
| 42 |
- if (x86_match_cpu(cpu_no_speculation)) |
|
| 43 |
-2.7.4 |
|
| 44 |
- |