author    William Pitcock <nenolod@dereferenced.org>  2013-09-08 13:15:46 +0000
committer William Pitcock <nenolod@dereferenced.org>  2013-09-08 13:34:53 +0000
commit    a17a579a91bee5f66a6f3ff0d782a5c23133c740 (patch)
tree      09958260ae24e094f1159fe14ddd0fdd0a0846db
parent    86591e0d45277b720435fca5babaa5d21c507b9c (diff)
download  alpine_aports-a17a579a91bee5f66a6f3ff0d782a5c23133c740.tar.bz2
          alpine_aports-a17a579a91bee5f66a6f3ff0d782a5c23133c740.tar.xz
          alpine_aports-a17a579a91bee5f66a6f3ff0d782a5c23133c740.zip
main/xen: initial rebase of patches, fixups needed
-rw-r--r--   main/xen/APKBUILD                      |   61
-rw-r--r--   main/xen/fix-pod2man-choking.patch     |   46
-rw-r--r--   main/xen/qemu-xen-websocket.patch      |  225
-rw-r--r--   main/xen/qemu_uclibc_configure.patch   |   48
-rw-r--r--   main/xen/xsa45-4.2.patch               | 1133
-rw-r--r--   main/xen/xsa52-4.2-unstable.patch      |   46
-rw-r--r--   main/xen/xsa53-4.2.patch               |   57
-rw-r--r--   main/xen/xsa54.patch                   |   24
-rw-r--r--   main/xen/xsa55.patch                   | 3431
-rw-r--r--   main/xen/xsa56.patch                   |   50
-rw-r--r--   main/xen/xsa57.patch                   |  333
-rw-r--r--   main/xen/xsa58-4.2.patch               |  129
12 files changed, 140 insertions, 5443 deletions
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 3cb426b925..def6d7968e 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -2,8 +2,8 @@
 # Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
-pkgver=4.2.2
-pkgrel=11
+pkgver=4.3.0
+pkgrel=0
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86 x86_64"
@@ -16,7 +16,6 @@ makedepends="$depends_dev"
 install=""
 subpackages="$pkgname-doc $pkgname-dev $pkgname-libs $pkgname-hypervisor $pkgname-xend"
 source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.gz
-	qemu_uclibc_configure.patch
 	librt.patch
 	qemu-xen_paths.patch
 	docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
@@ -24,17 +23,10 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	xsa41.patch
 	xsa41b.patch
 	xsa41c.patch
-	xsa45-4.2.patch
 	xsa48-4.2.patch
-	xsa52-4.2-unstable.patch
-	xsa53-4.2.patch
-	xsa54.patch
-	xsa55.patch
-	xsa56.patch
-	xsa57.patch
-	xsa58-4.2.patch
 
 	fix-pod2man-choking.patch
+
 	qemu-xen-websocket.patch
 	qemu-xen-tls-websockets.patch
 
@@ -76,7 +68,7 @@ build() {
 	cd "$_builddir"
 
 	msg "Running configure..."
-	./configure \
+	./configure --prefix=/usr \
 		--build=$CBUILD \
 		--host=$CHOST \
 		|| return 1
@@ -153,25 +145,16 @@ xend() {
 		-exec mv '{}' "$subpkgdir"/"$sitepackages"/xen \;
 }
 
-md5sums="f7362b19401a47826f2d8fd603a1782a  xen-4.2.2.tar.gz
-506e7ab6f9482dc95f230978d340bcd9  qemu_uclibc_configure.patch
+md5sums="7b18cfb58f1ac2ce39cf35a1867f0c0a  xen-4.3.0.tar.gz
 2dc5ddf47c53ea168729975046c3c1f9  librt.patch
 1ccde6b36a6f9542a16d998204dc9a22  qemu-xen_paths.patch
 6dcff640268d514fa9164b4c812cc52d  docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
 8ad8942000b8a4be4917599cad9209cf  xsa41.patch
 ed7d0399c6ca6aeee479da5d8f807fe0  xsa41b.patch
 2f3dd7bdc59d104370066d6582725575  xsa41c.patch
-9265540493f41f7d40c48d0886ec5823  xsa45-4.2.patch
 b3e3a57d189a4f86c9766eaf3b5207f4  xsa48-4.2.patch
-83a9cdd035bcd18bf035434a1ba08c38  xsa52-4.2-unstable.patch
-03a1a4ebc470ee7e638e04db2701a4f7  xsa53-4.2.patch
-a8393d1ec6b886ea72ffe624a04ee10a  xsa54.patch
-42cd104f2a33d67938a63a6372cff573  xsa55.patch
-e70b9128ffc2175cea314a533a7d8457  xsa56.patch
-7475158130474ee062a4eb878259af61  xsa57.patch
-7de2cd11c10d6a554f3c81e0688c38b7  xsa58-4.2.patch
-c1d1a415415b0192e5dae9032962bf61  fix-pod2man-choking.patch
-af5c5e21e68ae27847e2307815c82f98  qemu-xen-websocket.patch
+2651e5c42934bc5d37bae1fdeb7aa0c4  fix-pod2man-choking.patch
+2b5e6af003ca8246701a3673d2d2292f  qemu-xen-websocket.patch
 35bdea1d4e3ae2565edc7e40906efdd5  qemu-xen-tls-websockets.patch
 a90c36642f0701a8aaa4ebe4dde430f5  xenstored.initd
 b017ccdd5e1c27bbf1513e3569d4ff07  xenstored.confd
@@ -184,25 +167,16 @@ fa8c72b42e0479d521a353386d8543ef xendomains.initd
 9df68ac65dc3f372f5d61183abdc83ff  xen-consoles.logrotate
 6a2f777c16678d84039acf670d86fff6  xenqemu.confd
 f9afbf39e2b5a7d9dde60ebbd249ea7d  xenqemu.initd"
-sha256sums="c9bfe91a5e72f8545acebad9889d64368020359bfe18044c0e683133e55ae005  xen-4.2.2.tar.gz
-4fb92fa1ce67eb3f78a15c6c971415d4d53599904969596acc7a52edc83a5fee  qemu_uclibc_configure.patch
+sha256sums="e1e9faabe4886e2227aacdbde74410653b233d66642ca1972a860cbec6439961  xen-4.3.0.tar.gz
 12bf32f9937b09283f2df4955b50d6739768f66137a7d991f661f45cf77cb53b  librt.patch
 9440ca31a6911201f02694e93faafb5ca9b17de18b7f15b53ceac39a03411b4a  qemu-xen_paths.patch
 a0c225d716d343fe041b63e3940900c5b3573ed3bcfc5b7c2d52ea2861c3fc28  docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
 93452beba88a8da8e89b8bfa743074a358ba1d9052151c608e21c4d62f8c4867  xsa41.patch
 896a07f57310c9bea9bc2a305166cf796282c381cb7839be49105b1726a860b5  xsa41b.patch
 683dd96a0a8899f794070c8c09643dfeeb39f92da531955cba961b45f6075914  xsa41c.patch
-f3c8c75cc6f55409139b1928017d1e432e5e64b6fac2083395f4723353e1c775  xsa45-4.2.patch
 dc23077028584e71a08dd0dc9e81552c76744a5ce9d39df5958a95ae9cf3107b  xsa48-4.2.patch
-5b8582185bf90386729e81db1f7780c69a891b074a87d9a619a90d6f639bea13  xsa52-4.2-unstable.patch
-785f7612bd229f7501f4e98e4760f307d90c64305ee14707d262b77f05fa683d  xsa53-4.2.patch
-5d94946b3c9cba52aae2bffd4b0ebb11d09181650b5322a3c85170674a05f6b7  xsa54.patch
-ac3ebaf3ec37e28ba08e23d63626d7aaccf0a3f282dd0af9c24cc4df3fd8fae0  xsa55.patch
-a691c5f5332a42c0d38ddb4dc037eb902f01ba31033b64c47d02909a8de0257d  xsa56.patch
-b6a5106848541972519cc529859d9ff3083c79367276c7031560fa4ce6f9f770  xsa57.patch
-194d6610fc38b767d643e5d58a1268f45921fb35e309b47aca6a388b861311c2  xsa58-4.2.patch
-b4e7d43364a06b2cb04527db3e9567524bc489fef475709fd8493ebf1e62406d  fix-pod2man-choking.patch
-cc4bf76be2c87ba089f9e330f3f18419a8399920319e04f6a97be463ce1bfa1e  qemu-xen-websocket.patch
+b58bc9c048d05856f1a7571c60afd0bab72493cc6aa402c15a5f5dc85483fa7e  fix-pod2man-choking.patch
+0ed31a9f92662c38ac6ec75280387304816703441fcc4721003f9c2347507893  qemu-xen-websocket.patch
 435dd428d83acdfde58888532a1cece1e9075b2a2460fe3f6cd33c7d400f2715  qemu-xen-tls-websockets.patch
 868c77d689ae54b7041da169bfaa01868503337d4105a071eb771f4ec5a0543d  xenstored.initd
 ea9171e71ab3d33061979bcf3bb737156192aa4b0be4d1234438ced75b6fdef3  xenstored.confd
@@ -215,25 +189,16 @@ a50a4485e84bcc098ad021556cd2aa7947c228f0a546ab942e880787ced57be3 xend.initd
 0da87a4b9094f934e3de937e8ef8d3afc752e76793aa3d730182d0241e118b19  xen-consoles.logrotate
 4cfcddcade5d055422ab4543e8caa6e5c5eee7625c41880a9000b7a87c7c424e  xenqemu.confd
 bf17808a79c57a9efc38b9f14cc87f556b2bb7ecfdec5763d9cf686255a47fce  xenqemu.initd"
-sha512sums="4943b18016ed8c2b194a3b55e6655b3b734b39ffb8cb7ee0a0580f2f4460a1d0e92e1de8ac23f5186272914fad1650586af51fd7c3644d0310eb16f2e11c5e80  xen-4.2.2.tar.gz
-81a5555c123daad6a9a1835186a82d604e68d833efe3a6576a88717268e5335f809a6621846645c2e1eb1d33a51951a6306e4c393a76c677959149bc28a886be  qemu_uclibc_configure.patch
+sha512sums="e6b8f64e15e48704ea5cee5585cd6151fe6a5a62bc4670caf0b762c1aa71c9598db236c637ac34c42c92c6e8a5001acdd3d9d4b9305401a26273279358f481d6  xen-4.3.0.tar.gz
 74e3cfc51e367fc445cb3d8149f0c8830e94719a266daf04d2cd0889864591860c4c8842de2bc78070e4c5be7d14dfbb8b236c511d5faeddc2ad97177c1d3764  librt.patch
 425149aea57a6deae9f488cea867f125983998dc6e8c63893fb3b9caf0ea34214251dd98ad74db823f5168631c44c49b988b6fe9c11b76bd493ddf51bc0baaa2  qemu-xen_paths.patch
 477d3d08bd4fcdfbc54abea1a18acb6a41d298c366cd01c954f474515cb862d0dd59217c0dfca5460a725a8bc036de42132f522c3eefdffcc4fd511f016b783f  docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
 94672a4d37db4e370370157cac9507ee1a75832f4be779fba148c1faa0b18f26ed57126eee6256ccd5d218463325a730266b53139554f4865adedb7659154c16  xsa41.patch
 bda9105793f2327e1317991762120d0668af0e964076b18c9fdbfd509984b2e88d85df95702c46b2e00d5350e8113f6aa7b34b19064d19abbeb4d43f0c431d38  xsa41b.patch
 36b60478660ff7748328f5ab9adff13286eee1a1bad06e42fdf7e6aafe105103988525725aacd660cf5b2a184a9e2d6b3818655203c1fa07e07dcebdf23f35d9  xsa41c.patch
-a57b4c8be76a938d51e51ffb39f0781389ebef320f359b0ae9af4a93af970d37dde50a304d4864a75b7fb32861a4745b9da5fa6acce0f2a688b11b13ab43fb4e  xsa45-4.2.patch
 31dd8c62d41cc0a01a79d9b24a5b793f5e2058230808d9c5364c6ff3477ab02f3258f1bbd761d97dc1b97ee120b41524b999eaac77f33b606496fc324b5fa2e4  xsa48-4.2.patch
-b64a965fab8534958e453c493211ed3a6555aafb90d18f6d56a45b41d3086a0029aee85b6b6eb93b0d861d5fdc0ef10fc32e9b4f83593b37c43922d838085dd8  xsa52-4.2-unstable.patch
-9b08924e563e79d2b308c1521da520c0579b334b61ac99a5593eabdb96dbda2da898b542cc47bda6d663c68343216d9d29c04853b6d1b6ecdde964b0cbb3f7ab  xsa53-4.2.patch
-c9010be637d4f96ef03c880e1ef28228f762c5980108380a105bd190b631a882c8dff81e9421246d88d597e72f69ad1a8c672be6ddd06936acfcacd4575a2650  xsa54.patch
-b4f43095163146a29ae258575bb03bd45f5a315d3cca7434a0b88c18eb1b6e1cf17ef13b4ac428a08797271a3dbc756d3f705a990991c8d2fc96f0f272c3665a  xsa55.patch
-26a1c2cc92ddd4c1ab6712b0e41a0135d0e76a7fe3a14b651fb0235e352e5a24077414371acccb93058b7ce4d882b667386811170ba74570c53165837bcd983d  xsa56.patch
-5ccc1654d9f0270485495f9fc913e41663ddbda602ffe049e0a9c3247c6246690b7ec4165482f96921c5253a2a5205ca384048339996e611c07ab60a6a75cf6a  xsa57.patch
-60813c01f6bb909da8748919df4d0ffa923baf4b7b55287e0bec3389fb83020158225182e112941c9e126b4df57e7b8724f2a69d0c1fa9ce3b37c0bdf1a49da4  xsa58-4.2.patch
-ffb1113fcec0853b690c177655c7d1136388efdebf0d7f625b80481b98eadd3e9ef461442ced53e11acf0e347800a2b0a41e18b05065b5d04bffdd8a4e127cec  fix-pod2man-choking.patch
-5da25a997c69d737b6a43f460d54e34dccf3c94751990969c93e674ab3aaa34ddd41c2b2a7988aaa68a22abf1508705336d9a9ae3637147b0cf9036b9909daf8  qemu-xen-websocket.patch
+dd8f016b36719e0f96ce3c9c7098d4047ee2d36e7147f3c35a730253eec2f64100f7702430f5e637727dc7c3a20b1762270ffce2291b2046e08028da14d4c7e6  fix-pod2man-choking.patch
+442b6e6ed771e58226cd541d0ee0e2eafc9442ef77236a86e02d594eaeb1abb725895d44b8ed8a04d64280bd4c61b7ca91b0fdd448647f59ba5d0fdc82f85cc9  qemu-xen-websocket.patch
 11eaccc346440ff285552f204d491e3b31bda1665c3219ecae3061b5d55db9dec885af0c031fa19c67e87bbe238002b1911bbd5bfea2f2ba0d61e6b3d0c952c9  qemu-xen-tls-websockets.patch
 880584e0866b1efcf3b7a934f07072ec84c13c782e3e7a15848d38ba8af50259d46db037dca1e037b15274989f2c22acd1134954dd60c59f4ee693b417d03e0d  xenstored.initd
 100cf4112f401f45c1e4e885a5074698c484b40521262f6268fad286498e95f4c51e746f0e94eb43a590bb8e813a397bb53801ccacebec9541020799d8d70514  xenstored.confd
diff --git a/main/xen/fix-pod2man-choking.patch b/main/xen/fix-pod2man-choking.patch
index c4a4ac6a25..fba9be9d2c 100644
--- a/main/xen/fix-pod2man-choking.patch
+++ b/main/xen/fix-pod2man-choking.patch
@@ -11,32 +11,6 @@
 qemu must be told to not have parallel ports to have working GUS
 --- xen-4.2.2.orig/docs/man/xl.cfg.pod.5
 +++ xen-4.2.2/docs/man/xl.cfg.pod.5
-@@ -402,10 +402,10 @@
- 
- =back
- 
--=item B<ioports=[ "IOPORT_RANGE", "IOPORT_RANGE", ... ]>
--
- =over 4
- 
-+=item B<ioports=[ "IOPORT_RANGE", "IOPORT_RANGE", ... ]>
-+
- Allow guest to access specific legacy I/O ports. Each B<IOPORT_RANGE>
- is given in hexadecimal and may either a span e.g. C<2f8-2ff>
- (inclusive) or a single I/O port C<2f8>.
-@@ -415,10 +415,10 @@
- 
- =back
- 
--=item B<irqs=[ NUMBER, NUMBER, ... ]>
--
- =over 4
- 
-+=item B<irqs=[ NUMBER, NUMBER, ... ]>
-+
- Allow a guest to access specific physical IRQs.
- 
- It is recommended to use this option only for trusted VMs under
 @@ -680,9 +680,11 @@
 
  =back
@@ -50,23 +24,3 @@
 
 =item B<localtime=BOOLEAN>
 
-@@ -691,6 +693,8 @@
- =item B<rtc_timeoffset=SECONDS>
- 
- Set the real time clock offset in seconds. 0 by default.
-+
-+=back
- 
- =head3 Support for Paravirtualisation of HVM Guests
- 
---- xen-4.2.2.orig/docs/man/xl.pod.1
-+++ xen-4.2.2/docs/man/xl.pod.1
-@@ -851,8 +851,6 @@
- 
- =item B<-p [pool] -d>... : Illegal
- 
--=item
--
- =back
- 
- =item B<sched-credit2> [I<OPTIONS>]
diff --git a/main/xen/qemu-xen-websocket.patch b/main/xen/qemu-xen-websocket.patch
index 4e54d7e213..02bb805dac 100644
--- a/main/xen/qemu-xen-websocket.patch
+++ b/main/xen/qemu-xen-websocket.patch
@@ -1,36 +1,26 @@
1--- xen-4.2.2.orig/tools/Makefile 1--- xen-4.3.0.orig/tools/Makefile
2+++ xen-4.2.2/tools/Makefile 2+++ xen-4.3.0/tools/Makefile
3@@ -202,6 +202,7 @@ 3@@ -203,6 +203,7 @@
4 --docdir=$(PREFIX)/share/doc \ 4 --docdir=$(PREFIX)/share/doc \
5 --sysconfdir=/etc/qemu \ 5 --sysconfdir=/etc/qemu \
6 --disable-kvm \ 6 --disable-kvm \
7+ --enable-vnc-ws \ 7+ --enable-vnc-ws \
8 --disable-docs \
8 --python=$(PYTHON) \ 9 --python=$(PYTHON) \
9 $(IOEMU_CONFIGURE_CROSS); \ 10 $(IOEMU_CONFIGURE_CROSS); \
10 $(MAKE) all 11--- xen-4.3.0.orig/tools/qemu-xen/configure
11--- xen-4.2.2.orig/tools/qemu-xen/Makefile.objs 12+++ xen-4.3.0/tools/qemu-xen/configure
12+++ xen-4.2.2/tools/qemu-xen/Makefile.objs 13@@ -158,6 +158,7 @@
13@@ -149,6 +149,7 @@
14 vnc-obj-y += vnc-enc-zrle.o
15 vnc-obj-$(CONFIG_VNC_TLS) += vnc-tls.o vnc-auth-vencrypt.o
16 vnc-obj-$(CONFIG_VNC_SASL) += vnc-auth-sasl.o
17+vnc-obj-$(CONFIG_VNC_WS) += vnc-ws.o
18 ifdef CONFIG_VNC_THREAD
19 vnc-obj-y += vnc-jobs-async.o
20 else
21--- xen-4.2.2.orig/tools/qemu-xen/configure
22+++ xen-4.2.2/tools/qemu-xen/configure
23@@ -124,6 +124,7 @@
24 vnc_sasl="" 14 vnc_sasl=""
25 vnc_jpeg="" 15 vnc_jpeg=""
26 vnc_png="" 16 vnc_png=""
27+vnc_ws="" 17+vnc_ws=""
28 vnc_thread="no"
29 xen="" 18 xen=""
30 xen_ctrl_version="" 19 xen_ctrl_version=""
31@@ -638,6 +639,10 @@ 20 xen_pci_passthrough=""
21@@ -703,6 +704,10 @@
32 ;; 22 ;;
33 --enable-vnc-thread) vnc_thread="yes" 23 --enable-vnc-png) vnc_png="yes"
34 ;; 24 ;;
35+ --disable-vnc-ws) vnc_ws="no" 25+ --disable-vnc-ws) vnc_ws="no"
36+ ;; 26+ ;;
@@ -39,16 +29,16 @@
39 --disable-slirp) slirp="no" 29 --disable-slirp) slirp="no"
40 ;; 30 ;;
41 --disable-uuid) uuid="no" 31 --disable-uuid) uuid="no"
42@@ -1008,6 +1013,8 @@ 32@@ -1048,6 +1053,8 @@
33 echo " --enable-vnc-jpeg enable JPEG lossy compression for VNC server"
34 echo " --disable-vnc-png disable PNG compression for VNC server (default)"
43 echo " --enable-vnc-png enable PNG compression for VNC server" 35 echo " --enable-vnc-png enable PNG compression for VNC server"
44 echo " --disable-vnc-thread disable threaded VNC server"
45 echo " --enable-vnc-thread enable threaded VNC server"
46+echo " --disable-vnc-ws disable Websockets support for VNC server" 36+echo " --disable-vnc-ws disable Websockets support for VNC server"
47+echo " --enable-vnc-ws enable Websockets support for VNC server" 37+echo " --enable-vnc-ws enable Websockets support for VNC server"
48 echo " --disable-curses disable curses output" 38 echo " --disable-curses disable curses output"
49 echo " --enable-curses enable curses output" 39 echo " --enable-curses enable curses output"
50 echo " --disable-curl disable curl connectivity" 40 echo " --disable-curl disable curl connectivity"
51@@ -1554,8 +1561,8 @@ 41@@ -1692,8 +1699,8 @@
52 fi 42 fi
53 43
54 ########################################## 44 ##########################################
@@ -59,7 +49,7 @@
59 cat > $TMPC <<EOF 49 cat > $TMPC <<EOF
60 #include <gnutls/gnutls.h> 50 #include <gnutls/gnutls.h>
61 int main(void) { gnutls_session_t s; gnutls_init(&s, GNUTLS_SERVER); return 0; } 51 int main(void) { gnutls_session_t s; gnutls_init(&s, GNUTLS_SERVER); return 0; }
62@@ -1563,13 +1570,22 @@ 52@@ -1701,13 +1708,22 @@
63 vnc_tls_cflags=`$pkg_config --cflags gnutls 2> /dev/null` 53 vnc_tls_cflags=`$pkg_config --cflags gnutls 2> /dev/null`
64 vnc_tls_libs=`$pkg_config --libs gnutls 2> /dev/null` 54 vnc_tls_libs=`$pkg_config --libs gnutls 2> /dev/null`
65 if compile_prog "$vnc_tls_cflags" "$vnc_tls_libs" ; then 55 if compile_prog "$vnc_tls_cflags" "$vnc_tls_libs" ; then
@@ -83,18 +73,18 @@
83 fi 73 fi
84 fi 74 fi
85 75
86@@ -2864,6 +2880,7 @@ 76@@ -3209,6 +3225,7 @@
87 echo "VNC SASL support $vnc_sasl" 77 echo "VNC SASL support $vnc_sasl"
88 echo "VNC JPEG support $vnc_jpeg" 78 echo "VNC JPEG support $vnc_jpeg"
89 echo "VNC PNG support $vnc_png" 79 echo "VNC PNG support $vnc_png"
90+ echo "VNC WS support $vnc_ws" 80+ echo "VNC WS support $vnc_ws"
91 echo "VNC thread $vnc_thread"
92 fi 81 fi
93 if test -n "$sparc_cpu"; then 82 if test -n "$sparc_cpu"; then
94@@ -3053,6 +3070,10 @@ 83 echo "Target Sparc Arch $sparc_cpu"
95 fi 84@@ -3384,6 +3401,10 @@
96 if test "$vnc_thread" = "yes" ; then 85 if test "$vnc_png" = "yes" ; then
97 echo "CONFIG_VNC_THREAD=y" >> $config_host_mak 86 echo "CONFIG_VNC_PNG=y" >> $config_host_mak
87 echo "VNC_PNG_CFLAGS=$vnc_png_cflags" >> $config_host_mak
98+fi 88+fi
99+if test "$vnc_ws" = "yes" ; then 89+if test "$vnc_ws" = "yes" ; then
100+ echo "CONFIG_VNC_WS=y" >> $config_host_mak 90+ echo "CONFIG_VNC_WS=y" >> $config_host_mak
@@ -102,9 +92,9 @@
102 fi 92 fi
103 if test "$fnmatch" = "yes" ; then 93 if test "$fnmatch" = "yes" ; then
104 echo "CONFIG_FNMATCH=y" >> $config_host_mak 94 echo "CONFIG_FNMATCH=y" >> $config_host_mak
105--- xen-4.2.2.orig/tools/qemu-xen/qemu-options.hx 95--- xen-4.3.0.orig/tools/qemu-xen/qemu-options.hx
106+++ xen-4.2.2/tools/qemu-xen/qemu-options.hx 96+++ xen-4.3.0/tools/qemu-xen/qemu-options.hx
107@@ -976,6 +976,14 @@ 97@@ -1096,6 +1096,14 @@
108 connections (@var{host}:@var{d},@code{reverse}), the @var{d} argument 98 connections (@var{host}:@var{d},@code{reverse}), the @var{d} argument
109 is a TCP port number, not a display number. 99 is a TCP port number, not a display number.
110 100
@@ -119,8 +109,46 @@
119 @item password 109 @item password
120 110
121 Require that password based authentication is used for client connections. 111 Require that password based authentication is used for client connections.
112--- xen-4.3.0.orig/tools/qemu-xen/qemu-options.hx.orig
113+++ xen-4.3.0/tools/qemu-xen/qemu-options.hx.orig
114@@ -2045,18 +2045,13 @@
115 devices.
116
117 Syntax for specifying a sheepdog device
118-@table @list
119-``sheepdog:<vdiname>''
120-
121-``sheepdog:<vdiname>:<snapid>''
122-
123-``sheepdog:<vdiname>:<tag>''
124-
125-``sheepdog:<host>:<port>:<vdiname>''
126-
127-``sheepdog:<host>:<port>:<vdiname>:<snapid>''
128-
129-``sheepdog:<host>:<port>:<vdiname>:<tag>''
130+@table @code
131+@item sheepdog:<vdiname>
132+@item sheepdog:<vdiname>:<snapid>
133+@item sheepdog:<vdiname>:<tag>
134+@item sheepdog:<host>:<port>:<vdiname>
135+@item sheepdog:<host>:<port>:<vdiname>:<snapid>
136+@item sheepdog:<host>:<port>:<vdiname>:<tag>
137 @end table
138
139 Example
140--- xen-4.3.0.orig/tools/qemu-xen/ui/Makefile.objs
141+++ xen-4.3.0/tools/qemu-xen/ui/Makefile.objs
142@@ -4,6 +4,7 @@
143 vnc-obj-y += vnc-enc-zrle.o
144 vnc-obj-$(CONFIG_VNC_TLS) += vnc-tls.o vnc-auth-vencrypt.o
145 vnc-obj-$(CONFIG_VNC_SASL) += vnc-auth-sasl.o
146+vnc-obj-$(CONFIG_VNC_WS) += vnc-ws.o
147 vnc-obj-y += vnc-jobs.o
148
149 common-obj-y += keymaps.o
122--- /dev/null 150--- /dev/null
123+++ xen-4.2.2/tools/qemu-xen/ui/vnc-ws.c 151+++ xen-4.3.0/tools/qemu-xen/ui/vnc-ws.c
124@@ -0,0 +1,284 @@ 152@@ -0,0 +1,284 @@
125+/* 153+/*
126+ * QEMU VNC display driver: Websockets support 154+ * QEMU VNC display driver: Websockets support
@@ -407,7 +435,7 @@
407+ return 1; 435+ return 1;
408+} 436+}
409--- /dev/null 437--- /dev/null
410+++ xen-4.2.2/tools/qemu-xen/ui/vnc-ws.h 438+++ xen-4.3.0/tools/qemu-xen/ui/vnc-ws.h
411@@ -0,0 +1,86 @@ 439@@ -0,0 +1,86 @@
412+/* 440+/*
413+ * QEMU VNC display driver: Websockets support 441+ * QEMU VNC display driver: Websockets support
@@ -495,9 +523,9 @@
495+ size_t *payload_size, size_t *frame_size); 523+ size_t *payload_size, size_t *frame_size);
496+ 524+
497+#endif /* __QEMU_UI_VNC_WS_H */ 525+#endif /* __QEMU_UI_VNC_WS_H */
498--- xen-4.2.2.orig/tools/qemu-xen/ui/vnc.c 526--- xen-4.3.0.orig/tools/qemu-xen/ui/vnc.c
499+++ xen-4.2.2/tools/qemu-xen/ui/vnc.c 527+++ xen-4.3.0/tools/qemu-xen/ui/vnc.c
500@@ -391,7 +391,6 @@ 528@@ -420,7 +420,6 @@
501 static int vnc_update_client(VncState *vs, int has_dirty); 529 static int vnc_update_client(VncState *vs, int has_dirty);
502 static int vnc_update_client_sync(VncState *vs, int has_dirty); 530 static int vnc_update_client_sync(VncState *vs, int has_dirty);
503 static void vnc_disconnect_start(VncState *vs); 531 static void vnc_disconnect_start(VncState *vs);
@@ -505,7 +533,7 @@
505 static void vnc_init_timer(VncDisplay *vd); 533 static void vnc_init_timer(VncDisplay *vd);
506 static void vnc_remove_timer(VncDisplay *vd); 534 static void vnc_remove_timer(VncDisplay *vd);
507 535
508@@ -479,6 +478,13 @@ 536@@ -510,6 +509,13 @@
509 buffer->offset += len; 537 buffer->offset += len;
510 } 538 }
511 539
@@ -519,7 +547,7 @@
519 static void vnc_desktop_resize(VncState *vs) 547 static void vnc_desktop_resize(VncState *vs)
520 { 548 {
521 DisplayState *ds = vs->ds; 549 DisplayState *ds = vs->ds;
522@@ -1002,7 +1008,7 @@ 550@@ -1016,7 +1022,7 @@
523 vs->csock = -1; 551 vs->csock = -1;
524 } 552 }
525 553
@@ -528,7 +556,7 @@
528 { 556 {
529 int i; 557 int i;
530 558
531@@ -1013,6 +1019,10 @@ 559@@ -1027,6 +1033,10 @@
532 560
533 buffer_free(&vs->input); 561 buffer_free(&vs->input);
534 buffer_free(&vs->output); 562 buffer_free(&vs->output);
@@ -539,7 +567,7 @@
539 567
540 qobject_decref(vs->info); 568 qobject_decref(vs->info);
541 569
542@@ -1150,8 +1160,7 @@ 570@@ -1166,8 +1176,7 @@
543 if (!ret) 571 if (!ret)
544 return 0; 572 return 0;
545 573
@@ -549,7 +577,7 @@
549 577
550 if (vs->output.offset == 0) { 578 if (vs->output.offset == 0) {
551 qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs); 579 qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs);
552@@ -1177,7 +1186,16 @@ 580@@ -1193,7 +1202,16 @@
553 vnc_client_write_sasl(vs); 581 vnc_client_write_sasl(vs);
554 } else 582 } else
555 #endif /* CONFIG_VNC_SASL */ 583 #endif /* CONFIG_VNC_SASL */
@@ -567,7 +595,7 @@
567 } 595 }
568 596
569 void vnc_client_write(void *opaque) 597 void vnc_client_write(void *opaque)
570@@ -1185,7 +1203,11 @@ 598@@ -1201,7 +1219,11 @@
571 VncState *vs = opaque; 599 VncState *vs = opaque;
572 600
573 vnc_lock_output(vs); 601 vnc_lock_output(vs);
@@ -580,7 +608,7 @@
580 vnc_client_write_locked(opaque); 608 vnc_client_write_locked(opaque);
581 } else if (vs->csock != -1) { 609 } else if (vs->csock != -1) {
582 qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs); 610 qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs);
583@@ -1273,7 +1295,21 @@ 611@@ -1295,7 +1317,21 @@
584 ret = vnc_client_read_sasl(vs); 612 ret = vnc_client_read_sasl(vs);
585 else 613 else
586 #endif /* CONFIG_VNC_SASL */ 614 #endif /* CONFIG_VNC_SASL */
@@ -602,7 +630,7 @@
602 if (!ret) { 630 if (!ret) {
603 if (vs->csock == -1) 631 if (vs->csock == -1)
604 vnc_disconnect_finish(vs); 632 vnc_disconnect_finish(vs);
605@@ -1291,8 +1327,7 @@ 633@@ -1313,8 +1349,7 @@
606 } 634 }
607 635
608 if (!ret) { 636 if (!ret) {
@@ -612,7 +640,7 @@
612 } else { 640 } else {
613 vs->read_handler_expect = ret; 641 vs->read_handler_expect = ret;
614 } 642 }
615@@ -1345,7 +1380,11 @@ 643@@ -1367,7 +1402,11 @@
616 void vnc_flush(VncState *vs) 644 void vnc_flush(VncState *vs)
617 { 645 {
618 vnc_lock_output(vs); 646 vnc_lock_output(vs);
@@ -625,7 +653,7 @@
625 vnc_client_write_locked(vs); 653 vnc_client_write_locked(vs);
626 } 654 }
627 vnc_unlock_output(vs); 655 vnc_unlock_output(vs);
628@@ -2525,7 +2564,7 @@ 656@@ -2657,7 +2696,7 @@
629 } 657 }
630 } 658 }
631 659
@@ -634,7 +662,7 @@
634 { 662 {
635 VncState *vs = g_malloc0(sizeof(VncState)); 663 VncState *vs = g_malloc0(sizeof(VncState));
636 int i; 664 int i;
637@@ -2552,12 +2591,33 @@ 665@@ -2684,13 +2723,34 @@
638 VNC_DEBUG("New client on socket %d\n", csock); 666 VNC_DEBUG("New client on socket %d\n", csock);
639 dcl->idle = 0; 667 dcl->idle = 0;
640 socket_set_nonblock(vs->csock); 668 socket_set_nonblock(vs->csock);
@@ -651,6 +679,7 @@
651 679
652 vnc_client_cache_addr(vs); 680 vnc_client_cache_addr(vs);
653 vnc_qmp_event(vs, QEVENT_VNC_CONNECTED); 681 vnc_qmp_event(vs, QEVENT_VNC_CONNECTED);
682 vnc_set_share_mode(vs, VNC_SHARE_MODE_CONNECTING);
654 683
655 vs->vd = vd; 684 vs->vd = vd;
656+ 685+
@@ -669,7 +698,7 @@
669 vs->ds = vd->ds; 698 vs->ds = vd->ds;
670 vs->last_x = -1; 699 vs->last_x = -1;
671 vs->last_y = -1; 700 vs->last_y = -1;
672@@ -2590,21 +2650,41 @@ 701@@ -2722,21 +2782,41 @@
673 /* vs might be free()ed here */ 702 /* vs might be free()ed here */
674 } 703 }
675 704
@@ -714,7 +743,7 @@
714 void vnc_display_init(DisplayState *ds) 743 void vnc_display_init(DisplayState *ds)
715 { 744 {
716 VncDisplay *vs = g_malloc0(sizeof(*vs)); 745 VncDisplay *vs = g_malloc0(sizeof(*vs));
717@@ -2616,6 +2696,9 @@ 746@@ -2748,6 +2828,9 @@
718 vnc_display = vs; 747 vnc_display = vs;
719 748
720 vs->lsock = -1; 749 vs->lsock = -1;
@@ -724,7 +753,7 @@
724 753
725 vs->ds = ds; 754 vs->ds = ds;
726 QTAILQ_INIT(&vs->clients); 755 QTAILQ_INIT(&vs->clients);
727@@ -2659,6 +2742,15 @@ 756@@ -2789,6 +2872,15 @@
728 close(vs->lsock); 757 close(vs->lsock);
729 vs->lsock = -1; 758 vs->lsock = -1;
730 } 759 }
@@ -740,7 +769,7 @@
740 vs->auth = VNC_AUTH_INVALID; 769 vs->auth = VNC_AUTH_INVALID;
741 #ifdef CONFIG_VNC_TLS 770 #ifdef CONFIG_VNC_TLS
742 vs->subauth = VNC_AUTH_INVALID; 771 vs->subauth = VNC_AUTH_INVALID;
743@@ -2769,6 +2861,36 @@ 772@@ -2910,6 +3002,36 @@
744 } else if (strncmp(options, "sasl", 4) == 0) { 773 } else if (strncmp(options, "sasl", 4) == 0) {
745 sasl = 1; /* Require SASL auth */ 774 sasl = 1; /* Require SASL auth */
746 #endif 775 #endif
@@ -777,23 +806,30 @@
777 #ifdef CONFIG_VNC_TLS 806 #ifdef CONFIG_VNC_TLS
778 } else if (strncmp(options, "tls", 3) == 0) { 807 } else if (strncmp(options, "tls", 3) == 0) {
779 tls = 1; /* Require TLS */ 808 tls = 1; /* Require TLS */
780@@ -2931,7 +3053,10 @@ 809@@ -3068,6 +3190,9 @@
781 } else { 810 /* connect to viewer */
782 int csock = vs->lsock; 811 int csock;
783 vs->lsock = -1; 812 vs->lsock = -1;
784- vnc_connect(vs, csock, 0);
785+#ifdef CONFIG_VNC_WS 813+#ifdef CONFIG_VNC_WS
786+ vs->lwebsock = -1; 814+ vs->lwebsock = -1;
787+#endif 815+#endif
788+ vnc_connect(vs, csock, 0, 0); 816 if (strncmp(display, "unix:", 5) == 0) {
817 csock = unix_connect(display+5, errp);
818 } else {
819@@ -3076,7 +3201,7 @@
820 if (csock < 0) {
821 goto fail;
789 } 822 }
790 return 0; 823- vnc_connect(vs, csock, 0);
791 824+ vnc_connect(vs, csock, 0, 0);
792@@ -2944,21 +3069,56 @@ 825 } else {
793 vs->lsock = unix_listen(display+5, dpy+5, 256-5); 826 /* listen for connects */
827 char *dpy;
828@@ -3087,25 +3212,54 @@
794 } else { 829 } else {
795 vs->lsock = inet_listen(display, dpy, 256, SOCK_STREAM, 5900); 830 vs->lsock = inet_listen(display, dpy, 256,
796+ if (-1 == vs->lsock) { 831 SOCK_STREAM, 5900, errp);
832+ if (vs->lsock < 0) {
797+ g_free(dpy); 833+ g_free(dpy);
798+ goto fail; 834+ goto fail;
799+ } 835+ }
@@ -818,45 +854,41 @@
818+ } 854+ }
819+#endif /* CONFIG_VNC_WS */ 855+#endif /* CONFIG_VNC_WS */
820 } 856 }
821- if (-1 == vs->lsock) { 857- if (vs->lsock < 0) {
822- g_free(dpy); 858- g_free(dpy);
823- return -1; 859- goto fail;
824- } else { 860- }
825- g_free(vs->display); 861 g_free(vs->display);
826- vs->display = dpy; 862 vs->display = dpy;
827+ g_free(vs->display); 863- qemu_set_fd_handler2(vs->lsock, NULL, vnc_listen_read, NULL, vs);
828+ vs->display = dpy;
829+
830+ qemu_set_fd_handler2(vs->lsock, NULL, vnc_listen_regular_read, NULL, vs); 864+ qemu_set_fd_handler2(vs->lsock, NULL, vnc_listen_regular_read, NULL, vs);
831+#ifdef CONFIG_VNC_WS 865+#ifdef CONFIG_VNC_WS
832+ if (vs->websocket) { 866+ if (vs->websocket) {
833+ qemu_set_fd_handler2(vs->lwebsock, NULL, vnc_listen_websocket_read, NULL, vs); 867+ qemu_set_fd_handler2(vs->lwebsock, NULL, vnc_listen_websocket_read, NULL, vs);
834 } 868+ }
835+#endif 869+#endif
836 } 870 }
837- return qemu_set_fd_handler2(vs->lsock, NULL, vnc_listen_read, NULL, vs); 871 return;
838+ return 0; 872
839+ 873 fail:
840+fail: 874 g_free(vs->display);
841+ g_free(vs->display); 875 vs->display = NULL;
842+ vs->display = NULL;
843+#ifdef CONFIG_VNC_WS 876+#ifdef CONFIG_VNC_WS
844+ g_free(vs->ws_display); 877+ g_free(vs->ws_display);
845+ vs->ws_display = NULL; 878+ vs->ws_display = NULL;
846+#endif /* CONFIG_VNC_WS */ 879+#endif /* CONFIG_VNC_WS */
847+ return -1;
848 } 880 }
849 881
850 void vnc_display_add_client(DisplayState *ds, int csock, int skipauth) 882 void vnc_display_add_client(DisplayState *ds, int csock, int skipauth)
851 { 883 {
852 VncDisplay *vs = ds ? (VncDisplay *)ds->opaque : vnc_display; 884 VncDisplay *vs = ds ? (VncDisplay *)ds->opaque : vnc_display;
853 885
854- return vnc_connect(vs, csock, skipauth); 886- vnc_connect(vs, csock, skipauth);
855+ return vnc_connect(vs, csock, skipauth, 0); 887+ vnc_connect(vs, csock, skipauth, 0);
856 } 888 }
857--- xen-4.2.2.orig/tools/qemu-xen/ui/vnc.h 889--- xen-4.3.0.orig/tools/qemu-xen/ui/vnc.h
858+++ xen-4.2.2/tools/qemu-xen/ui/vnc.h 890+++ xen-4.3.0/tools/qemu-xen/ui/vnc.h
859@@ -101,6 +101,9 @@ 891@@ -99,6 +99,9 @@
860 #ifdef CONFIG_VNC_SASL 892 #ifdef CONFIG_VNC_SASL
861 #include "vnc-auth-sasl.h" 893 #include "vnc-auth-sasl.h"
862 #endif 894 #endif
@@ -866,7 +898,7 @@
866 898
867 struct VncRectStat 899 struct VncRectStat
868 { 900 {
869@@ -128,6 +131,11 @@ 901@@ -142,6 +145,11 @@
870 QEMUTimer *timer; 902 QEMUTimer *timer;
871 int timer_interval; 903 int timer_interval;
872 int lsock; 904 int lsock;
@@ -878,7 +910,7 @@
878 DisplayState *ds; 910 DisplayState *ds;
879 kbd_layout_t *kbd_layout; 911 kbd_layout_t *kbd_layout;
880 int lock_key_sync; 912 int lock_key_sync;
881@@ -265,11 +273,19 @@ 913@@ -269,11 +277,19 @@
882 #ifdef CONFIG_VNC_SASL 914 #ifdef CONFIG_VNC_SASL
883 VncStateSASL sasl; 915 VncStateSASL sasl;
884 #endif 916 #endif
@@ -897,8 +929,8 @@
897+#endif 929+#endif
898 /* current output mode information */ 930 /* current output mode information */
899 VncWritePixels *write_pixels; 931 VncWritePixels *write_pixels;
900 DisplaySurface clientds; 932 PixelFormat client_pf;
901@@ -489,6 +505,8 @@ 933@@ -493,6 +509,8 @@
902 void vnc_write_u8(VncState *vs, uint8_t value); 934 void vnc_write_u8(VncState *vs, uint8_t value);
903 void vnc_flush(VncState *vs); 935 void vnc_flush(VncState *vs);
904 void vnc_read_when(VncState *vs, VncReadEvent *func, size_t expecting); 936 void vnc_read_when(VncState *vs, VncReadEvent *func, size_t expecting);
@@ -907,16 +939,13 @@
907 939
908 940
909 /* Buffer I/O functions */ 941 /* Buffer I/O functions */
910@@ -507,10 +525,11 @@ 942@@ -510,7 +528,8 @@
911 /* Buffer management */
912 void buffer_reserve(Buffer *buffer, size_t len);
913 int buffer_empty(Buffer *buffer);
914-uint8_t *buffer_end(Buffer *buffer);
915 void buffer_reset(Buffer *buffer); 943 void buffer_reset(Buffer *buffer);
916 void buffer_free(Buffer *buffer); 944 void buffer_free(Buffer *buffer);
917 void buffer_append(Buffer *buffer, const void *data, size_t len); 945 void buffer_append(Buffer *buffer, const void *data, size_t len);
946-
918+void buffer_advance(Buffer *buf, size_t len); 947+void buffer_advance(Buffer *buf, size_t len);
919+uint8_t *buffer_end(Buffer *buffer); 948+uint8_t *buffer_end(Buffer *buffer);
920 949
921
922 /* Misc helpers */ 950 /* Misc helpers */
951
diff --git a/main/xen/qemu_uclibc_configure.patch b/main/xen/qemu_uclibc_configure.patch
deleted file mode 100644
index c948f8c2f9..0000000000
--- a/main/xen/qemu_uclibc_configure.patch
+++ /dev/null
@@ -1,48 +0,0 @@
1--- a/tools/qemu-xen/configure
2+++ b/tools/qemu-xen/configure
3@@ -2457,19 +2457,44 @@ EOF
4 fi
5 fi
6
7+##########################################
8+# Do we need libm
9+cat > $TMPC << EOF
10+#include <math.h>
11+int main(void) { return isnan(sin(0.0)); }
12+EOF
13+if compile_prog "" "" ; then
14+ :
15+elif compile_prog "" "-lm" ; then
16+ LIBS="-lm $LIBS"
17+ libs_qga="-lm $libs_qga"
18+else
19+ echo
20+ echo "Error: libm check failed"
21+ echo
22+ exit 1
23+fi
24
25 ##########################################
26 # Do we need librt
27+# uClibc provides 2 versions of clock_gettime(), one with realtime
28+# support and one without. This means that the clock_gettime() don't
29+# need -lrt. We still need it for timer_create() so we check for this
30+# function in addition.
31 cat > $TMPC <<EOF
32 #include <signal.h>
33 #include <time.h>
34-int main(void) { clockid_t id; return clock_gettime(id, NULL); }
35+int main(void) {
36+ timer_create(CLOCK_REALTIME, NULL, NULL);
37+ return clock_gettime(CLOCK_REALTIME, NULL);
38+}
39 EOF
40
41 if compile_prog "" "" ; then
42 :
43 elif compile_prog "" "-lrt" ; then
44 LIBS="-lrt $LIBS"
45+ libs_qga="-lrt $libs_qga"
46 fi
47
48 if test "$darwin" != "yes" -a "$mingw32" != "yes" -a "$solaris" != yes -a \
diff --git a/main/xen/xsa45-4.2.patch b/main/xen/xsa45-4.2.patch
deleted file mode 100644
index dfdfdea64b..0000000000
--- a/main/xen/xsa45-4.2.patch
+++ /dev/null
@@ -1,1133 +0,0 @@
1diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
2index 26a7f12..b97ac6d 100644
3--- a/xen/arch/x86/domain.c
4+++ b/xen/arch/x86/domain.c
5@@ -73,8 +73,6 @@ void (*dead_idle) (void) __read_mostly = default_dead_idle;
6 static void paravirt_ctxt_switch_from(struct vcpu *v);
7 static void paravirt_ctxt_switch_to(struct vcpu *v);
8
9-static void vcpu_destroy_pagetables(struct vcpu *v);
10-
11 static void default_idle(void)
12 {
13 local_irq_disable();
14@@ -860,6 +858,9 @@ int arch_set_info_guest(
15
16 if ( !v->is_initialised )
17 {
18+ if ( !compat && !(flags & VGCF_in_kernel) && !c.nat->ctrlreg[1] )
19+ return -EINVAL;
20+
21 v->arch.pv_vcpu.ldt_base = c(ldt_base);
22 v->arch.pv_vcpu.ldt_ents = c(ldt_ents);
23 }
24@@ -957,24 +958,44 @@ int arch_set_info_guest(
25 if ( rc != 0 )
26 return rc;
27
28+ set_bit(_VPF_in_reset, &v->pause_flags);
29+
30 if ( !compat )
31- {
32 cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]);
33- cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
34-
35- if ( !cr3_page )
36- {
37- destroy_gdt(v);
38- return -EINVAL;
39- }
40- if ( !paging_mode_refcounts(d)
41- && !get_page_type(cr3_page, PGT_base_page_table) )
42- {
43- put_page(cr3_page);
44- destroy_gdt(v);
45- return -EINVAL;
46- }
47+#ifdef CONFIG_COMPAT
48+ else
49+ cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]);
50+#endif
51+ cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
52
53+ if ( !cr3_page )
54+ rc = -EINVAL;
55+ else if ( paging_mode_refcounts(d) )
56+ /* nothing */;
57+ else if ( cr3_page == v->arch.old_guest_table )
58+ {
59+ v->arch.old_guest_table = NULL;
60+ put_page(cr3_page);
61+ }
62+ else
63+ {
64+ /*
65+ * Since v->arch.guest_table{,_user} are both NULL, this effectively
66+ * is just a call to put_old_guest_table().
67+ */
68+ if ( !compat )
69+ rc = vcpu_destroy_pagetables(v);
70+ if ( !rc )
71+ rc = get_page_type_preemptible(cr3_page,
72+ !compat ? PGT_root_page_table
73+ : PGT_l3_page_table);
74+ if ( rc == -EINTR )
75+ rc = -EAGAIN;
76+ }
77+ if ( rc )
78+ /* handled below */;
79+ else if ( !compat )
80+ {
81 v->arch.guest_table = pagetable_from_page(cr3_page);
82 #ifdef __x86_64__
83 if ( c.nat->ctrlreg[1] )
84@@ -982,56 +1003,44 @@ int arch_set_info_guest(
85 cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
86 cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
87
88- if ( !cr3_page ||
89- (!paging_mode_refcounts(d)
90- && !get_page_type(cr3_page, PGT_base_page_table)) )
91+ if ( !cr3_page )
92+ rc = -EINVAL;
93+ else if ( !paging_mode_refcounts(d) )
94 {
95- if (cr3_page)
96- put_page(cr3_page);
97- cr3_page = pagetable_get_page(v->arch.guest_table);
98- v->arch.guest_table = pagetable_null();
99- if ( paging_mode_refcounts(d) )
100- put_page(cr3_page);
101- else
102- put_page_and_type(cr3_page);
103- destroy_gdt(v);
104- return -EINVAL;
105+ rc = get_page_type_preemptible(cr3_page, PGT_root_page_table);
106+ switch ( rc )
107+ {
108+ case -EINTR:
109+ rc = -EAGAIN;
110+ case -EAGAIN:
111+ v->arch.old_guest_table =
112+ pagetable_get_page(v->arch.guest_table);
113+ v->arch.guest_table = pagetable_null();
114+ break;
115+ }
116 }
117-
118- v->arch.guest_table_user = pagetable_from_page(cr3_page);
119- }
120- else if ( !(flags & VGCF_in_kernel) )
121- {
122- destroy_gdt(v);
123- return -EINVAL;
124+ if ( !rc )
125+ v->arch.guest_table_user = pagetable_from_page(cr3_page);
126 }
127 }
128 else
129 {
130 l4_pgentry_t *l4tab;
131
132- cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]);
133- cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
134-
135- if ( !cr3_page)
136- {
137- destroy_gdt(v);
138- return -EINVAL;
139- }
140-
141- if (!paging_mode_refcounts(d)
142- && !get_page_type(cr3_page, PGT_l3_page_table) )
143- {
144- put_page(cr3_page);
145- destroy_gdt(v);
146- return -EINVAL;
147- }
148-
149 l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
150 *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
151 _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
152 #endif
153 }
154+ if ( rc )
155+ {
156+ if ( cr3_page )
157+ put_page(cr3_page);
158+ destroy_gdt(v);
159+ return rc;
160+ }
161+
162+ clear_bit(_VPF_in_reset, &v->pause_flags);
163
164 if ( v->vcpu_id == 0 )
165 update_domain_wallclock_time(d);
166@@ -1053,17 +1062,16 @@ int arch_set_info_guest(
167 #undef c
168 }
169
170-void arch_vcpu_reset(struct vcpu *v)
171+int arch_vcpu_reset(struct vcpu *v)
172 {
173 if ( !is_hvm_vcpu(v) )
174 {
175 destroy_gdt(v);
176- vcpu_destroy_pagetables(v);
177- }
178- else
179- {
180- vcpu_end_shutdown_deferral(v);
181+ return vcpu_destroy_pagetables(v);
182 }
183+
184+ vcpu_end_shutdown_deferral(v);
185+ return 0;
186 }
187
188 /*
189@@ -2069,63 +2077,6 @@ static int relinquish_memory(
190 return ret;
191 }
192
193-static void vcpu_destroy_pagetables(struct vcpu *v)
194-{
195- struct domain *d = v->domain;
196- unsigned long pfn;
197-
198-#ifdef __x86_64__
199- if ( is_pv_32on64_vcpu(v) )
200- {
201- pfn = l4e_get_pfn(*(l4_pgentry_t *)
202- __va(pagetable_get_paddr(v->arch.guest_table)));
203-
204- if ( pfn != 0 )
205- {
206- if ( paging_mode_refcounts(d) )
207- put_page(mfn_to_page(pfn));
208- else
209- put_page_and_type(mfn_to_page(pfn));
210- }
211-
212- l4e_write(
213- (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
214- l4e_empty());
215-
216- v->arch.cr3 = 0;
217- return;
218- }
219-#endif
220-
221- pfn = pagetable_get_pfn(v->arch.guest_table);
222- if ( pfn != 0 )
223- {
224- if ( paging_mode_refcounts(d) )
225- put_page(mfn_to_page(pfn));
226- else
227- put_page_and_type(mfn_to_page(pfn));
228- v->arch.guest_table = pagetable_null();
229- }
230-
231-#ifdef __x86_64__
232- /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
233- pfn = pagetable_get_pfn(v->arch.guest_table_user);
234- if ( pfn != 0 )
235- {
236- if ( !is_pv_32bit_vcpu(v) )
237- {
238- if ( paging_mode_refcounts(d) )
239- put_page(mfn_to_page(pfn));
240- else
241- put_page_and_type(mfn_to_page(pfn));
242- }
243- v->arch.guest_table_user = pagetable_null();
244- }
245-#endif
246-
247- v->arch.cr3 = 0;
248-}
249-
250 int domain_relinquish_resources(struct domain *d)
251 {
252 int ret;
253@@ -2143,7 +2094,11 @@ int domain_relinquish_resources(struct domain *d)
254
255 /* Drop the in-use references to page-table bases. */
256 for_each_vcpu ( d, v )
257- vcpu_destroy_pagetables(v);
258+ {
259+ ret = vcpu_destroy_pagetables(v);
260+ if ( ret )
261+ return ret;
262+ }
263
264 if ( !is_hvm_domain(d) )
265 {
266diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
267index 3d471a5..efacc98 100644
268--- a/xen/arch/x86/hvm/hvm.c
269+++ b/xen/arch/x86/hvm/hvm.c
270@@ -3509,8 +3509,11 @@ static void hvm_s3_suspend(struct domain *d)
271
272 for_each_vcpu ( d, v )
273 {
274+ int rc;
275+
276 vlapic_reset(vcpu_vlapic(v));
277- vcpu_reset(v);
278+ rc = vcpu_reset(v);
279+ ASSERT(!rc);
280 }
281
282 vpic_reset(d);
283diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
284index 52d111b..7778342 100644
285--- a/xen/arch/x86/hvm/vlapic.c
286+++ b/xen/arch/x86/hvm/vlapic.c
287@@ -252,10 +252,13 @@ static void vlapic_init_sipi_action(unsigned long _vcpu)
288 {
289 case APIC_DM_INIT: {
290 bool_t fpu_initialised;
291+ int rc;
292+
293 domain_lock(target->domain);
294 /* Reset necessary VCPU state. This does not include FPU state. */
295 fpu_initialised = target->fpu_initialised;
296- vcpu_reset(target);
297+ rc = vcpu_reset(target);
298+ ASSERT(!rc);
299 target->fpu_initialised = fpu_initialised;
300 vlapic_reset(vcpu_vlapic(target));
301 domain_unlock(target->domain);
302diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
303index 8444610..055f307 100644
304--- a/xen/arch/x86/mm.c
305+++ b/xen/arch/x86/mm.c
306@@ -1241,7 +1241,16 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
307 #endif
308
309 if ( unlikely(partial > 0) )
310+ {
311+ ASSERT(preemptible >= 0);
312 return __put_page_type(l3e_get_page(l3e), preemptible);
313+ }
314+
315+ if ( preemptible < 0 )
316+ {
317+ current->arch.old_guest_table = l3e_get_page(l3e);
318+ return 0;
319+ }
320
321 return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible);
322 }
323@@ -1254,7 +1263,17 @@ static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
324 (l4e_get_pfn(l4e) != pfn) )
325 {
326 if ( unlikely(partial > 0) )
327+ {
328+ ASSERT(preemptible >= 0);
329 return __put_page_type(l4e_get_page(l4e), preemptible);
330+ }
331+
332+ if ( preemptible < 0 )
333+ {
334+ current->arch.old_guest_table = l4e_get_page(l4e);
335+ return 0;
336+ }
337+
338 return put_page_and_type_preemptible(l4e_get_page(l4e), preemptible);
339 }
340 return 1;
341@@ -1549,12 +1568,17 @@ static int alloc_l3_table(struct page_info *page, int preemptible)
342 if ( rc < 0 && rc != -EAGAIN && rc != -EINTR )
343 {
344 MEM_LOG("Failure in alloc_l3_table: entry %d", i);
345+ if ( i )
346+ {
347+ page->nr_validated_ptes = i;
348+ page->partial_pte = 0;
349+ current->arch.old_guest_table = page;
350+ }
351 while ( i-- > 0 )
352 {
353 if ( !is_guest_l3_slot(i) )
354 continue;
355 unadjust_guest_l3e(pl3e[i], d);
356- put_page_from_l3e(pl3e[i], pfn, 0, 0);
357 }
358 }
359
360@@ -1584,22 +1608,24 @@ static int alloc_l4_table(struct page_info *page, int preemptible)
361 page->nr_validated_ptes = i;
362 page->partial_pte = partial ?: 1;
363 }
364- else if ( rc == -EINTR )
365+ else if ( rc < 0 )
366 {
367+ if ( rc != -EINTR )
368+ MEM_LOG("Failure in alloc_l4_table: entry %d", i);
369 if ( i )
370 {
371 page->nr_validated_ptes = i;
372 page->partial_pte = 0;
373- rc = -EAGAIN;
374+ if ( rc == -EINTR )
375+ rc = -EAGAIN;
376+ else
377+ {
378+ if ( current->arch.old_guest_table )
379+ page->nr_validated_ptes++;
380+ current->arch.old_guest_table = page;
381+ }
382 }
383 }
384- else if ( rc < 0 )
385- {
386- MEM_LOG("Failure in alloc_l4_table: entry %d", i);
387- while ( i-- > 0 )
388- if ( is_guest_l4_slot(d, i) )
389- put_page_from_l4e(pl4e[i], pfn, 0, 0);
390- }
391 if ( rc < 0 )
392 return rc;
393
394@@ -2047,7 +2073,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
395 pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
396 }
397
398- put_page_from_l3e(ol3e, pfn, 0, 0);
399+ put_page_from_l3e(ol3e, pfn, 0, -preemptible);
400 return rc;
401 }
402
403@@ -2110,7 +2136,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
404 return -EFAULT;
405 }
406
407- put_page_from_l4e(ol4e, pfn, 0, 0);
408+ put_page_from_l4e(ol4e, pfn, 0, -preemptible);
409 return rc;
410 }
411
412@@ -2268,7 +2294,15 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
413 PRtype_info ": caf=%08lx taf=%" PRtype_info,
414 page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
415 type, page->count_info, page->u.inuse.type_info);
416- page->u.inuse.type_info = 0;
417+ if ( page != current->arch.old_guest_table )
418+ page->u.inuse.type_info = 0;
419+ else
420+ {
421+ ASSERT((page->u.inuse.type_info &
422+ (PGT_count_mask | PGT_validated)) == 1);
423+ get_page_light(page);
424+ page->u.inuse.type_info |= PGT_partial;
425+ }
426 }
427 else
428 {
429@@ -2808,49 +2842,150 @@ static void put_superpage(unsigned long mfn)
430
431 #endif
432
433+static int put_old_guest_table(struct vcpu *v)
434+{
435+ int rc;
436+
437+ if ( !v->arch.old_guest_table )
438+ return 0;
439+
440+ switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) )
441+ {
442+ case -EINTR:
443+ case -EAGAIN:
444+ return -EAGAIN;
445+ }
446+
447+ v->arch.old_guest_table = NULL;
448+
449+ return rc;
450+}
451+
452+int vcpu_destroy_pagetables(struct vcpu *v)
453+{
454+ unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
455+ struct page_info *page;
456+ int rc = put_old_guest_table(v);
457+
458+ if ( rc )
459+ return rc;
460+
461+#ifdef __x86_64__
462+ if ( is_pv_32on64_vcpu(v) )
463+ mfn = l4e_get_pfn(*(l4_pgentry_t *)mfn_to_virt(mfn));
464+#endif
465+
466+ if ( mfn )
467+ {
468+ page = mfn_to_page(mfn);
469+ if ( paging_mode_refcounts(v->domain) )
470+ put_page(page);
471+ else
472+ rc = put_page_and_type_preemptible(page, 1);
473+ }
474+
475+#ifdef __x86_64__
476+ if ( is_pv_32on64_vcpu(v) )
477+ {
478+ if ( !rc )
479+ l4e_write(
480+ (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
481+ l4e_empty());
482+ }
483+ else
484+#endif
485+ if ( !rc )
486+ {
487+ v->arch.guest_table = pagetable_null();
488+
489+#ifdef __x86_64__
490+ /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
491+ mfn = pagetable_get_pfn(v->arch.guest_table_user);
492+ if ( mfn )
493+ {
494+ page = mfn_to_page(mfn);
495+ if ( paging_mode_refcounts(v->domain) )
496+ put_page(page);
497+ else
498+ rc = put_page_and_type_preemptible(page, 1);
499+ }
500+ if ( !rc )
501+ v->arch.guest_table_user = pagetable_null();
502+#endif
503+ }
504+
505+ v->arch.cr3 = 0;
506+
507+ return rc;
508+}
509
510 int new_guest_cr3(unsigned long mfn)
511 {
512 struct vcpu *curr = current;
513 struct domain *d = curr->domain;
514- int okay;
515+ int rc;
516 unsigned long old_base_mfn;
517
518 #ifdef __x86_64__
519 if ( is_pv_32on64_domain(d) )
520 {
521- okay = paging_mode_refcounts(d)
522- ? 0 /* Old code was broken, but what should it be? */
523- : mod_l4_entry(
524+ rc = paging_mode_refcounts(d)
525+ ? -EINVAL /* Old code was broken, but what should it be? */
526+ : mod_l4_entry(
527 __va(pagetable_get_paddr(curr->arch.guest_table)),
528 l4e_from_pfn(
529 mfn,
530 (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
531- pagetable_get_pfn(curr->arch.guest_table), 0, 0, curr) == 0;
532- if ( unlikely(!okay) )
533+ pagetable_get_pfn(curr->arch.guest_table), 0, 1, curr);
534+ switch ( rc )
535 {
536+ case 0:
537+ break;
538+ case -EINTR:
539+ case -EAGAIN:
540+ return -EAGAIN;
541+ default:
542 MEM_LOG("Error while installing new compat baseptr %lx", mfn);
543- return 0;
544+ return rc;
545 }
546
547 invalidate_shadow_ldt(curr, 0);
548 write_ptbase(curr);
549
550- return 1;
551+ return 0;
552 }
553 #endif
554- okay = paging_mode_refcounts(d)
555- ? get_page_from_pagenr(mfn, d)
556- : !get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 0);
557- if ( unlikely(!okay) )
558+ rc = put_old_guest_table(curr);
559+ if ( unlikely(rc) )
560+ return rc;
561+
562+ old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
563+ /*
564+ * This is particularly important when getting restarted after the
565+ * previous attempt got preempted in the put-old-MFN phase.
566+ */
567+ if ( old_base_mfn == mfn )
568 {
569- MEM_LOG("Error while installing new baseptr %lx", mfn);
570+ write_ptbase(curr);
571 return 0;
572 }
573
574- invalidate_shadow_ldt(curr, 0);
575+ rc = paging_mode_refcounts(d)
576+ ? (get_page_from_pagenr(mfn, d) ? 0 : -EINVAL)
577+ : get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 1);
578+ switch ( rc )
579+ {
580+ case 0:
581+ break;
582+ case -EINTR:
583+ case -EAGAIN:
584+ return -EAGAIN;
585+ default:
586+ MEM_LOG("Error while installing new baseptr %lx", mfn);
587+ return rc;
588+ }
589
590- old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
591+ invalidate_shadow_ldt(curr, 0);
592
593 curr->arch.guest_table = pagetable_from_pfn(mfn);
594 update_cr3(curr);
595@@ -2859,13 +2994,25 @@ int new_guest_cr3(unsigned long mfn)
596
597 if ( likely(old_base_mfn != 0) )
598 {
599+ struct page_info *page = mfn_to_page(old_base_mfn);
600+
601 if ( paging_mode_refcounts(d) )
602- put_page(mfn_to_page(old_base_mfn));
603+ put_page(page);
604 else
605- put_page_and_type(mfn_to_page(old_base_mfn));
606+ switch ( rc = put_page_and_type_preemptible(page, 1) )
607+ {
608+ case -EINTR:
609+ rc = -EAGAIN;
610+ case -EAGAIN:
611+ curr->arch.old_guest_table = page;
612+ break;
613+ default:
614+ BUG_ON(rc);
615+ break;
616+ }
617 }
618
619- return 1;
620+ return rc;
621 }
622
623 static struct domain *get_pg_owner(domid_t domid)
624@@ -2994,12 +3141,29 @@ long do_mmuext_op(
625 unsigned int foreigndom)
626 {
627 struct mmuext_op op;
628- int rc = 0, i = 0, okay;
629 unsigned long type;
630- unsigned int done = 0;
631+ unsigned int i = 0, done = 0;
632 struct vcpu *curr = current;
633 struct domain *d = curr->domain;
634 struct domain *pg_owner;
635+ int okay, rc = put_old_guest_table(curr);
636+
637+ if ( unlikely(rc) )
638+ {
639+ if ( likely(rc == -EAGAIN) )
640+ rc = hypercall_create_continuation(
641+ __HYPERVISOR_mmuext_op, "hihi", uops, count, pdone,
642+ foreigndom);
643+ return rc;
644+ }
645+
646+ if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
647+ likely(guest_handle_is_null(uops)) )
648+ {
649+ /* See the curr->arch.old_guest_table related
650+ * hypercall_create_continuation() below. */
651+ return (int)foreigndom;
652+ }
653
654 if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
655 {
656@@ -3024,7 +3188,7 @@ long do_mmuext_op(
657
658 for ( i = 0; i < count; i++ )
659 {
660- if ( hypercall_preempt_check() )
661+ if ( curr->arch.old_guest_table || hypercall_preempt_check() )
662 {
663 rc = -EAGAIN;
664 break;
665@@ -3088,21 +3252,17 @@ long do_mmuext_op(
666 }
667
668 if ( (rc = xsm_memory_pin_page(d, pg_owner, page)) != 0 )
669- {
670- put_page_and_type(page);
671 okay = 0;
672- break;
673- }
674-
675- if ( unlikely(test_and_set_bit(_PGT_pinned,
676- &page->u.inuse.type_info)) )
677+ else if ( unlikely(test_and_set_bit(_PGT_pinned,
678+ &page->u.inuse.type_info)) )
679 {
680 MEM_LOG("Mfn %lx already pinned", page_to_mfn(page));
681- put_page_and_type(page);
682 okay = 0;
683- break;
684 }
685
686+ if ( unlikely(!okay) )
687+ goto pin_drop;
688+
689 /* A page is dirtied when its pin status is set. */
690 paging_mark_dirty(pg_owner, page_to_mfn(page));
691
692@@ -3116,7 +3276,13 @@ long do_mmuext_op(
693 &page->u.inuse.type_info));
694 spin_unlock(&pg_owner->page_alloc_lock);
695 if ( drop_ref )
696- put_page_and_type(page);
697+ {
698+ pin_drop:
699+ if ( type == PGT_l1_page_table )
700+ put_page_and_type(page);
701+ else
702+ curr->arch.old_guest_table = page;
703+ }
704 }
705
706 break;
707@@ -3144,7 +3310,17 @@ long do_mmuext_op(
708 break;
709 }
710
711- put_page_and_type(page);
712+ switch ( rc = put_page_and_type_preemptible(page, 1) )
713+ {
714+ case -EINTR:
715+ case -EAGAIN:
716+ curr->arch.old_guest_table = page;
717+ rc = 0;
718+ break;
719+ default:
720+ BUG_ON(rc);
721+ break;
722+ }
723 put_page(page);
724
725 /* A page is dirtied when its pin status is cleared. */
726@@ -3154,8 +3330,13 @@ long do_mmuext_op(
727 }
728
729 case MMUEXT_NEW_BASEPTR:
730- okay = (!paging_mode_translate(d)
731- && new_guest_cr3(op.arg1.mfn));
732+ if ( paging_mode_translate(d) )
733+ okay = 0;
734+ else
735+ {
736+ rc = new_guest_cr3(op.arg1.mfn);
737+ okay = !rc;
738+ }
739 break;
740
741
742@@ -3169,29 +3350,56 @@ long do_mmuext_op(
743 break;
744 }
745
746+ old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
747+ /*
748+ * This is particularly important when getting restarted after the
749+ * previous attempt got preempted in the put-old-MFN phase.
750+ */
751+ if ( old_mfn == op.arg1.mfn )
752+ break;
753+
754 if ( op.arg1.mfn != 0 )
755 {
756 if ( paging_mode_refcounts(d) )
757 okay = get_page_from_pagenr(op.arg1.mfn, d);
758 else
759- okay = !get_page_and_type_from_pagenr(
760- op.arg1.mfn, PGT_root_page_table, d, 0, 0);
761+ {
762+ rc = get_page_and_type_from_pagenr(
763+ op.arg1.mfn, PGT_root_page_table, d, 0, 1);
764+ okay = !rc;
765+ }
766 if ( unlikely(!okay) )
767 {
768- MEM_LOG("Error while installing new mfn %lx", op.arg1.mfn);
769+ if ( rc == -EINTR )
770+ rc = -EAGAIN;
771+ else if ( rc != -EAGAIN )
772+ MEM_LOG("Error while installing new mfn %lx",
773+ op.arg1.mfn);
774 break;
775 }
776 }
777
778- old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
779 curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
780
781 if ( old_mfn != 0 )
782 {
783+ struct page_info *page = mfn_to_page(old_mfn);
784+
785 if ( paging_mode_refcounts(d) )
786- put_page(mfn_to_page(old_mfn));
787+ put_page(page);
788 else
789- put_page_and_type(mfn_to_page(old_mfn));
790+ switch ( rc = put_page_and_type_preemptible(page, 1) )
791+ {
792+ case -EINTR:
793+ rc = -EAGAIN;
794+ case -EAGAIN:
795+ curr->arch.old_guest_table = page;
796+ okay = 0;
797+ break;
798+ default:
799+ BUG_ON(rc);
800+ break;
801+ }
802 }
803
804 break;
805@@ -3433,9 +3641,27 @@ long do_mmuext_op(
806 }
807
808 if ( rc == -EAGAIN )
809+ {
810+ ASSERT(i < count);
811 rc = hypercall_create_continuation(
812 __HYPERVISOR_mmuext_op, "hihi",
813 uops, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
814+ }
815+ else if ( curr->arch.old_guest_table )
816+ {
817+ XEN_GUEST_HANDLE(void) null;
818+
819+ ASSERT(rc || i == count);
820+ set_xen_guest_handle(null, NULL);
821+ /*
822+ * In order to have a way to communicate the final return value to
823+ * our continuation, we pass this in place of "foreigndom", building
824+ * on the fact that this argument isn't needed anymore.
825+ */
826+ rc = hypercall_create_continuation(
827+ __HYPERVISOR_mmuext_op, "hihi", null,
828+ MMU_UPDATE_PREEMPTED, null, rc);
829+ }
830
831 put_pg_owner(pg_owner);
832
833@@ -3462,11 +3688,28 @@ long do_mmu_update(
834 void *va;
835 unsigned long gpfn, gmfn, mfn;
836 struct page_info *page;
837- int rc = 0, i = 0;
838- unsigned int cmd, done = 0, pt_dom;
839- struct vcpu *v = current;
840+ unsigned int cmd, i = 0, done = 0, pt_dom;
841+ struct vcpu *curr = current, *v = curr;
842 struct domain *d = v->domain, *pt_owner = d, *pg_owner;
843 struct domain_mmap_cache mapcache;
844+ int rc = put_old_guest_table(curr);
845+
846+ if ( unlikely(rc) )
847+ {
848+ if ( likely(rc == -EAGAIN) )
849+ rc = hypercall_create_continuation(
850+ __HYPERVISOR_mmu_update, "hihi", ureqs, count, pdone,
851+ foreigndom);
852+ return rc;
853+ }
854+
855+ if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
856+ likely(guest_handle_is_null(ureqs)) )
857+ {
858+ /* See the curr->arch.old_guest_table related
859+ * hypercall_create_continuation() below. */
860+ return (int)foreigndom;
861+ }
862
863 if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
864 {
865@@ -3515,7 +3758,7 @@ long do_mmu_update(
866
867 for ( i = 0; i < count; i++ )
868 {
869- if ( hypercall_preempt_check() )
870+ if ( curr->arch.old_guest_table || hypercall_preempt_check() )
871 {
872 rc = -EAGAIN;
873 break;
874@@ -3696,9 +3939,27 @@ long do_mmu_update(
875 }
876
877 if ( rc == -EAGAIN )
878+ {
879+ ASSERT(i < count);
880 rc = hypercall_create_continuation(
881 __HYPERVISOR_mmu_update, "hihi",
882 ureqs, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
883+ }
884+ else if ( curr->arch.old_guest_table )
885+ {
886+ XEN_GUEST_HANDLE(void) null;
887+
888+ ASSERT(rc || i == count);
889+ set_xen_guest_handle(null, NULL);
890+ /*
891+ * In order to have a way to communicate the final return value to
892+ * our continuation, we pass this in place of "foreigndom", building
893+ * on the fact that this argument isn't needed anymore.
894+ */
895+ rc = hypercall_create_continuation(
896+ __HYPERVISOR_mmu_update, "hihi", null,
897+ MMU_UPDATE_PREEMPTED, null, rc);
898+ }
899
900 put_pg_owner(pg_owner);
901
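[Editor's note: the do_mmu_update hunks above, like the matching do_mmuext_op hunks earlier in this removed xsa45-4.2.patch, set up a small protocol between the hypercall and its own continuation: once the batch is finished but deferred page-table teardown is still pending, the continuation is created with a null guest handle, count == MMU_UPDATE_PREEMPTED, and the final return value parked in the otherwise-unused foreigndom slot. A reduced, non-authoritative skeleton of the flow as modified above; error handling and the actual batch processing are elided:]

    long do_mmu_update(XEN_GUEST_HANDLE(mmu_update_t) ureqs, unsigned int count,
                       XEN_GUEST_HANDLE(uint) pdone, unsigned int foreigndom)
    {
        struct vcpu *curr = current;
        int rc = put_old_guest_table(curr);   /* drain deferred teardown first */

        if ( unlikely(rc) )
            return rc == -EAGAIN              /* not done yet: come back later */
                   ? hypercall_create_continuation(__HYPERVISOR_mmu_update, "hihi",
                                                   ureqs, count, pdone, foreigndom)
                   : rc;

        /* Continuation re-entry: no work left, just report the stashed result. */
        if ( count == MMU_UPDATE_PREEMPTED && guest_handle_is_null(ureqs) )
            return (int)foreigndom;

        /* ... process up to 'count' requests, stopping early on preemption ... */

        if ( curr->arch.old_guest_table )     /* batch done, cleanup pending */
        {
            XEN_GUEST_HANDLE(void) null;

            set_xen_guest_handle(null, NULL);
            rc = hypercall_create_continuation(__HYPERVISOR_mmu_update, "hihi",
                                               null, MMU_UPDATE_PREEMPTED, null, rc);
        }
        return rc;
    }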
902diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
903index 692281a..eada470 100644
904--- a/xen/arch/x86/traps.c
905+++ b/xen/arch/x86/traps.c
906@@ -2407,12 +2407,23 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
907 #endif
908 }
909 page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
910- rc = page ? new_guest_cr3(page_to_mfn(page)) : 0;
911 if ( page )
912+ {
913+ rc = new_guest_cr3(page_to_mfn(page));
914 put_page(page);
915+ }
916+ else
917+ rc = -EINVAL;
918 domain_unlock(v->domain);
919- if ( rc == 0 ) /* not okay */
920+ switch ( rc )
921+ {
922+ case 0:
923+ break;
924+ case -EAGAIN: /* retry after preemption */
925+ goto skip;
926+ default: /* not okay */
927 goto fail;
928+ }
929 break;
930 }
931
932diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
933index fb7baca..ef7822b 100644
934--- a/xen/arch/x86/x86_64/compat/mm.c
935+++ b/xen/arch/x86/x86_64/compat/mm.c
936@@ -268,6 +268,13 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
937 int rc = 0;
938 XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;
939
940+ if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
941+ likely(guest_handle_is_null(cmp_uops)) )
942+ {
943+ set_xen_guest_handle(nat_ops, NULL);
944+ return do_mmuext_op(nat_ops, count, pdone, foreigndom);
945+ }
946+
947 preempt_mask = count & MMU_UPDATE_PREEMPTED;
948 count ^= preempt_mask;
949
950@@ -365,17 +372,23 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
951 : mcs->call.args[1];
952 unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
953
954- BUG_ON(left == arg1);
955+ BUG_ON(left == arg1 && left != i);
956 BUG_ON(left > count);
957 guest_handle_add_offset(nat_ops, i - left);
958 guest_handle_subtract_offset(cmp_uops, left);
959 left = 1;
960- BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops));
961- BUG_ON(left != arg1);
962- if (!test_bit(_MCSF_in_multicall, &mcs->flags))
963- regs->_ecx += count - i;
964+ if ( arg1 != MMU_UPDATE_PREEMPTED )
965+ {
966+ BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops,
967+ cmp_uops));
968+ if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
969+ regs->_ecx += count - i;
970+ else
971+ mcs->compat_call.args[1] += count - i;
972+ }
973 else
974- mcs->compat_call.args[1] += count - i;
975+ BUG_ON(hypercall_xlat_continuation(&left, 0));
976+ BUG_ON(left != arg1);
977 }
978 else
979 BUG_ON(err > 0);
980diff --git a/xen/common/compat/domain.c b/xen/common/compat/domain.c
981index 40a0287..9ddaa38 100644
982--- a/xen/common/compat/domain.c
983+++ b/xen/common/compat/domain.c
984@@ -50,6 +50,10 @@ int compat_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
985 rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, cmp_ctxt);
986 domain_unlock(d);
987
988+ if ( rc == -EAGAIN )
989+ rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
990+ cmd, vcpuid, arg);
991+
992 xfree(cmp_ctxt);
993 break;
994 }
995diff --git a/xen/common/domain.c b/xen/common/domain.c
996index c09fb73..89ab922 100644
997--- a/xen/common/domain.c
998+++ b/xen/common/domain.c
999@@ -779,14 +779,18 @@ void domain_unpause_by_systemcontroller(struct domain *d)
1000 domain_unpause(d);
1001 }
1002
1003-void vcpu_reset(struct vcpu *v)
1004+int vcpu_reset(struct vcpu *v)
1005 {
1006 struct domain *d = v->domain;
1007+ int rc;
1008
1009 vcpu_pause(v);
1010 domain_lock(d);
1011
1012- arch_vcpu_reset(v);
1013+ set_bit(_VPF_in_reset, &v->pause_flags);
1014+ rc = arch_vcpu_reset(v);
1015+ if ( rc )
1016+ goto out_unlock;
1017
1018 set_bit(_VPF_down, &v->pause_flags);
1019
1020@@ -802,9 +806,13 @@ void vcpu_reset(struct vcpu *v)
1021 #endif
1022 cpumask_clear(v->cpu_affinity_tmp);
1023 clear_bit(_VPF_blocked, &v->pause_flags);
1024+ clear_bit(_VPF_in_reset, &v->pause_flags);
1025
1026+ out_unlock:
1027 domain_unlock(v->domain);
1028 vcpu_unpause(v);
1029+
1030+ return rc;
1031 }
1032
1033
1034@@ -841,6 +849,11 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
1035 domain_unlock(d);
1036
1037 free_vcpu_guest_context(ctxt);
1038+
1039+ if ( rc == -EAGAIN )
1040+ rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
1041+ cmd, vcpuid, arg);
1042+
1043 break;
1044
1045 case VCPUOP_up: {
1046diff --git a/xen/common/domctl.c b/xen/common/domctl.c
1047index cbc8146..b3bfb38 100644
1048--- a/xen/common/domctl.c
1049+++ b/xen/common/domctl.c
1050@@ -307,8 +307,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
1051
1052 if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
1053 {
1054- vcpu_reset(v);
1055- ret = 0;
1056+ ret = vcpu_reset(v);
1057+ if ( ret == -EAGAIN )
1058+ ret = hypercall_create_continuation(
1059+ __HYPERVISOR_domctl, "h", u_domctl);
1060 goto svc_out;
1061 }
1062
1063@@ -337,6 +339,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
1064 domain_pause(d);
1065 ret = arch_set_info_guest(v, c);
1066 domain_unpause(d);
1067+
1068+ if ( ret == -EAGAIN )
1069+ ret = hypercall_create_continuation(
1070+ __HYPERVISOR_domctl, "h", u_domctl);
1071 }
1072
1073 svc_out:
1074diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
1075index aecee68..898f63a 100644
1076--- a/xen/include/asm-x86/domain.h
1077+++ b/xen/include/asm-x86/domain.h
1078@@ -464,6 +464,7 @@ struct arch_vcpu
1079 pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
1080 #endif
1081 pagetable_t guest_table; /* (MFN) guest notion of cr3 */
1082+ struct page_info *old_guest_table; /* partially destructed pagetable */
1083 /* guest_table holds a ref to the page, and also a type-count unless
1084 * shadow refcounts are in use */
1085 pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */
1086diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
1087index ba92568..82cdde6 100644
1088--- a/xen/include/asm-x86/mm.h
1089+++ b/xen/include/asm-x86/mm.h
1090@@ -605,6 +605,7 @@ void audit_domains(void);
1091 int new_guest_cr3(unsigned long pfn);
1092 void make_cr3(struct vcpu *v, unsigned long mfn);
1093 void update_cr3(struct vcpu *v);
1094+int vcpu_destroy_pagetables(struct vcpu *);
1095 void propagate_page_fault(unsigned long addr, u16 error_code);
1096 void *do_page_walk(struct vcpu *v, unsigned long addr);
1097
1098diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
1099index d4ac50f..504a70f 100644
1100--- a/xen/include/xen/domain.h
1101+++ b/xen/include/xen/domain.h
1102@@ -13,7 +13,7 @@ typedef union {
1103 struct vcpu *alloc_vcpu(
1104 struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
1105 struct vcpu *alloc_dom0_vcpu0(void);
1106-void vcpu_reset(struct vcpu *v);
1107+int vcpu_reset(struct vcpu *);
1108
1109 struct xen_domctl_getdomaininfo;
1110 void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info);
1111@@ -67,7 +67,7 @@ void arch_dump_vcpu_info(struct vcpu *v);
1112
1113 void arch_dump_domain_info(struct domain *d);
1114
1115-void arch_vcpu_reset(struct vcpu *v);
1116+int arch_vcpu_reset(struct vcpu *);
1117
1118 extern spinlock_t vcpu_alloc_lock;
1119 bool_t domctl_lock_acquire(void);
1120diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
1121index b619269..b0715cb 100644
1122--- a/xen/include/xen/sched.h
1123+++ b/xen/include/xen/sched.h
1124@@ -644,6 +644,9 @@ static inline struct domain *next_domain_in_cpupool(
1125 /* VCPU is blocked due to missing mem_sharing ring. */
1126 #define _VPF_mem_sharing 6
1127 #define VPF_mem_sharing (1UL<<_VPF_mem_sharing)
1128+ /* VCPU is being reset. */
1129+#define _VPF_in_reset 7
1130+#define VPF_in_reset (1UL<<_VPF_in_reset)
1131
1132 static inline int vcpu_runnable(struct vcpu *v)
1133 {
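[Editor's note: the xsa45-4.2.patch hunks removed above all repeat one pattern at the "put" sites: a page-table page whose teardown could run for too long is not freed synchronously, but parked in curr->arch.old_guest_table so that the teardown is resumed across hypercall continuations (and drained by put_old_guest_table() on the next entry). A minimal sketch of a put site, mirroring the MMUEXT_UNPIN_TABLE and guest_table_user hunks above:]

    /* Preemptible put: either the reference is fully dropped now, or the page
     * is parked so the next hypercall entry can resume the teardown. */
    switch ( rc = put_page_and_type_preemptible(page, 1 /* preemptible */) )
    {
    case -EINTR:
    case -EAGAIN:
        curr->arch.old_guest_table = page;   /* resumed via put_old_guest_table() */
        rc = 0;
        break;
    default:
        BUG_ON(rc);                          /* anything else is a refcount bug */
        break;
    }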
diff --git a/main/xen/xsa52-4.2-unstable.patch b/main/xen/xsa52-4.2-unstable.patch
deleted file mode 100644
index 14db8a8a7f..0000000000
--- a/main/xen/xsa52-4.2-unstable.patch
+++ /dev/null
@@ -1,46 +0,0 @@
1x86/xsave: fix information leak on AMD CPUs
2
3Just like for FXSAVE/FXRSTOR, XSAVE/XRSTOR also don't save/restore the
4last instruction and operand pointers as well as the last opcode if
5there's no pending unmasked exception (see CVE-2006-1056 and commit
69747:4d667a139318).
7
8While the FXSR solution sits in the save path, I prefer to have this in
9the restore path because there the handling is simpler (namely in the
10context of the pending changes to properly save the selector values for
1132-bit guest code).
12
13Also this is using FFREE instead of EMMS, as it doesn't seem unlikely
14that in the future we may see CPUs with x87 and SSE/AVX but no MMX
15support. The goal here anyway is just to avoid an FPU stack overflow.
16I would have preferred to use FFREEP instead of FFREE (freeing two
17stack slots at once), but AMD doesn't document that instruction.
18
19This is CVE-2013-2076 / XSA-52.
20
21Signed-off-by: Jan Beulich <jbeulich@suse.com>
22
23--- a/xen/arch/x86/xstate.c
24+++ b/xen/arch/x86/xstate.c
25@@ -78,6 +78,21 @@ void xrstor(struct vcpu *v, uint64_t mas
26
27 struct xsave_struct *ptr = v->arch.xsave_area;
28
29+ /*
30+ * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
31+ * is pending. Clear the x87 state here by setting it to fixed
32+ * values. The hypervisor data segment can be sometimes 0 and
33+ * sometimes new user value. Both should be ok. Use the FPU saved
34+ * data block as a safe address because it should be in L1.
35+ */
36+ if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
37+ !(ptr->fpu_sse.fsw & 0x0080) &&
38+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
39+ asm volatile ( "fnclex\n\t" /* clear exceptions */
40+ "ffree %%st(7)\n\t" /* clear stack tag */
41+ "fildl %0" /* load to clear state */
42+ : : "m" (ptr->fpu_sse) );
43+
44 asm volatile (
45 ".byte " REX_PREFIX "0x0f,0xae,0x2f"
46 :
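[Editor's note: the condition guarding the workaround above may be easier to read pulled out into a helper. Bit 7 of the x87 status word (0x0080) is the Exception Summary bit, so the test reads: FP state is being restored, no unmasked x87 exception is pending, and the CPU is an AMD one - exactly the case in which FIP/FDP/FOP would otherwise be left holding a previous context's values. The helper name is hypothetical; the fields and constants are the ones used in the hunk above:]

    static inline bool_t xrstor_needs_amd_x87_clear(const struct xsave_struct *ptr,
                                                    uint64_t mask)
    {
        return (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
               !(ptr->fpu_sse.fsw & 0x0080) &&              /* ES bit clear */
               (boot_cpu_data.x86_vendor == X86_VENDOR_AMD);
    }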
diff --git a/main/xen/xsa53-4.2.patch b/main/xen/xsa53-4.2.patch
deleted file mode 100644
index eb8e79bed2..0000000000
--- a/main/xen/xsa53-4.2.patch
+++ /dev/null
@@ -1,57 +0,0 @@
1x86/xsave: recover from faults on XRSTOR
2
3Just like FXRSTOR, XRSTOR can raise #GP if bad content is being passed
4to it in the memory block (i.e. aspects not under the control of the
5hypervisor, other than e.g. proper alignment of the block).
6
7Also correct the comment explaining why FXRSTOR needs exception
8recovery code to not wrongly state that this can only be a result of
9the control tools passing a bad image.
10
11This is CVE-2013-2077 / XSA-53.
12
13Signed-off-by: Jan Beulich <jbeulich@suse.com>
14
15--- a/xen/arch/x86/i387.c
16+++ b/xen/arch/x86/i387.c
17@@ -53,7 +53,7 @@ static inline void fpu_fxrstor(struct vc
18 /*
19 * FXRSTOR can fault if passed a corrupted data block. We handle this
20 * possibility, which may occur if the block was passed to us by control
21- * tools, by silently clearing the block.
22+ * tools or through VCPUOP_initialise, by silently clearing the block.
23 */
24 asm volatile (
25 #ifdef __i386__
26--- a/xen/arch/x86/xstate.c
27+++ b/xen/arch/x86/xstate.c
28@@ -93,10 +93,25 @@ void xrstor(struct vcpu *v, uint64_t mas
29 "fildl %0" /* load to clear state */
30 : : "m" (ptr->fpu_sse) );
31
32- asm volatile (
33- ".byte " REX_PREFIX "0x0f,0xae,0x2f"
34- :
35- : "m" (*ptr), "a" (lmask), "d" (hmask), "D"(ptr) );
36+ /*
37+ * XRSTOR can fault if passed a corrupted data block. We handle this
38+ * possibility, which may occur if the block was passed to us by control
39+ * tools or through VCPUOP_initialise, by silently clearing the block.
40+ */
41+ asm volatile ( "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
42+ ".section .fixup,\"ax\"\n"
43+ "2: mov %5,%%ecx \n"
44+ " xor %1,%1 \n"
45+ " rep stosb \n"
46+ " lea %2,%0 \n"
47+ " mov %3,%1 \n"
48+ " jmp 1b \n"
49+ ".previous \n"
50+ _ASM_EXTABLE(1b, 2b)
51+ : "+&D" (ptr), "+&a" (lmask)
52+ : "m" (*ptr), "g" (lmask), "d" (hmask),
53+ "m" (xsave_cntxt_size)
54+ : "ecx" );
55 }
56
57 bool_t xsave_enabled(const struct vcpu *v)
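[Editor's note: the fixup added above is an instance of Xen's generic exception-table pattern: label 1 marks the instruction that may fault, _ASM_EXTABLE records a (fault address, fixup address) pair, and the code at the fixup label repairs state and jumps back past the faulting instruction. A generic illustration of the same pattern, not taken from the patch; probe_read is a hypothetical helper that reads a word and substitutes 0 if the access faults:]

    static inline unsigned long probe_read(const unsigned long *addr)
    {
        unsigned long val;

        asm volatile ( "1: mov (%[src]), %[val]\n"
                       "2:\n"
                       ".section .fixup,\"ax\"\n"
                       "3: xor %[val], %[val]\n"    /* faulted: substitute 0 */
                       "   jmp 2b\n"
                       ".previous\n"
                       _ASM_EXTABLE(1b, 3b)
                       : [val] "=r" (val)
                       : [src] "r" (addr)
                       : "memory" );
        return val;
    }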
diff --git a/main/xen/xsa54.patch b/main/xen/xsa54.patch
deleted file mode 100644
index 83c8993d6a..0000000000
--- a/main/xen/xsa54.patch
+++ /dev/null
@@ -1,24 +0,0 @@
1x86/xsave: properly check guest input to XSETBV
2
3Other than the HVM emulation path, the PV case so far failed to check
4that YMM state requires SSE state to be enabled, allowing for a #GP to
5occur upon passing the inputs to XSETBV inside the hypervisor.
6
7This is CVE-2013-2078 / XSA-54.
8
9Signed-off-by: Jan Beulich <jbeulich@suse.com>
10
11--- a/xen/arch/x86/traps.c
12+++ b/xen/arch/x86/traps.c
13@@ -2205,6 +2205,11 @@ static int emulate_privileged_op(struct
14 if ( !(new_xfeature & XSTATE_FP) || (new_xfeature & ~xfeature_mask) )
15 goto fail;
16
17+ /* YMM state takes SSE state as prerequisite. */
18+ if ( (xfeature_mask & new_xfeature & XSTATE_YMM) &&
19+ !(new_xfeature & XSTATE_SSE) )
20+ goto fail;
21+
22 v->arch.xcr0 = new_xfeature;
23 v->arch.xcr0_accum |= new_xfeature;
24 set_xcr0(new_xfeature);
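[Editor's note: with this hunk the PV XSETBV path enforces the same rules the HVM emulation path already did: XCR0.X87 must always be set, nothing outside the mask Xen itself enabled may be requested, and YMM (AVX) state may only be enabled together with SSE state - otherwise the real XSETBV executed just below would raise #GP inside the hypervisor. The same checks, gathered into one hypothetical helper for readability:]

    static bool_t new_xcr0_is_valid(uint64_t new_xfeature, uint64_t xfeature_mask)
    {
        /* x87 state must stay enabled; nothing beyond what Xen offers. */
        if ( !(new_xfeature & XSTATE_FP) || (new_xfeature & ~xfeature_mask) )
            return 0;

        /* YMM state takes SSE state as prerequisite. */
        if ( (xfeature_mask & new_xfeature & XSTATE_YMM) &&
             !(new_xfeature & XSTATE_SSE) )
            return 0;

        return 1;
    }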
diff --git a/main/xen/xsa55.patch b/main/xen/xsa55.patch
deleted file mode 100644
index 35fe7afd06..0000000000
--- a/main/xen/xsa55.patch
+++ /dev/null
@@ -1,3431 +0,0 @@
1diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
2index 876c148..1a5249c 100644
3--- a/tools/libxc/Makefile
4+++ b/tools/libxc/Makefile
5@@ -52,8 +52,13 @@ endif
6 vpath %.c ../../xen/common/libelf
7 CFLAGS += -I../../xen/common/libelf
8
9-GUEST_SRCS-y += libelf-tools.c libelf-loader.c
10-GUEST_SRCS-y += libelf-dominfo.c libelf-relocate.c
11+ELF_SRCS-y += libelf-tools.c libelf-loader.c
12+ELF_SRCS-y += libelf-dominfo.c
13+
14+GUEST_SRCS-y += $(ELF_SRCS-y)
15+
16+$(patsubst %.c,%.o,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
17+$(patsubst %.c,%.opic,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
18
19 # new domain builder
20 GUEST_SRCS-y += xc_dom_core.c xc_dom_boot.c
21diff --git a/tools/libxc/ia64/xc_ia64_dom_fwloader.c b/tools/libxc/ia64/xc_ia64_dom_fwloader.c
22index cdf3333..dbd3349 100644
23--- a/tools/libxc/ia64/xc_ia64_dom_fwloader.c
24+++ b/tools/libxc/ia64/xc_ia64_dom_fwloader.c
25@@ -60,6 +60,8 @@ static int xc_dom_load_fw_kernel(struct xc_dom_image *dom)
26 unsigned long i;
27
28 dest = xc_dom_vaddr_to_ptr(dom, dom->kernel_seg.vstart);
29+ if ( dest == NULL )
30+ return -1;
31 memcpy(dest, dom->kernel_blob, FW_SIZE);
32
33 /* Synchronize cache. */
34diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
35index 0882ce6..da435ce 100644
36--- a/tools/libxc/xc_cpuid_x86.c
37+++ b/tools/libxc/xc_cpuid_x86.c
38@@ -589,6 +589,8 @@ static int xc_cpuid_do_domctl(
39 static char *alloc_str(void)
40 {
41 char *s = malloc(33);
42+ if ( s == NULL )
43+ return s;
44 memset(s, 0, 33);
45 return s;
46 }
47@@ -600,6 +602,8 @@ void xc_cpuid_to_str(const unsigned int *regs, char **strs)
48 for ( i = 0; i < 4; i++ )
49 {
50 strs[i] = alloc_str();
51+ if ( strs[i] == NULL )
52+ continue;
53 for ( j = 0; j < 32; j++ )
54 strs[i][j] = !!((regs[i] & (1U << (31 - j)))) ? '1' : '0';
55 }
56@@ -680,7 +684,7 @@ int xc_cpuid_check(
57 const char **config,
58 char **config_transformed)
59 {
60- int i, j;
61+ int i, j, rc;
62 unsigned int regs[4];
63
64 memset(config_transformed, 0, 4 * sizeof(*config_transformed));
65@@ -692,6 +696,11 @@ int xc_cpuid_check(
66 if ( config[i] == NULL )
67 continue;
68 config_transformed[i] = alloc_str();
69+ if ( config_transformed[i] == NULL )
70+ {
71+ rc = -ENOMEM;
72+ goto fail_rc;
73+ }
74 for ( j = 0; j < 32; j++ )
75 {
76 unsigned char val = !!((regs[i] & (1U << (31 - j))));
77@@ -708,12 +717,14 @@ int xc_cpuid_check(
78 return 0;
79
80 fail:
81+ rc = -EPERM;
82+ fail_rc:
83 for ( i = 0; i < 4; i++ )
84 {
85 free(config_transformed[i]);
86 config_transformed[i] = NULL;
87 }
88- return -EPERM;
89+ return rc;
90 }
91
92 /*
93@@ -758,6 +769,11 @@ int xc_cpuid_set(
94 }
95
96 config_transformed[i] = alloc_str();
97+ if ( config_transformed[i] == NULL )
98+ {
99+ rc = -ENOMEM;
100+ goto fail;
101+ }
102
103 for ( j = 0; j < 32; j++ )
104 {
105diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
106index 6a72aa9..d801f66 100644
107--- a/tools/libxc/xc_dom.h
108+++ b/tools/libxc/xc_dom.h
109@@ -140,9 +140,10 @@ struct xc_dom_image {
110
111 struct xc_dom_loader {
112 char *name;
113- int (*probe) (struct xc_dom_image * dom);
114- int (*parser) (struct xc_dom_image * dom);
115- int (*loader) (struct xc_dom_image * dom);
116+ /* Sadly the error returns from these functions are not consistent: */
117+ elf_negerrnoval (*probe) (struct xc_dom_image * dom);
118+ elf_negerrnoval (*parser) (struct xc_dom_image * dom);
119+ elf_errorstatus (*loader) (struct xc_dom_image * dom);
120
121 struct xc_dom_loader *next;
122 };
123@@ -275,27 +276,50 @@ int xc_dom_alloc_segment(struct xc_dom_image *dom,
124
125 void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t first,
126 xen_pfn_t count);
127+void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t first,
128+ xen_pfn_t count, xen_pfn_t *count_out);
129 void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
130 void xc_dom_unmap_all(struct xc_dom_image *dom);
131
132-static inline void *xc_dom_seg_to_ptr(struct xc_dom_image *dom,
133- struct xc_dom_seg *seg)
134+static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
135+ struct xc_dom_seg *seg,
136+ xen_pfn_t *pages_out)
137 {
138 xen_vaddr_t segsize = seg->vend - seg->vstart;
139 unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
140 xen_pfn_t pages = (segsize + page_size - 1) / page_size;
141+ void *retval;
142+
143+ retval = xc_dom_pfn_to_ptr(dom, seg->pfn, pages);
144
145- return xc_dom_pfn_to_ptr(dom, seg->pfn, pages);
146+ *pages_out = retval ? pages : 0;
147+ return retval;
148+}
149+
150+static inline void *xc_dom_seg_to_ptr(struct xc_dom_image *dom,
151+ struct xc_dom_seg *seg)
152+{
153+ xen_pfn_t dummy;
154+
155+ return xc_dom_seg_to_ptr_pages(dom, seg, &dummy);
156 }
157
158 static inline void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
159- xen_vaddr_t vaddr)
160+ xen_vaddr_t vaddr,
161+ size_t *safe_region_out)
162 {
163 unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
164 xen_pfn_t page = (vaddr - dom->parms.virt_base) / page_size;
165 unsigned int offset = (vaddr - dom->parms.virt_base) % page_size;
166- void *ptr = xc_dom_pfn_to_ptr(dom, page, 0);
167- return (ptr ? (ptr + offset) : NULL);
168+ xen_pfn_t safe_region_count;
169+ void *ptr;
170+
171+ *safe_region_out = 0;
172+ ptr = xc_dom_pfn_to_ptr_retcount(dom, page, 0, &safe_region_count);
173+ if ( ptr == NULL )
174+ return ptr;
175+ *safe_region_out = (safe_region_count << XC_DOM_PAGE_SHIFT(dom)) - offset;
176+ return ptr;
177 }
178
179 static inline int xc_dom_feature_translated(struct xc_dom_image *dom)
180@@ -307,6 +331,8 @@ static inline xen_pfn_t xc_dom_p2m_host(struct xc_dom_image *dom, xen_pfn_t pfn)
181 {
182 if (dom->shadow_enabled)
183 return pfn;
184+ if (pfn >= dom->total_pages)
185+ return INVALID_MFN;
186 return dom->p2m_host[pfn];
187 }
188
189@@ -315,6 +341,8 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
190 {
191 if (xc_dom_feature_translated(dom))
192 return pfn;
193+ if (pfn >= dom->total_pages)
194+ return INVALID_MFN;
195 return dom->p2m_host[pfn];
196 }
197
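[Editor's note: the *_pages and safe_region_out variants introduced above change the contract of these mapping helpers: instead of assuming the whole virtual range behind the returned pointer is backed, callers are now told how many bytes actually are, and are expected to validate sizes before touching the mapping. A sketch of the intended call pattern, following what the xc_dom_binloader.c hunk below does; text_size, bss_size and skip stand in for whatever the caller is copying:]

    size_t dest_size;
    char *dest = xc_dom_vaddr_to_ptr(dom, dom->kernel_seg.vstart, &dest_size);

    if ( dest == NULL )
        return -EINVAL;                       /* mapping failed */

    if ( dest_size < text_size ||
         dest_size - text_size < bss_size )   /* mapped window too small */
        return -EINVAL;

    memcpy(dest, image + skip, text_size);    /* only now is the copy safe */
    memset(dest + text_size, 0, bss_size);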
198diff --git a/tools/libxc/xc_dom_binloader.c b/tools/libxc/xc_dom_binloader.c
199index 769e97d..553b366 100644
200--- a/tools/libxc/xc_dom_binloader.c
201+++ b/tools/libxc/xc_dom_binloader.c
202@@ -123,10 +123,13 @@ static struct xen_bin_image_table *find_table(struct xc_dom_image *dom)
203 uint32_t *probe_ptr;
204 uint32_t *probe_end;
205
206+ if ( dom->kernel_size < sizeof(*table) )
207+ return NULL;
208 probe_ptr = dom->kernel_blob;
209- probe_end = dom->kernel_blob + dom->kernel_size - sizeof(*table);
210- if ( (void*)probe_end > (dom->kernel_blob + 8192) )
211+ if ( dom->kernel_size > (8192 + sizeof(*table)) )
212 probe_end = dom->kernel_blob + 8192;
213+ else
214+ probe_end = dom->kernel_blob + dom->kernel_size - sizeof(*table);
215
216 for ( table = NULL; probe_ptr < probe_end; probe_ptr++ )
217 {
218@@ -249,6 +252,7 @@ static int xc_dom_load_bin_kernel(struct xc_dom_image *dom)
219 char *image = dom->kernel_blob;
220 char *dest;
221 size_t image_size = dom->kernel_size;
222+ size_t dest_size;
223 uint32_t start_addr;
224 uint32_t load_end_addr;
225 uint32_t bss_end_addr;
226@@ -272,7 +276,29 @@ static int xc_dom_load_bin_kernel(struct xc_dom_image *dom)
227 DOMPRINTF(" text_size: 0x%" PRIx32 "", text_size);
228 DOMPRINTF(" bss_size: 0x%" PRIx32 "", bss_size);
229
230- dest = xc_dom_vaddr_to_ptr(dom, dom->kernel_seg.vstart);
231+ dest = xc_dom_vaddr_to_ptr(dom, dom->kernel_seg.vstart, &dest_size);
232+ if ( dest == NULL )
233+ {
234+ DOMPRINTF("%s: xc_dom_vaddr_to_ptr(dom, dom->kernel_seg.vstart)"
235+ " => NULL", __FUNCTION__);
236+ return -EINVAL;
237+ }
238+
239+ if ( dest_size < text_size ||
240+ dest_size - text_size < bss_size )
241+ {
242+ DOMPRINTF("%s: mapped region is too small for image", __FUNCTION__);
243+ return -EINVAL;
244+ }
245+
246+ if ( image_size < skip ||
247+ image_size - skip < text_size )
248+ {
249+ DOMPRINTF("%s: image is too small for declared text size",
250+ __FUNCTION__);
251+ return -EINVAL;
252+ }
253+
254 memcpy(dest, image + skip, text_size);
255 memset(dest + text_size, 0, bss_size);
256
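[Editor's note: the size checks added above are written in a deliberately overflow-proof style: untrusted values are never added together before comparing, each operand is bounded on its own and the remaining headroom is taken with a subtraction that can no longer wrap. The same idiom, stated once as a hypothetical, self-contained helper:]

    #include <stdint.h>

    /* Is the half-open range [first, first + count) contained within total? */
    static int range_within(uint64_t first, uint64_t count, uint64_t total)
    {
        return first <= total &&
               count <= total &&
               first <= total - count;    /* subtraction cannot underflow here */
    }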
257diff --git a/tools/libxc/xc_dom_core.c b/tools/libxc/xc_dom_core.c
258index 2a01d7c..e79e38d 100644
259--- a/tools/libxc/xc_dom_core.c
260+++ b/tools/libxc/xc_dom_core.c
261@@ -120,9 +120,17 @@ void *xc_dom_malloc(struct xc_dom_image *dom, size_t size)
262 {
263 struct xc_dom_mem *block;
264
265+ if ( size > SIZE_MAX - sizeof(*block) )
266+ {
267+ DOMPRINTF("%s: unreasonable allocation size", __FUNCTION__);
268+ return NULL;
269+ }
270 block = malloc(sizeof(*block) + size);
271 if ( block == NULL )
272+ {
273+ DOMPRINTF("%s: allocation failed", __FUNCTION__);
274 return NULL;
275+ }
276 memset(block, 0, sizeof(*block) + size);
277 block->next = dom->memblocks;
278 dom->memblocks = block;
279@@ -138,7 +146,10 @@ void *xc_dom_malloc_page_aligned(struct xc_dom_image *dom, size_t size)
280
281 block = malloc(sizeof(*block));
282 if ( block == NULL )
283+ {
284+ DOMPRINTF("%s: allocation failed", __FUNCTION__);
285 return NULL;
286+ }
287 memset(block, 0, sizeof(*block));
288 block->mmap_len = size;
289 block->mmap_ptr = mmap(NULL, block->mmap_len,
290@@ -146,6 +157,7 @@ void *xc_dom_malloc_page_aligned(struct xc_dom_image *dom, size_t size)
291 -1, 0);
292 if ( block->mmap_ptr == MAP_FAILED )
293 {
294+ DOMPRINTF("%s: mmap failed", __FUNCTION__);
295 free(block);
296 return NULL;
297 }
298@@ -202,6 +214,7 @@ void *xc_dom_malloc_filemap(struct xc_dom_image *dom,
299 close(fd);
300 if ( block != NULL )
301 free(block);
302+ DOMPRINTF("%s: failed (on file `%s')", __FUNCTION__, filename);
303 return NULL;
304 }
305
306@@ -271,6 +284,11 @@ size_t xc_dom_check_gzip(xc_interface *xch, void *blob, size_t ziplen)
307 unsigned char *gzlen;
308 size_t unziplen;
309
310+ if ( ziplen < 6 )
311+ /* Too small. We need (i.e. the subsequent code relies on)
312+ * 2 bytes for the magic number plus 4 bytes length. */
313+ return 0;
314+
315 if ( strncmp(blob, "\037\213", 2) )
316 /* not gzipped */
317 return 0;
318@@ -351,10 +369,19 @@ int xc_dom_try_gunzip(struct xc_dom_image *dom, void **blob, size_t * size)
319 void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t pfn,
320 xen_pfn_t count)
321 {
322+ xen_pfn_t count_out_dummy;
323+ return xc_dom_pfn_to_ptr_retcount(dom, pfn, count, &count_out_dummy);
324+}
325+
326+void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t pfn,
327+ xen_pfn_t count, xen_pfn_t *count_out)
328+{
329 struct xc_dom_phys *phys;
330 unsigned int page_shift = XC_DOM_PAGE_SHIFT(dom);
331 char *mode = "unset";
332
333+ *count_out = 0;
334+
335 if ( pfn > dom->total_pages || /* multiple checks to avoid overflows */
336 count > dom->total_pages ||
337 pfn > dom->total_pages - count )
338@@ -384,6 +411,7 @@ void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t pfn,
339 phys->count);
340 return NULL;
341 }
342+ *count_out = count;
343 }
344 else
345 {
346@@ -391,6 +419,9 @@ void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t pfn,
347 just hand out a pointer to it */
348 if ( pfn < phys->first )
349 continue;
350+ if ( pfn >= phys->first + phys->count )
351+ continue;
352+ *count_out = phys->count - (pfn - phys->first);
353 }
354 return phys->ptr + ((pfn - phys->first) << page_shift);
355 }
356@@ -478,7 +509,8 @@ int xc_dom_alloc_segment(struct xc_dom_image *dom,
357 seg->vstart = start;
358 seg->pfn = (seg->vstart - dom->parms.virt_base) / page_size;
359
360- if ( pages > dom->total_pages || /* double test avoids overflow probs */
361+ if ( pages > dom->total_pages || /* multiple test avoids overflow probs */
362+ seg->pfn > dom->total_pages ||
363 pages > dom->total_pages - seg->pfn)
364 {
365 xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
366@@ -855,6 +887,12 @@ int xc_dom_build_image(struct xc_dom_image *dom)
367 ramdisklen) != 0 )
368 goto err;
369 ramdiskmap = xc_dom_seg_to_ptr(dom, &dom->ramdisk_seg);
370+ if ( ramdiskmap == NULL )
371+ {
372+ DOMPRINTF("%s: xc_dom_seg_to_ptr(dom, &dom->ramdisk_seg) => NULL",
373+ __FUNCTION__);
374+ goto err;
375+ }
376 if ( unziplen )
377 {
378 if ( xc_dom_do_gunzip(dom->xch,
379diff --git a/tools/libxc/xc_dom_elfloader.c b/tools/libxc/xc_dom_elfloader.c
380index 2e69559..be58276 100644
381--- a/tools/libxc/xc_dom_elfloader.c
382+++ b/tools/libxc/xc_dom_elfloader.c
383@@ -28,13 +28,14 @@
384
385 #include "xg_private.h"
386 #include "xc_dom.h"
387+#include "xc_bitops.h"
388
389 #define XEN_VER "xen-3.0"
390
391 /* ------------------------------------------------------------------------ */
392
393 static void log_callback(struct elf_binary *elf, void *caller_data,
394- int iserr, const char *fmt, va_list al) {
395+ bool iserr, const char *fmt, va_list al) {
396 xc_interface *xch = caller_data;
397
398 xc_reportv(xch,
399@@ -46,7 +47,7 @@ static void log_callback(struct elf_binary *elf, void *caller_data,
400
401 void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
402 int verbose) {
403- elf_set_log(elf, log_callback, xch, verbose);
404+ elf_set_log(elf, log_callback, xch, verbose /* convert to bool */);
405 }
406
407 /* ------------------------------------------------------------------------ */
408@@ -84,7 +85,7 @@ static char *xc_dom_guest_type(struct xc_dom_image *dom,
409 /* ------------------------------------------------------------------------ */
410 /* parse elf binary */
411
412-static int check_elf_kernel(struct xc_dom_image *dom, int verbose)
413+static elf_negerrnoval check_elf_kernel(struct xc_dom_image *dom, bool verbose)
414 {
415 if ( dom->kernel_blob == NULL )
416 {
417@@ -95,7 +96,7 @@ static int check_elf_kernel(struct xc_dom_image *dom, int verbose)
418 return -EINVAL;
419 }
420
421- if ( !elf_is_elfbinary(dom->kernel_blob) )
422+ if ( !elf_is_elfbinary(dom->kernel_blob, dom->kernel_size) )
423 {
424 if ( verbose )
425 xc_dom_panic(dom->xch,
426@@ -106,20 +107,21 @@ static int check_elf_kernel(struct xc_dom_image *dom, int verbose)
427 return 0;
428 }
429
430-static int xc_dom_probe_elf_kernel(struct xc_dom_image *dom)
431+static elf_negerrnoval xc_dom_probe_elf_kernel(struct xc_dom_image *dom)
432 {
433 return check_elf_kernel(dom, 0);
434 }
435
436-static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
437- struct elf_binary *elf, int load)
438+static elf_errorstatus xc_dom_load_elf_symtab(struct xc_dom_image *dom,
439+ struct elf_binary *elf, bool load)
440 {
441 struct elf_binary syms;
442- const elf_shdr *shdr, *shdr2;
443+ ELF_HANDLE_DECL(elf_shdr) shdr; ELF_HANDLE_DECL(elf_shdr) shdr2;
444 xen_vaddr_t symtab, maxaddr;
445- char *hdr;
446+ elf_ptrval hdr;
447 size_t size;
448- int h, count, type, i, tables = 0;
449+ unsigned h, count, type, i, tables = 0;
450+ unsigned long *strtab_referenced = NULL;
451
452 if ( elf_swap(elf) )
453 {
454@@ -130,31 +132,48 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
455
456 if ( load )
457 {
458+ char *hdr_ptr;
459+ size_t allow_size;
460+
461 if ( !dom->bsd_symtab_start )
462 return 0;
463 size = dom->kernel_seg.vend - dom->bsd_symtab_start;
464- hdr = xc_dom_vaddr_to_ptr(dom, dom->bsd_symtab_start);
465- *(int *)hdr = size - sizeof(int);
466+ hdr_ptr = xc_dom_vaddr_to_ptr(dom, dom->bsd_symtab_start, &allow_size);
467+ if ( hdr_ptr == NULL )
468+ {
469+ DOMPRINTF("%s/load: xc_dom_vaddr_to_ptr(dom,dom->bsd_symtab_start"
470+ " => NULL", __FUNCTION__);
471+ return -1;
472+ }
473+ elf->caller_xdest_base = hdr_ptr;
474+ elf->caller_xdest_size = allow_size;
475+ hdr = ELF_REALPTR2PTRVAL(hdr_ptr);
476+ elf_store_val(elf, unsigned, hdr, size - sizeof(unsigned));
477 }
478 else
479 {
480- size = sizeof(int) + elf_size(elf, elf->ehdr) +
481+ char *hdr_ptr;
482+
483+ size = sizeof(unsigned) + elf_size(elf, elf->ehdr) +
484 elf_shdr_count(elf) * elf_size(elf, shdr);
485- hdr = xc_dom_malloc(dom, size);
486- if ( hdr == NULL )
487+ hdr_ptr = xc_dom_malloc(dom, size);
488+ if ( hdr_ptr == NULL )
489 return 0;
490- dom->bsd_symtab_start = elf_round_up(&syms, dom->kernel_seg.vend);
491+ elf->caller_xdest_base = hdr_ptr;
492+ elf->caller_xdest_size = size;
493+ hdr = ELF_REALPTR2PTRVAL(hdr_ptr);
494+ dom->bsd_symtab_start = elf_round_up(elf, dom->kernel_seg.vend);
495 }
496
497- memcpy(hdr + sizeof(int),
498- elf->image,
499+ elf_memcpy_safe(elf, hdr + sizeof(unsigned),
500+ ELF_IMAGE_BASE(elf),
501 elf_size(elf, elf->ehdr));
502- memcpy(hdr + sizeof(int) + elf_size(elf, elf->ehdr),
503- elf->image + elf_uval(elf, elf->ehdr, e_shoff),
504+ elf_memcpy_safe(elf, hdr + sizeof(unsigned) + elf_size(elf, elf->ehdr),
505+ ELF_IMAGE_BASE(elf) + elf_uval(elf, elf->ehdr, e_shoff),
506 elf_shdr_count(elf) * elf_size(elf, shdr));
507 if ( elf_64bit(elf) )
508 {
509- Elf64_Ehdr *ehdr = (Elf64_Ehdr *)(hdr + sizeof(int));
510+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)(hdr + sizeof(unsigned));
511 ehdr->e_phoff = 0;
512 ehdr->e_phentsize = 0;
513 ehdr->e_phnum = 0;
514@@ -163,19 +182,42 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
515 }
516 else
517 {
518- Elf32_Ehdr *ehdr = (Elf32_Ehdr *)(hdr + sizeof(int));
519+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)(hdr + sizeof(unsigned));
520 ehdr->e_phoff = 0;
521 ehdr->e_phentsize = 0;
522 ehdr->e_phnum = 0;
523 ehdr->e_shoff = elf_size(elf, elf->ehdr);
524 ehdr->e_shstrndx = SHN_UNDEF;
525 }
526- if ( elf_init(&syms, hdr + sizeof(int), size - sizeof(int)) )
527+ if ( elf->caller_xdest_size < sizeof(unsigned) )
528+ {
529+ DOMPRINTF("%s/%s: header size %"PRIx64" too small",
530+ __FUNCTION__, load ? "load" : "parse",
531+ (uint64_t)elf->caller_xdest_size);
532+ return -1;
533+ }
534+ if ( elf_init(&syms, elf->caller_xdest_base + sizeof(unsigned),
535+ elf->caller_xdest_size - sizeof(unsigned)) )
536 return -1;
537
538+ /*
539+ * The caller_xdest_{base,size} and dest_{base,size} need to
540+ * remain valid so long as each struct elf_image does. The
541+ * principle we adopt is that these values are set when the
542+ * memory is allocated or mapped, and cleared when (and if)
543+ * they are unmapped.
544+ *
545+ * Mappings of the guest are normally undone by xc_dom_unmap_all
546+ * (directly or via xc_dom_release). We do not explicitly clear
547+ * these because in fact that happens only at the end of
548+ * xc_dom_boot_image, at which time all of these ELF loading
549+ * functions have returned. No relevant struct elf_binary*
550+ * escapes this file.
551+ */
552+
553 xc_elf_set_logfile(dom->xch, &syms, 1);
554
555- symtab = dom->bsd_symtab_start + sizeof(int);
556+ symtab = dom->bsd_symtab_start + sizeof(unsigned);
557 maxaddr = elf_round_up(&syms, symtab + elf_size(&syms, syms.ehdr) +
558 elf_shdr_count(&syms) * elf_size(&syms, shdr));
559
560@@ -186,27 +228,40 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
561 symtab, maxaddr);
562
563 count = elf_shdr_count(&syms);
564+ /* elf_shdr_count guarantees that count is reasonable */
565+
566+ strtab_referenced = xc_dom_malloc(dom, bitmap_size(count));
567+ if ( strtab_referenced == NULL )
568+ return -1;
569+ bitmap_clear(strtab_referenced, count);
570+ /* Note the symtabs @h linked to by any strtab @i. */
571+ for ( i = 0; i < count; i++ )
572+ {
573+ shdr2 = elf_shdr_by_index(&syms, i);
574+ if ( elf_uval(&syms, shdr2, sh_type) == SHT_SYMTAB )
575+ {
576+ h = elf_uval(&syms, shdr2, sh_link);
577+ if (h < count)
578+ set_bit(h, strtab_referenced);
579+ }
580+ }
581+
582 for ( h = 0; h < count; h++ )
583 {
584 shdr = elf_shdr_by_index(&syms, h);
585+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(shdr), 1) )
586+ /* input has an insane section header count field */
587+ break;
588 type = elf_uval(&syms, shdr, sh_type);
589 if ( type == SHT_STRTAB )
590 {
591- /* Look for a strtab @i linked to symtab @h. */
592- for ( i = 0; i < count; i++ )
593- {
594- shdr2 = elf_shdr_by_index(&syms, i);
595- if ( (elf_uval(&syms, shdr2, sh_type) == SHT_SYMTAB) &&
596- (elf_uval(&syms, shdr2, sh_link) == h) )
597- break;
598- }
599 /* Skip symtab @h if we found no corresponding strtab @i. */
600- if ( i == count )
601+ if ( !test_bit(h, strtab_referenced) )
602 {
603 if ( elf_64bit(&syms) )
604- *(Elf64_Off*)(&shdr->e64.sh_offset) = 0;
605+ elf_store_field(elf, shdr, e64.sh_offset, 0);
606 else
607- *(Elf32_Off*)(&shdr->e32.sh_offset) = 0;
608+ elf_store_field(elf, shdr, e32.sh_offset, 0);
609 continue;
610 }
611 }
612@@ -215,13 +270,13 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
613 {
614 /* Mangled to be based on ELF header location. */
615 if ( elf_64bit(&syms) )
616- *(Elf64_Off*)(&shdr->e64.sh_offset) = maxaddr - symtab;
617+ elf_store_field(elf, shdr, e64.sh_offset, maxaddr - symtab);
618 else
619- *(Elf32_Off*)(&shdr->e32.sh_offset) = maxaddr - symtab;
620+ elf_store_field(elf, shdr, e32.sh_offset, maxaddr - symtab);
621 size = elf_uval(&syms, shdr, sh_size);
622 maxaddr = elf_round_up(&syms, maxaddr + size);
623 tables++;
624- DOMPRINTF("%s: h=%d %s, size=0x%zx, maxaddr=0x%" PRIx64 "",
625+ DOMPRINTF("%s: h=%u %s, size=0x%zx, maxaddr=0x%" PRIx64 "",
626 __FUNCTION__, h,
627 type == SHT_SYMTAB ? "symtab" : "strtab",
628 size, maxaddr);
629@@ -229,7 +284,7 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
630 if ( load )
631 {
632 shdr2 = elf_shdr_by_index(elf, h);
633- memcpy((void*)elf_section_start(&syms, shdr),
634+ elf_memcpy_safe(elf, elf_section_start(&syms, shdr),
635 elf_section_start(elf, shdr2),
636 size);
637 }
638@@ -237,11 +292,18 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
639
640 /* Name is NULL. */
641 if ( elf_64bit(&syms) )
642- *(Elf64_Word*)(&shdr->e64.sh_name) = 0;
643+ elf_store_field(elf, shdr, e64.sh_name, 0);
644 else
645- *(Elf32_Word*)(&shdr->e32.sh_name) = 0;
646+ elf_store_field(elf, shdr, e32.sh_name, 0);
647 }
648
649+ if ( elf_check_broken(&syms) )
650+ DOMPRINTF("%s: symbols ELF broken: %s", __FUNCTION__,
651+ elf_check_broken(&syms));
652+ if ( elf_check_broken(elf) )
653+ DOMPRINTF("%s: ELF broken: %s", __FUNCTION__,
654+ elf_check_broken(elf));
655+
656 if ( tables == 0 )
657 {
658 DOMPRINTF("%s: no symbol table present", __FUNCTION__);
659@@ -253,16 +315,22 @@ static int xc_dom_load_elf_symtab(struct xc_dom_image *dom,
660 return 0;
661 }
662
663-static int xc_dom_parse_elf_kernel(struct xc_dom_image *dom)
664+static elf_errorstatus xc_dom_parse_elf_kernel(struct xc_dom_image *dom)
665+ /*
666+ * This function sometimes returns -1 for error and sometimes
667+ * an errno value. ?!?!
668+ */
669 {
670 struct elf_binary *elf;
671- int rc;
672+ elf_errorstatus rc;
673
674 rc = check_elf_kernel(dom, 1);
675 if ( rc != 0 )
676 return rc;
677
678 elf = xc_dom_malloc(dom, sizeof(*elf));
679+ if ( elf == NULL )
680+ return -1;
681 dom->private_loader = elf;
682 rc = elf_init(elf, dom->kernel_blob, dom->kernel_size);
683 xc_elf_set_logfile(dom->xch, elf, 1);
684@@ -274,23 +342,27 @@ static int xc_dom_parse_elf_kernel(struct xc_dom_image *dom)
685 }
686
687 /* Find the section-header strings table. */
688- if ( elf->sec_strtab == NULL )
689+ if ( ELF_PTRVAL_INVALID(elf->sec_strtab) )
690 {
691 xc_dom_panic(dom->xch, XC_INVALID_KERNEL, "%s: ELF image"
692 " has no shstrtab", __FUNCTION__);
693- return -EINVAL;
694+ rc = -EINVAL;
695+ goto out;
696 }
697
698 /* parse binary and get xen meta info */
699 elf_parse_binary(elf);
700 if ( (rc = elf_xen_parse(elf, &dom->parms)) != 0 )
701- return rc;
702+ {
703+ goto out;
704+ }
705
706 if ( elf_xen_feature_get(XENFEAT_dom0, dom->parms.f_required) )
707 {
708 xc_dom_panic(dom->xch, XC_INVALID_KERNEL, "%s: Kernel does not"
709 " support unprivileged (DomU) operation", __FUNCTION__);
710- return -EINVAL;
711+ rc = -EINVAL;
712+ goto out;
713 }
714
715 /* find kernel segment */
716@@ -304,15 +376,30 @@ static int xc_dom_parse_elf_kernel(struct xc_dom_image *dom)
717 DOMPRINTF("%s: %s: 0x%" PRIx64 " -> 0x%" PRIx64 "",
718 __FUNCTION__, dom->guest_type,
719 dom->kernel_seg.vstart, dom->kernel_seg.vend);
720- return 0;
721+ rc = 0;
722+out:
723+ if ( elf_check_broken(elf) )
724+ DOMPRINTF("%s: ELF broken: %s", __FUNCTION__,
725+ elf_check_broken(elf));
726+
727+ return rc;
728 }
729
730-static int xc_dom_load_elf_kernel(struct xc_dom_image *dom)
731+static elf_errorstatus xc_dom_load_elf_kernel(struct xc_dom_image *dom)
732 {
733 struct elf_binary *elf = dom->private_loader;
734- int rc;
735+ elf_errorstatus rc;
736+ xen_pfn_t pages;
737+
738+ elf->dest_base = xc_dom_seg_to_ptr_pages(dom, &dom->kernel_seg, &pages);
739+ if ( elf->dest_base == NULL )
740+ {
741+ DOMPRINTF("%s: xc_dom_vaddr_to_ptr(dom,dom->kernel_seg)"
742+ " => NULL", __FUNCTION__);
743+ return -1;
744+ }
745+ elf->dest_size = pages * XC_DOM_PAGE_SIZE(dom);
746
747- elf->dest = xc_dom_seg_to_ptr(dom, &dom->kernel_seg);
748 rc = elf_load_binary(elf);
749 if ( rc < 0 )
750 {
751diff --git a/tools/libxc/xc_dom_ia64.c b/tools/libxc/xc_dom_ia64.c
752index dcd1523..076821c 100644
753--- a/tools/libxc/xc_dom_ia64.c
754+++ b/tools/libxc/xc_dom_ia64.c
755@@ -60,6 +60,12 @@ int start_info_ia64(struct xc_dom_image *dom)
756
757 DOMPRINTF_CALLED(dom->xch);
758
759+ if ( start_info == NULL )
760+ {
761+ DOMPRINTF("%s: xc_dom_pfn_to_ptr failed on start_info", __FUNCTION__);
762+ return -1; /* our caller throws away our return value :-/ */
763+ }
764+
765 memset(start_info, 0, sizeof(*start_info));
766 sprintf(start_info->magic, dom->guest_type);
767 start_info->flags = dom->flags;
768@@ -182,6 +188,12 @@ int arch_setup_meminit(struct xc_dom_image *dom)
769
770 /* setup initial p2m */
771 dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * nbr);
772+ if ( dom->p2m_host == NULL )
773+ {
774+ DOMPRINTF("%s: xc_dom_malloc failed for p2m_host",
775+ __FUNCTION__);
776+ return -1;
777+ }
778 for ( pfn = 0; pfn < nbr; pfn++ )
779 dom->p2m_host[pfn] = start + pfn;
780
781diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
782index 0cf1687..448d9a1 100644
783--- a/tools/libxc/xc_dom_x86.c
784+++ b/tools/libxc/xc_dom_x86.c
785@@ -144,6 +144,9 @@ static int setup_pgtables_x86_32(struct xc_dom_image *dom)
786 xen_vaddr_t addr;
787 xen_pfn_t pgpfn;
788
789+ if ( l2tab == NULL )
790+ goto pfn_error;
791+
792 for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
793 addr += PAGE_SIZE_X86 )
794 {
795@@ -151,6 +154,8 @@ static int setup_pgtables_x86_32(struct xc_dom_image *dom)
796 {
797 /* get L1 tab, make L2 entry */
798 l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
799+ if ( l1tab == NULL )
800+ goto pfn_error;
801 l2off = l2_table_offset_i386(addr);
802 l2tab[l2off] =
803 pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
804@@ -169,6 +174,11 @@ static int setup_pgtables_x86_32(struct xc_dom_image *dom)
805 l1tab = NULL;
806 }
807 return 0;
808+
809+pfn_error:
810+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
811+ "%s: xc_dom_pfn_to_ptr failed", __FUNCTION__);
812+ return -EINVAL;
813 }
814
815 /*
816@@ -219,6 +229,12 @@ static xen_pfn_t move_l3_below_4G(struct xc_dom_image *dom,
817 goto out;
818
819 l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
820+ if ( l3tab == NULL )
821+ {
822+ DOMPRINTF("%s: xc_dom_pfn_to_ptr(dom, l3pfn, 1) => NULL",
823+ __FUNCTION__);
824+ return l3mfn; /* our one call site will call xc_dom_panic and fail */
825+ }
826 memset(l3tab, 0, XC_DOM_PAGE_SIZE(dom));
827
828 DOMPRINTF("%s: successfully relocated L3 below 4G. "
829@@ -262,6 +278,8 @@ static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
830 }
831
832 l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
833+ if ( l3tab == NULL )
834+ goto pfn_error;
835
836 for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
837 addr += PAGE_SIZE_X86 )
838@@ -270,6 +288,8 @@ static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
839 {
840 /* get L2 tab, make L3 entry */
841 l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
842+ if ( l2tab == NULL )
843+ goto pfn_error;
844 l3off = l3_table_offset_pae(addr);
845 l3tab[l3off] =
846 pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
847@@ -280,6 +300,8 @@ static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
848 {
849 /* get L1 tab, make L2 entry */
850 l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
851+ if ( l1tab == NULL )
852+ goto pfn_error;
853 l2off = l2_table_offset_pae(addr);
854 l2tab[l2off] =
855 pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
856@@ -306,6 +328,11 @@ static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
857 l3tab[3] = pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
858 }
859 return 0;
860+
861+pfn_error:
862+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
863+ "%s: xc_dom_pfn_to_ptr failed", __FUNCTION__);
864+ return -EINVAL;
865 }
866
867 #undef L1_PROT
868@@ -344,6 +371,9 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
869 uint64_t addr;
870 xen_pfn_t pgpfn;
871
872+ if ( l4tab == NULL )
873+ goto pfn_error;
874+
875 for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
876 addr += PAGE_SIZE_X86 )
877 {
878@@ -351,6 +381,8 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
879 {
880 /* get L3 tab, make L4 entry */
881 l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
882+ if ( l3tab == NULL )
883+ goto pfn_error;
884 l4off = l4_table_offset_x86_64(addr);
885 l4tab[l4off] =
886 pfn_to_paddr(xc_dom_p2m_guest(dom, l3pfn)) | L4_PROT;
887@@ -361,6 +393,8 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
888 {
889 /* get L2 tab, make L3 entry */
890 l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
891+ if ( l2tab == NULL )
892+ goto pfn_error;
893 l3off = l3_table_offset_x86_64(addr);
894 l3tab[l3off] =
895 pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
896@@ -373,6 +407,8 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
897 {
898 /* get L1 tab, make L2 entry */
899 l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
900+ if ( l1tab == NULL )
901+ goto pfn_error;
902 l2off = l2_table_offset_x86_64(addr);
903 l2tab[l2off] =
904 pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
905@@ -393,6 +429,11 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
906 l1tab = NULL;
907 }
908 return 0;
909+
910+pfn_error:
911+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
912+ "%s: xc_dom_pfn_to_ptr failed", __FUNCTION__);
913+ return -EINVAL;
914 }
915
916 #undef L1_PROT
917@@ -410,6 +451,8 @@ static int alloc_magic_pages(struct xc_dom_image *dom)
918 if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach", 0, p2m_size) )
919 return -1;
920 dom->p2m_guest = xc_dom_seg_to_ptr(dom, &dom->p2m_seg);
921+ if ( dom->p2m_guest == NULL )
922+ return -1;
923
924 /* allocate special pages */
925 dom->start_info_pfn = xc_dom_alloc_page(dom, "start info");
926@@ -434,6 +477,12 @@ static int start_info_x86_32(struct xc_dom_image *dom)
927
928 DOMPRINTF_CALLED(dom->xch);
929
930+ if ( start_info == NULL )
931+ {
932+ DOMPRINTF("%s: xc_dom_pfn_to_ptr failed on start_info", __FUNCTION__);
933+ return -1; /* our caller throws away our return value :-/ */
934+ }
935+
936 memset(start_info, 0, sizeof(*start_info));
937 strncpy(start_info->magic, dom->guest_type, sizeof(start_info->magic));
938 start_info->magic[sizeof(start_info->magic) - 1] = '\0';
939@@ -474,6 +523,12 @@ static int start_info_x86_64(struct xc_dom_image *dom)
940
941 DOMPRINTF_CALLED(dom->xch);
942
943+ if ( start_info == NULL )
944+ {
945+ DOMPRINTF("%s: xc_dom_pfn_to_ptr failed on start_info", __FUNCTION__);
946+ return -1; /* our caller throws away our return value :-/ */
947+ }
948+
949 memset(start_info, 0, sizeof(*start_info));
950 strncpy(start_info->magic, dom->guest_type, sizeof(start_info->magic));
951 start_info->magic[sizeof(start_info->magic) - 1] = '\0';
952@@ -725,6 +780,9 @@ int arch_setup_meminit(struct xc_dom_image *dom)
953 }
954
955 dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->total_pages);
956+ if ( dom->p2m_host == NULL )
957+ return -EINVAL;
958+
959 if ( dom->superpages )
960 {
961 int count = dom->total_pages >> SUPERPAGE_PFN_SHIFT;
962diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
963index b4c0b10..f9ed6b2 100644
964--- a/tools/libxc/xc_domain_restore.c
965+++ b/tools/libxc/xc_domain_restore.c
966@@ -1180,6 +1180,11 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
967
968 /* Map relevant mfns */
969 pfn_err = calloc(j, sizeof(*pfn_err));
970+ if ( pfn_err == NULL )
971+ {
972+ PERROR("allocation for pfn_err failed");
973+ return -1;
974+ }
975 region_base = xc_map_foreign_bulk(
976 xch, dom, PROT_WRITE, region_mfn, pfn_err, j);
977
978@@ -1556,6 +1561,12 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
979 mfn = ctx->p2m[pfn];
980 buf = xc_map_foreign_range(xch, dom, PAGE_SIZE,
981 PROT_READ | PROT_WRITE, mfn);
982+ if ( buf == NULL )
983+ {
984+ ERROR("xc_map_foreign_range for generation id"
985+ " buffer failed");
986+ goto out;
987+ }
988
989 generationid = *(unsigned long long *)(buf + offset);
990 *(unsigned long long *)(buf + offset) = generationid + 1;
991@@ -1713,6 +1724,11 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
992 l3tab = (uint64_t *)
993 xc_map_foreign_range(xch, dom, PAGE_SIZE,
994 PROT_READ, ctx->p2m[i]);
995+ if ( l3tab == NULL )
996+ {
997+ PERROR("xc_map_foreign_range failed (for l3tab)");
998+ goto out;
999+ }
1000
1001 for ( j = 0; j < 4; j++ )
1002 l3ptes[j] = l3tab[j];
1003@@ -1739,6 +1755,11 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
1004 l3tab = (uint64_t *)
1005 xc_map_foreign_range(xch, dom, PAGE_SIZE,
1006 PROT_READ | PROT_WRITE, ctx->p2m[i]);
1007+ if ( l3tab == NULL )
1008+ {
1009+ PERROR("xc_map_foreign_range failed (for l3tab, 2nd)");
1010+ goto out;
1011+ }
1012
1013 for ( j = 0; j < 4; j++ )
1014 l3tab[j] = l3ptes[j];
1015@@ -1909,6 +1930,12 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
1016 SET_FIELD(ctxt, user_regs.edx, mfn);
1017 start_info = xc_map_foreign_range(
1018 xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, mfn);
1019+ if ( start_info == NULL )
1020+ {
1021+ PERROR("xc_map_foreign_range failed (for start_info)");
1022+ goto out;
1023+ }
1024+
1025 SET_FIELD(start_info, nr_pages, dinfo->p2m_size);
1026 SET_FIELD(start_info, shared_info, shared_info_frame<<PAGE_SHIFT);
1027 SET_FIELD(start_info, flags, 0);
1028@@ -2056,6 +2083,11 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
1029 /* Restore contents of shared-info page. No checking needed. */
1030 new_shared_info = xc_map_foreign_range(
1031 xch, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
1032+ if ( new_shared_info == NULL )
1033+ {
1034+ PERROR("xc_map_foreign_range failed (for new_shared_info)");
1035+ goto out;
1036+ }
1037
1038 /* restore saved vcpu_info and arch specific info */
1039 MEMCPY_FIELD(new_shared_info, old_shared_info, vcpu_info);
1040diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
1041index cf5d7fb..8165287 100644
1042--- a/tools/libxc/xc_hvm_build_x86.c
1043+++ b/tools/libxc/xc_hvm_build_x86.c
1044@@ -104,21 +104,23 @@ static int loadelfimage(
1045 for ( i = 0; i < pages; i++ )
1046 entries[i].mfn = parray[(elf->pstart >> PAGE_SHIFT) + i];
1047
1048- elf->dest = xc_map_foreign_ranges(
1049+ elf->dest_base = xc_map_foreign_ranges(
1050 xch, dom, pages << PAGE_SHIFT, PROT_READ | PROT_WRITE, 1 << PAGE_SHIFT,
1051 entries, pages);
1052- if ( elf->dest == NULL )
1053+ if ( elf->dest_base == NULL )
1054 goto err;
1055+ elf->dest_size = pages * PAGE_SIZE;
1056
1057- elf->dest += elf->pstart & (PAGE_SIZE - 1);
1058+ ELF_ADVANCE_DEST(elf, elf->pstart & (PAGE_SIZE - 1));
1059
1060 /* Load the initial elf image. */
1061 rc = elf_load_binary(elf);
1062 if ( rc < 0 )
1063 PERROR("Failed to load elf binary\n");
1064
1065- munmap(elf->dest, pages << PAGE_SHIFT);
1066- elf->dest = NULL;
1067+ munmap(elf->dest_base, pages << PAGE_SHIFT);
1068+ elf->dest_base = NULL;
1069+ elf->dest_size = 0;
1070
1071 err:
1072 free(entries);
1073@@ -401,11 +403,16 @@ static int setup_guest(xc_interface *xch,
1074 munmap(page0, PAGE_SIZE);
1075 }
1076
1077+ if ( elf_check_broken(&elf) )
1078+ ERROR("HVM ELF broken: %s", elf_check_broken(&elf));
1079+
1080 free(page_array);
1081 return 0;
1082
1083 error_out:
1084 free(page_array);
1085+ if ( elf_check_broken(&elf) )
1086+ ERROR("HVM ELF broken, failing: %s", elf_check_broken(&elf));
1087 return -1;
1088 }
1089
1090diff --git a/tools/libxc/xc_linux_osdep.c b/tools/libxc/xc_linux_osdep.c
1091index 787e742..98e041c 100644
1092--- a/tools/libxc/xc_linux_osdep.c
1093+++ b/tools/libxc/xc_linux_osdep.c
1094@@ -378,6 +378,8 @@ static void *linux_privcmd_map_foreign_range(xc_interface *xch, xc_osdep_handle
1095
1096 num = (size + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
1097 arr = calloc(num, sizeof(xen_pfn_t));
1098+ if ( arr == NULL )
1099+ return NULL;
1100
1101 for ( i = 0; i < num; i++ )
1102 arr[i] = mfn + i;
1103@@ -402,6 +404,8 @@ static void *linux_privcmd_map_foreign_ranges(xc_interface *xch, xc_osdep_handle
1104 num_per_entry = chunksize >> XC_PAGE_SHIFT;
1105 num = num_per_entry * nentries;
1106 arr = calloc(num, sizeof(xen_pfn_t));
1107+ if ( arr == NULL )
1108+ return NULL;
1109
1110 for ( i = 0; i < nentries; i++ )
1111 for ( j = 0; j < num_per_entry; j++ )
1112diff --git a/tools/libxc/xc_offline_page.c b/tools/libxc/xc_offline_page.c
1113index 089a361..36b9812 100644
1114--- a/tools/libxc/xc_offline_page.c
1115+++ b/tools/libxc/xc_offline_page.c
1116@@ -714,6 +714,11 @@ int xc_exchange_page(xc_interface *xch, int domid, xen_pfn_t mfn)
1117
1118 new_p = xc_map_foreign_range(xch, domid, PAGE_SIZE,
1119 PROT_READ|PROT_WRITE, new_mfn);
1120+ if ( new_p == NULL )
1121+ {
1122+ ERROR("failed to map new_p for copy, guest may be broken?");
1123+ goto failed;
1124+ }
1125 memcpy(new_p, backup, PAGE_SIZE);
1126 munmap(new_p, PAGE_SIZE);
1127 mops.arg1.mfn = new_mfn;
1128diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c
1129index 3e03a91..848ceed 100644
1130--- a/tools/libxc/xc_private.c
1131+++ b/tools/libxc/xc_private.c
1132@@ -771,6 +771,8 @@ const char *xc_strerror(xc_interface *xch, int errcode)
1133 errbuf = pthread_getspecific(errbuf_pkey);
1134 if (errbuf == NULL) {
1135 errbuf = malloc(XS_BUFSIZE);
1136+ if ( errbuf == NULL )
1137+ return "(failed to allocate errbuf)";
1138 pthread_setspecific(errbuf_pkey, errbuf);
1139 }
1140
1141diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
1142index b7741ca..8952048 100644
1143--- a/tools/libxc/xenctrl.h
1144+++ b/tools/libxc/xenctrl.h
1145@@ -1778,7 +1778,7 @@ int xc_cpuid_set(xc_interface *xch,
1146 int xc_cpuid_apply_policy(xc_interface *xch,
1147 domid_t domid);
1148 void xc_cpuid_to_str(const unsigned int *regs,
1149- char **strs);
1150+ char **strs); /* some strs[] may be NULL if ENOMEM */
1151 int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
1152 #endif
1153
1154diff --git a/tools/xcutils/readnotes.c b/tools/xcutils/readnotes.c
1155index c926186..5fa445e 100644
1156--- a/tools/xcutils/readnotes.c
1157+++ b/tools/xcutils/readnotes.c
1158@@ -61,51 +61,56 @@ struct setup_header {
1159 } __attribute__((packed));
1160
1161 static void print_string_note(const char *prefix, struct elf_binary *elf,
1162- const elf_note *note)
1163+ ELF_HANDLE_DECL(elf_note) note)
1164 {
1165- printf("%s: %s\n", prefix, (char*)elf_note_desc(elf, note));
1166+ printf("%s: %s\n", prefix, elf_strfmt(elf, elf_note_desc(elf, note)));
1167 }
1168
1169 static void print_numeric_note(const char *prefix, struct elf_binary *elf,
1170- const elf_note *note)
1171+ ELF_HANDLE_DECL(elf_note) note)
1172 {
1173 uint64_t value = elf_note_numeric(elf, note);
1174- int descsz = elf_uval(elf, note, descsz);
1175+ unsigned descsz = elf_uval(elf, note, descsz);
1176
1177 printf("%s: %#*" PRIx64 " (%d bytes)\n",
1178 prefix, 2+2*descsz, value, descsz);
1179 }
1180
1181 static void print_l1_mfn_valid_note(const char *prefix, struct elf_binary *elf,
1182- const elf_note *note)
1183+ ELF_HANDLE_DECL(elf_note) note)
1184 {
1185- int descsz = elf_uval(elf, note, descsz);
1186- const uint32_t *desc32 = elf_note_desc(elf, note);
1187- const uint64_t *desc64 = elf_note_desc(elf, note);
1188+ unsigned descsz = elf_uval(elf, note, descsz);
1189+ elf_ptrval desc = elf_note_desc(elf, note);
1190
1191 /* XXX should be able to cope with a list of values. */
1192 switch ( descsz / 2 )
1193 {
1194 case 8:
1195 printf("%s: mask=%#"PRIx64" value=%#"PRIx64"\n", prefix,
1196- desc64[0], desc64[1]);
1197+ elf_access_unsigned(elf, desc, 0, 8),
1198+ elf_access_unsigned(elf, desc, 8, 8));
1199 break;
1200 case 4:
1201 printf("%s: mask=%#"PRIx32" value=%#"PRIx32"\n", prefix,
1202- desc32[0],desc32[1]);
1203+ (uint32_t)elf_access_unsigned(elf, desc, 0, 4),
1204+ (uint32_t)elf_access_unsigned(elf, desc, 4, 4));
1205 break;
1206 }
1207
1208 }
1209
1210-static int print_notes(struct elf_binary *elf, const elf_note *start, const elf_note *end)
1211+static unsigned print_notes(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) start, ELF_HANDLE_DECL(elf_note) end)
1212 {
1213- const elf_note *note;
1214- int notes_found = 0;
1215+ ELF_HANDLE_DECL(elf_note) note;
1216+ unsigned notes_found = 0;
1217+ const char *this_note_name;
1218
1219- for ( note = start; note < end; note = elf_note_next(elf, note) )
1220+ for ( note = start; ELF_HANDLE_PTRVAL(note) < ELF_HANDLE_PTRVAL(end); note = elf_note_next(elf, note) )
1221 {
1222- if (0 != strcmp(elf_note_name(elf, note), "Xen"))
1223+ this_note_name = elf_note_name(elf, note);
1224+ if (NULL == this_note_name)
1225+ continue;
1226+ if (0 != strcmp(this_note_name, "Xen"))
1227 continue;
1228
1229 notes_found++;
1230@@ -156,7 +161,7 @@ static int print_notes(struct elf_binary *elf, const elf_note *start, const elf_
1231 break;
1232 default:
1233 printf("unknown note type %#x\n",
1234- (int)elf_uval(elf, note, type));
1235+ (unsigned)elf_uval(elf, note, type));
1236 break;
1237 }
1238 }
1239@@ -166,12 +171,13 @@ static int print_notes(struct elf_binary *elf, const elf_note *start, const elf_
1240 int main(int argc, char **argv)
1241 {
1242 const char *f;
1243- int fd,h,size,usize,count;
1244+ int fd;
1245+ unsigned h,size,usize,count;
1246 void *image,*tmp;
1247 struct stat st;
1248 struct elf_binary elf;
1249- const elf_shdr *shdr;
1250- int notes_found = 0;
1251+ ELF_HANDLE_DECL(elf_shdr) shdr;
1252+ unsigned notes_found = 0;
1253
1254 struct setup_header *hdr;
1255 uint64_t payload_offset, payload_length;
1256@@ -257,7 +263,7 @@ int main(int argc, char **argv)
1257 count = elf_phdr_count(&elf);
1258 for ( h=0; h < count; h++)
1259 {
1260- const elf_phdr *phdr;
1261+ ELF_HANDLE_DECL(elf_phdr) phdr;
1262 phdr = elf_phdr_by_index(&elf, h);
1263 if (elf_uval(&elf, phdr, p_type) != PT_NOTE)
1264 continue;
1265@@ -269,8 +275,8 @@ int main(int argc, char **argv)
1266 continue;
1267
1268 notes_found = print_notes(&elf,
1269- elf_segment_start(&elf, phdr),
1270- elf_segment_end(&elf, phdr));
1271+ ELF_MAKE_HANDLE(elf_note, elf_segment_start(&elf, phdr)),
1272+ ELF_MAKE_HANDLE(elf_note, elf_segment_end(&elf, phdr)));
1273 }
1274
1275 if ( notes_found == 0 )
1276@@ -278,13 +284,13 @@ int main(int argc, char **argv)
1277 count = elf_shdr_count(&elf);
1278 for ( h=0; h < count; h++)
1279 {
1280- const elf_shdr *shdr;
1281+ ELF_HANDLE_DECL(elf_shdr) shdr;
1282 shdr = elf_shdr_by_index(&elf, h);
1283 if (elf_uval(&elf, shdr, sh_type) != SHT_NOTE)
1284 continue;
1285 notes_found = print_notes(&elf,
1286- elf_section_start(&elf, shdr),
1287- elf_section_end(&elf, shdr));
1288+ ELF_MAKE_HANDLE(elf_note, elf_section_start(&elf, shdr)),
1289+ ELF_MAKE_HANDLE(elf_note, elf_section_end(&elf, shdr)));
1290 if ( notes_found )
1291 fprintf(stderr, "using notes from SHT_NOTE section\n");
1292
1293@@ -292,8 +298,12 @@ int main(int argc, char **argv)
1294 }
1295
1296 shdr = elf_shdr_by_name(&elf, "__xen_guest");
1297- if (shdr)
1298- printf("__xen_guest: %s\n", (char*)elf_section_start(&elf, shdr));
1299+ if (ELF_HANDLE_VALID(shdr))
1300+ printf("__xen_guest: %s\n",
1301+ elf_strfmt(&elf, elf_section_start(&elf, shdr)));
1302+
1303+ if (elf_check_broken(&elf))
1304+ printf("warning: broken ELF: %s\n", elf_check_broken(&elf));
1305
1306 return 0;
1307 }
1308diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
1309index 2d56130..dec0519 100644
1310--- a/xen/arch/arm/kernel.c
1311+++ b/xen/arch/arm/kernel.c
1312@@ -146,6 +146,8 @@ static int kernel_try_elf_prepare(struct kernel_info *info)
1313 {
1314 int rc;
1315
1316+ memset(&info->elf.elf, 0, sizeof(info->elf.elf));
1317+
1318 info->kernel_order = get_order_from_bytes(KERNEL_FLASH_SIZE);
1319 info->kernel_img = alloc_xenheap_pages(info->kernel_order, 0);
1320 if ( info->kernel_img == NULL )
1321@@ -160,7 +162,7 @@ static int kernel_try_elf_prepare(struct kernel_info *info)
1322 #endif
1323 elf_parse_binary(&info->elf.elf);
1324 if ( (rc = elf_xen_parse(&info->elf.elf, &info->elf.parms)) != 0 )
1325- return rc;
1326+ goto err;
1327
1328 /*
1329 * TODO: can the ELF header be used to find the physical address
1330@@ -169,7 +171,18 @@ static int kernel_try_elf_prepare(struct kernel_info *info)
1331 info->entry = info->elf.parms.virt_entry;
1332 info->load = kernel_elf_load;
1333
1334+ if ( elf_check_broken(&info->elf.elf) )
1335+ printk("Xen: warning: ELF kernel broken: %s\n",
1336+ elf_check_broken(&info->elf.elf));
1337+
1338 return 0;
1339+
1340+err:
1341+ if ( elf_check_broken(&info->elf.elf) )
1342+ printk("Xen: ELF kernel broken: %s\n",
1343+ elf_check_broken(&info->elf.elf));
1344+
1345+ return rc;
1346 }
1347
1348 int kernel_prepare(struct kernel_info *info)
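(kernel_try_elf_prepare() above now zeroes the embedded elf_binary before use and funnels failures through an err label, so elf_check_broken() diagnostics are reported on both the success and failure paths. A compressed sketch of that control flow; struct elf_desc and parse_elf() are hypothetical stand-ins for struct elf_binary and elf_xen_parse().)

#include <stdio.h>
#include <string.h>

struct elf_desc {
    const char *broken;     /* non-NULL once the image is known to be bad */
    /* ... */
};

int parse_elf(struct elf_desc *elf);    /* hypothetical parser */

static int try_prepare(struct elf_desc *elf)
{
    int rc;

    memset(elf, 0, sizeof(*elf));       /* never inspect an uninitialised descriptor */

    rc = parse_elf(elf);
    if (rc != 0)
        goto err;

    if (elf->broken != NULL)            /* succeeded, but with complaints */
        printf("warning: ELF broken: %s\n", elf->broken);
    return 0;

err:
    if (elf->broken != NULL)            /* failed; say why if we know */
        printf("ELF broken: %s\n", elf->broken);
    return rc;
}
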
1349diff --git a/xen/arch/x86/bzimage.c b/xen/arch/x86/bzimage.c
1350index 5adc223..3600dca 100644
1351--- a/xen/arch/x86/bzimage.c
1352+++ b/xen/arch/x86/bzimage.c
1353@@ -220,7 +220,7 @@ unsigned long __init bzimage_headroom(char *image_start,
1354 image_length = hdr->payload_length;
1355 }
1356
1357- if ( elf_is_elfbinary(image_start) )
1358+ if ( elf_is_elfbinary(image_start, image_length) )
1359 return 0;
1360
1361 orig_image_len = image_length;
1362@@ -251,7 +251,7 @@ int __init bzimage_parse(char *image_base, char **image_start, unsigned long *im
1363 *image_len = hdr->payload_length;
1364 }
1365
1366- if ( elf_is_elfbinary(*image_start) )
1367+ if ( elf_is_elfbinary(*image_start, *image_len) )
1368 return 0;
1369
1370 BUG_ON(!(image_base < *image_start));
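(The bzimage.c hunks pass the image length into elf_is_elfbinary() so the magic-byte check cannot read past a truncated image. A standalone sketch of a size-checked ELF magic test; the 52-byte minimum is sizeof(Elf32_Ehdr), and is_elf_image() is an illustrative name.)

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define ELF32_EHDR_SIZE 52     /* sizeof(Elf32_Ehdr) */

static bool is_elf_image(const void *image, size_t size)
{
    static const unsigned char magic[4] = { 0x7f, 'E', 'L', 'F' };

    if (size < ELF32_EHDR_SIZE)
        return false;          /* too small to even contain a header */
    return memcmp(image, magic, sizeof(magic)) == 0;
}
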
1371diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
1372index 469d363..0dbec96 100644
1373--- a/xen/arch/x86/domain_build.c
1374+++ b/xen/arch/x86/domain_build.c
1375@@ -374,7 +374,7 @@ int __init construct_dom0(
1376 #endif
1377 elf_parse_binary(&elf);
1378 if ( (rc = elf_xen_parse(&elf, &parms)) != 0 )
1379- return rc;
1380+ goto out;
1381
1382 /* compatibility check */
1383 compatible = 0;
1384@@ -413,14 +413,16 @@ int __init construct_dom0(
1385 if ( !compatible )
1386 {
1387 printk("Mismatch between Xen and DOM0 kernel\n");
1388- return -EINVAL;
1389+ rc = -EINVAL;
1390+ goto out;
1391 }
1392
1393 if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE &&
1394 !test_bit(XENFEAT_dom0, parms.f_supported) )
1395 {
1396 printk("Kernel does not support Dom0 operation\n");
1397- return -EINVAL;
1398+ rc = -EINVAL;
1399+ goto out;
1400 }
1401
1402 #if defined(__x86_64__)
1403@@ -734,7 +736,8 @@ int __init construct_dom0(
1404 (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
1405 {
1406 printk("DOM0 image overlaps with Xen private area.\n");
1407- return -EINVAL;
1408+ rc = -EINVAL;
1409+ goto out;
1410 }
1411
1412 if ( is_pv_32on64_domain(d) )
1413@@ -908,12 +911,13 @@ int __init construct_dom0(
1414 write_ptbase(v);
1415
1416 /* Copy the OS image and free temporary buffer. */
1417- elf.dest = (void*)vkern_start;
1418+ elf.dest_base = (void*)vkern_start;
1419+ elf.dest_size = vkern_end - vkern_start;
1420 rc = elf_load_binary(&elf);
1421 if ( rc < 0 )
1422 {
1423 printk("Failed to load the kernel binary\n");
1424- return rc;
1425+ goto out;
1426 }
1427 bootstrap_map(NULL);
1428
1429@@ -924,7 +928,8 @@ int __init construct_dom0(
1430 {
1431 write_ptbase(current);
1432 printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
1433- return -1;
1434+ rc = -1;
1435+ goto out;
1436 }
1437 hypercall_page_initialise(
1438 d, (void *)(unsigned long)parms.virt_hypercall);
1439@@ -1271,9 +1276,19 @@ int __init construct_dom0(
1440
1441 BUG_ON(rc != 0);
1442
1443- iommu_dom0_init(dom0);
1444+ if ( elf_check_broken(&elf) )
1445+ printk(" Xen warning: dom0 kernel broken ELF: %s\n",
1446+ elf_check_broken(&elf));
1447
1448+ iommu_dom0_init(dom0);
1449 return 0;
1450+
1451+out:
1452+ if ( elf_check_broken(&elf) )
1453+ printk(" Xen dom0 kernel broken ELF: %s\n",
1454+ elf_check_broken(&elf));
1455+
1456+ return rc;
1457 }
1458
1459 /*
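(In the construct_dom0() hunk above, the load destination is now described by both elf.dest_base and elf.dest_size, so later copies can be bounds-checked rather than trusting a bare pointer. A small illustrative sketch of recording the pair together; struct load_dest is invented for the sketch, the real fields live in struct elf_binary.)

#include <stdint.h>
#include <stddef.h>

struct load_dest {
    void   *dest_base;      /* where the image will be written */
    size_t  dest_size;      /* how much room the caller reserved */
};

static int set_destination(struct load_dest *d,
                           uintptr_t vkern_start, uintptr_t vkern_end)
{
    if (vkern_end < vkern_start)
        return -1;                                    /* nonsensical range */
    d->dest_base = (void *)vkern_start;
    d->dest_size = (size_t)(vkern_end - vkern_start); /* size recorded alongside base */
    return 0;
}
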
1460diff --git a/xen/common/libelf/Makefile b/xen/common/libelf/Makefile
1461index 18dc8e2..5bf8f76 100644
1462--- a/xen/common/libelf/Makefile
1463+++ b/xen/common/libelf/Makefile
1464@@ -2,6 +2,8 @@ obj-bin-y := libelf.o
1465
1466 SECTIONS := text data $(SPECIAL_DATA_SECTIONS)
1467
1468+CFLAGS += -Wno-pointer-sign
1469+
1470 libelf.o: libelf-temp.o Makefile
1471 $(OBJCOPY) $(foreach s,$(SECTIONS),--rename-section .$(s)=.init.$(s)) $< $@
1472
1473diff --git a/xen/common/libelf/libelf-dominfo.c b/xen/common/libelf/libelf-dominfo.c
1474index 523837f..412ea70 100644
1475--- a/xen/common/libelf/libelf-dominfo.c
1476+++ b/xen/common/libelf/libelf-dominfo.c
1477@@ -29,22 +29,22 @@ static const char *const elf_xen_feature_names[] = {
1478 [XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb",
1479 [XENFEAT_dom0] = "dom0"
1480 };
1481-static const int elf_xen_features =
1482+static const unsigned elf_xen_features =
1483 sizeof(elf_xen_feature_names) / sizeof(elf_xen_feature_names[0]);
1484
1485-int elf_xen_parse_features(const char *features,
1486+elf_errorstatus elf_xen_parse_features(const char *features,
1487 uint32_t *supported,
1488 uint32_t *required)
1489 {
1490- char feature[64];
1491- int pos, len, i;
1492+ unsigned char feature[64];
1493+ unsigned pos, len, i;
1494
1495 if ( features == NULL )
1496 return 0;
1497
1498 for ( pos = 0; features[pos] != '\0'; pos += len )
1499 {
1500- memset(feature, 0, sizeof(feature));
1501+ elf_memset_unchecked(feature, 0, sizeof(feature));
1502 for ( len = 0;; len++ )
1503 {
1504 if ( len >= sizeof(feature)-1 )
1505@@ -94,14 +94,14 @@ int elf_xen_parse_features(const char *features,
1506 /* ------------------------------------------------------------------------ */
1507 /* xen elf notes */
1508
1509-int elf_xen_parse_note(struct elf_binary *elf,
1510+elf_errorstatus elf_xen_parse_note(struct elf_binary *elf,
1511 struct elf_dom_parms *parms,
1512- const elf_note *note)
1513+ ELF_HANDLE_DECL(elf_note) note)
1514 {
1515 /* *INDENT-OFF* */
1516 static const struct {
1517 char *name;
1518- int str;
1519+ bool str;
1520 } note_desc[] = {
1521 [XEN_ELFNOTE_ENTRY] = { "ENTRY", 0},
1522 [XEN_ELFNOTE_HYPERCALL_PAGE] = { "HYPERCALL_PAGE", 0},
1523@@ -125,7 +125,7 @@ int elf_xen_parse_note(struct elf_binary *elf,
1524 const char *str = NULL;
1525 uint64_t val = 0;
1526 unsigned int i;
1527- int type = elf_uval(elf, note, type);
1528+ unsigned type = elf_uval(elf, note, type);
1529
1530 if ( (type >= sizeof(note_desc) / sizeof(note_desc[0])) ||
1531 (note_desc[type].name == NULL) )
1532@@ -137,7 +137,10 @@ int elf_xen_parse_note(struct elf_binary *elf,
1533
1534 if ( note_desc[type].str )
1535 {
1536- str = elf_note_desc(elf, note);
1537+ str = elf_strval(elf, elf_note_desc(elf, note));
1538+ if (str == NULL)
1539+ /* elf_strval will mark elf broken if it fails so no need to log */
1540+ return 0;
1541 elf_msg(elf, "%s: %s = \"%s\"\n", __FUNCTION__,
1542 note_desc[type].name, str);
1543 parms->elf_notes[type].type = XEN_ENT_STR;
1544@@ -213,23 +216,37 @@ int elf_xen_parse_note(struct elf_binary *elf,
1545 return 0;
1546 }
1547
1548-static int elf_xen_parse_notes(struct elf_binary *elf,
1549+#define ELF_NOTE_INVALID (~0U)
1550+
1551+static unsigned elf_xen_parse_notes(struct elf_binary *elf,
1552 struct elf_dom_parms *parms,
1553- const void *start, const void *end)
1554+ elf_ptrval start,
1555+ elf_ptrval end,
1556+ unsigned *total_note_count)
1557 {
1558- int xen_elfnotes = 0;
1559- const elf_note *note;
1560+ unsigned xen_elfnotes = 0;
1561+ ELF_HANDLE_DECL(elf_note) note;
1562+ const char *note_name;
1563
1564 parms->elf_note_start = start;
1565 parms->elf_note_end = end;
1566- for ( note = parms->elf_note_start;
1567- (void *)note < parms->elf_note_end;
1568+ for ( note = ELF_MAKE_HANDLE(elf_note, parms->elf_note_start);
1569+ ELF_HANDLE_PTRVAL(note) < parms->elf_note_end;
1570 note = elf_note_next(elf, note) )
1571 {
1572- if ( strcmp(elf_note_name(elf, note), "Xen") )
1573+ if ( *total_note_count >= ELF_MAX_TOTAL_NOTE_COUNT )
1574+ {
1575+ elf_mark_broken(elf, "too many ELF notes");
1576+ break;
1577+ }
1578+ (*total_note_count)++;
1579+ note_name = elf_note_name(elf, note);
1580+ if ( note_name == NULL )
1581+ continue;
1582+ if ( strcmp(note_name, "Xen") )
1583 continue;
1584 if ( elf_xen_parse_note(elf, parms, note) )
1585- return -1;
1586+ return ELF_NOTE_INVALID;
1587 xen_elfnotes++;
1588 }
1589 return xen_elfnotes;
1590@@ -238,48 +255,49 @@ static int elf_xen_parse_notes(struct elf_binary *elf,
1591 /* ------------------------------------------------------------------------ */
1592 /* __xen_guest section */
1593
1594-int elf_xen_parse_guest_info(struct elf_binary *elf,
1595+elf_errorstatus elf_xen_parse_guest_info(struct elf_binary *elf,
1596 struct elf_dom_parms *parms)
1597 {
1598- const char *h;
1599- char name[32], value[128];
1600- int len;
1601+ elf_ptrval h;
1602+ unsigned char name[32], value[128];
1603+ unsigned len;
1604
1605 h = parms->guest_info;
1606- while ( *h )
1607+#define STAR(h) (elf_access_unsigned(elf, (h), 0, 1))
1608+ while ( STAR(h) )
1609 {
1610- memset(name, 0, sizeof(name));
1611- memset(value, 0, sizeof(value));
1612+ elf_memset_unchecked(name, 0, sizeof(name));
1613+ elf_memset_unchecked(value, 0, sizeof(value));
1614 for ( len = 0;; len++, h++ )
1615 {
1616 if ( len >= sizeof(name)-1 )
1617 break;
1618- if ( *h == '\0' )
1619+ if ( STAR(h) == '\0' )
1620 break;
1621- if ( *h == ',' )
1622+ if ( STAR(h) == ',' )
1623 {
1624 h++;
1625 break;
1626 }
1627- if ( *h == '=' )
1628+ if ( STAR(h) == '=' )
1629 {
1630 h++;
1631 for ( len = 0;; len++, h++ )
1632 {
1633 if ( len >= sizeof(value)-1 )
1634 break;
1635- if ( *h == '\0' )
1636+ if ( STAR(h) == '\0' )
1637 break;
1638- if ( *h == ',' )
1639+ if ( STAR(h) == ',' )
1640 {
1641 h++;
1642 break;
1643 }
1644- value[len] = *h;
1645+ value[len] = STAR(h);
1646 }
1647 break;
1648 }
1649- name[len] = *h;
1650+ name[len] = STAR(h);
1651 }
1652 elf_msg(elf, "%s: %s=\"%s\"\n", __FUNCTION__, name, value);
1653
1654@@ -325,12 +343,13 @@ int elf_xen_parse_guest_info(struct elf_binary *elf,
1655 /* ------------------------------------------------------------------------ */
1656 /* sanity checks */
1657
1658-static int elf_xen_note_check(struct elf_binary *elf,
1659+static elf_errorstatus elf_xen_note_check(struct elf_binary *elf,
1660 struct elf_dom_parms *parms)
1661 {
1662- if ( (parms->elf_note_start == NULL) && (parms->guest_info == NULL) )
1663+ if ( (ELF_PTRVAL_INVALID(parms->elf_note_start)) &&
1664+ (ELF_PTRVAL_INVALID(parms->guest_info)) )
1665 {
1666- int machine = elf_uval(elf, elf->ehdr, e_machine);
1667+ unsigned machine = elf_uval(elf, elf->ehdr, e_machine);
1668 if ( (machine == EM_386) || (machine == EM_X86_64) )
1669 {
1670 elf_err(elf, "%s: ERROR: Not a Xen-ELF image: "
1671@@ -368,7 +387,7 @@ static int elf_xen_note_check(struct elf_binary *elf,
1672 return 0;
1673 }
1674
1675-static int elf_xen_addr_calc_check(struct elf_binary *elf,
1676+static elf_errorstatus elf_xen_addr_calc_check(struct elf_binary *elf,
1677 struct elf_dom_parms *parms)
1678 {
1679 if ( (parms->elf_paddr_offset != UNSET_ADDR) &&
1680@@ -454,15 +473,16 @@ static int elf_xen_addr_calc_check(struct elf_binary *elf,
1681 /* ------------------------------------------------------------------------ */
1682 /* glue it all together ... */
1683
1684-int elf_xen_parse(struct elf_binary *elf,
1685+elf_errorstatus elf_xen_parse(struct elf_binary *elf,
1686 struct elf_dom_parms *parms)
1687 {
1688- const elf_shdr *shdr;
1689- const elf_phdr *phdr;
1690- int xen_elfnotes = 0;
1691- int i, count, rc;
1692+ ELF_HANDLE_DECL(elf_shdr) shdr;
1693+ ELF_HANDLE_DECL(elf_phdr) phdr;
1694+ unsigned xen_elfnotes = 0;
1695+ unsigned i, count, more_notes;
1696+ unsigned total_note_count = 0;
1697
1698- memset(parms, 0, sizeof(*parms));
1699+ elf_memset_unchecked(parms, 0, sizeof(*parms));
1700 parms->virt_base = UNSET_ADDR;
1701 parms->virt_entry = UNSET_ADDR;
1702 parms->virt_hypercall = UNSET_ADDR;
1703@@ -475,6 +495,9 @@ int elf_xen_parse(struct elf_binary *elf,
1704 for ( i = 0; i < count; i++ )
1705 {
1706 phdr = elf_phdr_by_index(elf, i);
1707+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(phdr), 1) )
1708+ /* input has an insane program header count field */
1709+ break;
1710 if ( elf_uval(elf, phdr, p_type) != PT_NOTE )
1711 continue;
1712
1713@@ -485,13 +508,14 @@ int elf_xen_parse(struct elf_binary *elf,
1714 if (elf_uval(elf, phdr, p_offset) == 0)
1715 continue;
1716
1717- rc = elf_xen_parse_notes(elf, parms,
1718+ more_notes = elf_xen_parse_notes(elf, parms,
1719 elf_segment_start(elf, phdr),
1720- elf_segment_end(elf, phdr));
1721- if ( rc == -1 )
1722+ elf_segment_end(elf, phdr),
1723+ &total_note_count);
1724+ if ( more_notes == ELF_NOTE_INVALID )
1725 return -1;
1726
1727- xen_elfnotes += rc;
1728+ xen_elfnotes += more_notes;
1729 }
1730
1731 /*
1732@@ -504,21 +528,25 @@ int elf_xen_parse(struct elf_binary *elf,
1733 for ( i = 0; i < count; i++ )
1734 {
1735 shdr = elf_shdr_by_index(elf, i);
1736+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(shdr), 1) )
1737+ /* input has an insane section header count field */
1738+ break;
1739
1740 if ( elf_uval(elf, shdr, sh_type) != SHT_NOTE )
1741 continue;
1742
1743- rc = elf_xen_parse_notes(elf, parms,
1744+ more_notes = elf_xen_parse_notes(elf, parms,
1745 elf_section_start(elf, shdr),
1746- elf_section_end(elf, shdr));
1747+ elf_section_end(elf, shdr),
1748+ &total_note_count);
1749
1750- if ( rc == -1 )
1751+ if ( more_notes == ELF_NOTE_INVALID )
1752 return -1;
1753
1754- if ( xen_elfnotes == 0 && rc > 0 )
1755+ if ( xen_elfnotes == 0 && more_notes > 0 )
1756 elf_msg(elf, "%s: using notes from SHT_NOTE section\n", __FUNCTION__);
1757
1758- xen_elfnotes += rc;
1759+ xen_elfnotes += more_notes;
1760 }
1761
1762 }
1763@@ -528,20 +556,15 @@ int elf_xen_parse(struct elf_binary *elf,
1764 */
1765 if ( xen_elfnotes == 0 )
1766 {
1767- count = elf_shdr_count(elf);
1768- for ( i = 0; i < count; i++ )
1769+ shdr = elf_shdr_by_name(elf, "__xen_guest");
1770+ if ( ELF_HANDLE_VALID(shdr) )
1771 {
1772- shdr = elf_shdr_by_name(elf, "__xen_guest");
1773- if ( shdr )
1774- {
1775- parms->guest_info = elf_section_start(elf, shdr);
1776- parms->elf_note_start = NULL;
1777- parms->elf_note_end = NULL;
1778- elf_msg(elf, "%s: __xen_guest: \"%s\"\n", __FUNCTION__,
1779- parms->guest_info);
1780- elf_xen_parse_guest_info(elf, parms);
1781- break;
1782- }
1783+ parms->guest_info = elf_section_start(elf, shdr);
1784+ parms->elf_note_start = ELF_INVALID_PTRVAL;
1785+ parms->elf_note_end = ELF_INVALID_PTRVAL;
1786+ elf_msg(elf, "%s: __xen_guest: \"%s\"\n", __FUNCTION__,
1787+ elf_strfmt(elf, parms->guest_info));
1788+ elf_xen_parse_guest_info(elf, parms);
1789 }
1790 }
1791
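(elf_xen_parse_notes() above now caps the total number of note records examined (ELF_MAX_TOTAL_NOTE_COUNT), so a crafted image cannot keep the parser iterating indefinitely. A sketch of a capped iteration with the same diagnostic; struct note and next_note() are hypothetical stand-ins for the libelf handle types.)

#include <stddef.h>

#define MAX_TOTAL_NOTES 65536          /* mirrors ELF_MAX_TOTAL_NOTE_COUNT */

struct note;                           /* opaque for this sketch */
const struct note *next_note(const struct note *n);   /* hypothetical iterator */

static unsigned count_notes(const struct note *start, const struct note *end,
                            const char **broken)
{
    unsigned seen = 0;
    const struct note *n;

    for (n = start; n < end; n = next_note(n)) {
        if (seen >= MAX_TOTAL_NOTES) {
            *broken = "too many ELF notes";   /* same diagnostic the patch uses */
            break;
        }
        seen++;
    }
    return seen;
}
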
1792diff --git a/xen/common/libelf/libelf-loader.c b/xen/common/libelf/libelf-loader.c
1793index ab58b8b..e2e75af 100644
1794--- a/xen/common/libelf/libelf-loader.c
1795+++ b/xen/common/libelf/libelf-loader.c
1796@@ -16,27 +16,33 @@
1797 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
1798 */
1799
1800+#ifdef __XEN__
1801+#include <asm/guest_access.h>
1802+#endif
1803+
1804 #include "libelf-private.h"
1805
1806 /* ------------------------------------------------------------------------ */
1807
1808-int elf_init(struct elf_binary *elf, const char *image, size_t size)
1809+elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, size_t size)
1810 {
1811- const elf_shdr *shdr;
1812+ ELF_HANDLE_DECL(elf_shdr) shdr;
1813 uint64_t i, count, section, offset;
1814
1815- if ( !elf_is_elfbinary(image) )
1816+ if ( !elf_is_elfbinary(image_input, size) )
1817 {
1818 elf_err(elf, "%s: not an ELF binary\n", __FUNCTION__);
1819 return -1;
1820 }
1821
1822- memset(elf, 0, sizeof(*elf));
1823- elf->image = image;
1824+ elf_memset_unchecked(elf, 0, sizeof(*elf));
1825+ elf->image_base = image_input;
1826 elf->size = size;
1827- elf->ehdr = (elf_ehdr *)image;
1828- elf->class = elf->ehdr->e32.e_ident[EI_CLASS];
1829- elf->data = elf->ehdr->e32.e_ident[EI_DATA];
1830+ elf->ehdr = ELF_MAKE_HANDLE(elf_ehdr, (elf_ptrval)image_input);
1831+ elf->class = elf_uval_3264(elf, elf->ehdr, e32.e_ident[EI_CLASS]);
1832+ elf->data = elf_uval_3264(elf, elf->ehdr, e32.e_ident[EI_DATA]);
1833+ elf->caller_xdest_base = NULL;
1834+ elf->caller_xdest_size = 0;
1835
1836 /* Sanity check phdr. */
1837 offset = elf_uval(elf, elf->ehdr, e_phoff) +
1838@@ -61,7 +67,7 @@ int elf_init(struct elf_binary *elf, const char *image, size_t size)
1839 /* Find section string table. */
1840 section = elf_uval(elf, elf->ehdr, e_shstrndx);
1841 shdr = elf_shdr_by_index(elf, section);
1842- if ( shdr != NULL )
1843+ if ( ELF_HANDLE_VALID(shdr) )
1844 elf->sec_strtab = elf_section_start(elf, shdr);
1845
1846 /* Find symbol table and symbol string table. */
1847@@ -69,13 +75,16 @@ int elf_init(struct elf_binary *elf, const char *image, size_t size)
1848 for ( i = 0; i < count; i++ )
1849 {
1850 shdr = elf_shdr_by_index(elf, i);
1851+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(shdr), 1) )
1852+ /* input has an insane section header count field */
1853+ break;
1854 if ( elf_uval(elf, shdr, sh_type) != SHT_SYMTAB )
1855 continue;
1856 elf->sym_tab = shdr;
1857 shdr = elf_shdr_by_index(elf, elf_uval(elf, shdr, sh_link));
1858- if ( shdr == NULL )
1859+ if ( !ELF_HANDLE_VALID(shdr) )
1860 {
1861- elf->sym_tab = NULL;
1862+ elf->sym_tab = ELF_INVALID_HANDLE(elf_shdr);
1863 continue;
1864 }
1865 elf->sym_strtab = elf_section_start(elf, shdr);
1866@@ -86,7 +95,7 @@ int elf_init(struct elf_binary *elf, const char *image, size_t size)
1867 }
1868
1869 #ifndef __XEN__
1870-void elf_call_log_callback(struct elf_binary *elf, int iserr,
1871+void elf_call_log_callback(struct elf_binary *elf, bool iserr,
1872 const char *fmt,...) {
1873 va_list al;
1874
1875@@ -101,36 +110,39 @@ void elf_call_log_callback(struct elf_binary *elf, int iserr,
1876 }
1877
1878 void elf_set_log(struct elf_binary *elf, elf_log_callback *log_callback,
1879- void *log_caller_data, int verbose)
1880+ void *log_caller_data, bool verbose)
1881 {
1882 elf->log_callback = log_callback;
1883 elf->log_caller_data = log_caller_data;
1884 elf->verbose = verbose;
1885 }
1886
1887-static int elf_load_image(void *dst, const void *src, uint64_t filesz, uint64_t memsz)
1888+static elf_errorstatus elf_load_image(struct elf_binary *elf,
1889+ elf_ptrval dst, elf_ptrval src,
1890+ uint64_t filesz, uint64_t memsz)
1891 {
1892- memcpy(dst, src, filesz);
1893- memset(dst + filesz, 0, memsz - filesz);
1894+ elf_memcpy_safe(elf, dst, src, filesz);
1895+ elf_memset_safe(elf, dst + filesz, 0, memsz - filesz);
1896 return 0;
1897 }
1898 #else
1899-#include <asm/guest_access.h>
1900
1901 void elf_set_verbose(struct elf_binary *elf)
1902 {
1903 elf->verbose = 1;
1904 }
1905
1906-static int elf_load_image(void *dst, const void *src, uint64_t filesz, uint64_t memsz)
1907+static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, elf_ptrval src, uint64_t filesz, uint64_t memsz)
1908 {
1909- int rc;
1910+ elf_errorstatus rc;
1911 if ( filesz > ULONG_MAX || memsz > ULONG_MAX )
1912 return -1;
1913- rc = raw_copy_to_guest(dst, src, filesz);
1914+ /* We trust the dom0 kernel image completely, so we don't care
1915+ * about overruns etc. here. */
1916+ rc = raw_copy_to_guest(ELF_UNSAFE_PTR(dst), ELF_UNSAFE_PTR(src), filesz);
1917 if ( rc != 0 )
1918 return -1;
1919- rc = raw_clear_guest(dst + filesz, memsz - filesz);
1920+ rc = raw_clear_guest(ELF_UNSAFE_PTR(dst + filesz), memsz - filesz);
1921 if ( rc != 0 )
1922 return -1;
1923 return 0;
1924@@ -141,10 +153,10 @@ static int elf_load_image(void *dst, const void *src, uint64_t filesz, uint64_t
1925 void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart)
1926 {
1927 uint64_t sz;
1928- const elf_shdr *shdr;
1929- int i, type;
1930+ ELF_HANDLE_DECL(elf_shdr) shdr;
1931+ unsigned i, type;
1932
1933- if ( !elf->sym_tab )
1934+ if ( !ELF_HANDLE_VALID(elf->sym_tab) )
1935 return;
1936
1937 pstart = elf_round_up(elf, pstart);
1938@@ -161,7 +173,10 @@ void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart)
1939 for ( i = 0; i < elf_shdr_count(elf); i++ )
1940 {
1941 shdr = elf_shdr_by_index(elf, i);
1942- type = elf_uval(elf, (elf_shdr *)shdr, sh_type);
1943+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(shdr), 1) )
1944+ /* input has an insane section header count field */
1945+ break;
1946+ type = elf_uval(elf, shdr, sh_type);
1947 if ( (type == SHT_STRTAB) || (type == SHT_SYMTAB) )
1948 sz = elf_round_up(elf, sz + elf_uval(elf, shdr, sh_size));
1949 }
1950@@ -172,11 +187,13 @@ void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart)
1951
1952 static void elf_load_bsdsyms(struct elf_binary *elf)
1953 {
1954- elf_ehdr *sym_ehdr;
1955+ ELF_HANDLE_DECL(elf_ehdr) sym_ehdr;
1956 unsigned long sz;
1957- char *maxva, *symbase, *symtab_addr;
1958- elf_shdr *shdr;
1959- int i, type;
1960+ elf_ptrval maxva;
1961+ elf_ptrval symbase;
1962+ elf_ptrval symtab_addr;
1963+ ELF_HANDLE_DECL(elf_shdr) shdr;
1964+ unsigned i, type;
1965
1966 if ( !elf->bsd_symtab_pstart )
1967 return;
1968@@ -184,18 +201,18 @@ static void elf_load_bsdsyms(struct elf_binary *elf)
1969 #define elf_hdr_elm(_elf, _hdr, _elm, _val) \
1970 do { \
1971 if ( elf_64bit(_elf) ) \
1972- (_hdr)->e64._elm = _val; \
1973+ elf_store_field(_elf, _hdr, e64._elm, _val); \
1974 else \
1975- (_hdr)->e32._elm = _val; \
1976+ elf_store_field(_elf, _hdr, e32._elm, _val); \
1977 } while ( 0 )
1978
1979 symbase = elf_get_ptr(elf, elf->bsd_symtab_pstart);
1980 symtab_addr = maxva = symbase + sizeof(uint32_t);
1981
1982 /* Set up Elf header. */
1983- sym_ehdr = (elf_ehdr *)symtab_addr;
1984+ sym_ehdr = ELF_MAKE_HANDLE(elf_ehdr, symtab_addr);
1985 sz = elf_uval(elf, elf->ehdr, e_ehsize);
1986- memcpy(sym_ehdr, elf->ehdr, sz);
1987+ elf_memcpy_safe(elf, ELF_HANDLE_PTRVAL(sym_ehdr), ELF_HANDLE_PTRVAL(elf->ehdr), sz);
1988 maxva += sz; /* no round up */
1989
1990 elf_hdr_elm(elf, sym_ehdr, e_phoff, 0);
1991@@ -204,37 +221,50 @@ do { \
1992 elf_hdr_elm(elf, sym_ehdr, e_phnum, 0);
1993
1994 /* Copy Elf section headers. */
1995- shdr = (elf_shdr *)maxva;
1996+ shdr = ELF_MAKE_HANDLE(elf_shdr, maxva);
1997 sz = elf_shdr_count(elf) * elf_uval(elf, elf->ehdr, e_shentsize);
1998- memcpy(shdr, elf->image + elf_uval(elf, elf->ehdr, e_shoff), sz);
1999- maxva = (char *)(long)elf_round_up(elf, (long)maxva + sz);
2000+ elf_memcpy_safe(elf, ELF_HANDLE_PTRVAL(shdr),
2001+ ELF_IMAGE_BASE(elf) + elf_uval(elf, elf->ehdr, e_shoff),
2002+ sz);
2003+ maxva = elf_round_up(elf, (unsigned long)maxva + sz);
2004
2005 for ( i = 0; i < elf_shdr_count(elf); i++ )
2006 {
2007+ elf_ptrval old_shdr_p;
2008+ elf_ptrval new_shdr_p;
2009+
2010 type = elf_uval(elf, shdr, sh_type);
2011 if ( (type == SHT_STRTAB) || (type == SHT_SYMTAB) )
2012 {
2013- elf_msg(elf, "%s: shdr %i at 0x%p -> 0x%p\n", __func__, i,
2014+ elf_msg(elf, "%s: shdr %i at 0x%"ELF_PRPTRVAL" -> 0x%"ELF_PRPTRVAL"\n", __func__, i,
2015 elf_section_start(elf, shdr), maxva);
2016 sz = elf_uval(elf, shdr, sh_size);
2017- memcpy(maxva, elf_section_start(elf, shdr), sz);
2018+ elf_memcpy_safe(elf, maxva, elf_section_start(elf, shdr), sz);
2019 /* Mangled to be based on ELF header location. */
2020 elf_hdr_elm(elf, shdr, sh_offset, maxva - symtab_addr);
2021- maxva = (char *)(long)elf_round_up(elf, (long)maxva + sz);
2022+ maxva = elf_round_up(elf, (unsigned long)maxva + sz);
2023 }
2024- shdr = (elf_shdr *)((long)shdr +
2025- (long)elf_uval(elf, elf->ehdr, e_shentsize));
2026+ old_shdr_p = ELF_HANDLE_PTRVAL(shdr);
2027+ new_shdr_p = old_shdr_p + elf_uval(elf, elf->ehdr, e_shentsize);
2028+ if ( new_shdr_p <= old_shdr_p ) /* wrapped or stuck */
2029+ {
2030+ elf_mark_broken(elf, "bad section header length");
2031+ break;
2032+ }
2033+ if ( !elf_access_ok(elf, new_shdr_p, 1) ) /* outside image */
2034+ break;
2035+ shdr = ELF_MAKE_HANDLE(elf_shdr, new_shdr_p);
2036 }
2037
2038 /* Write down the actual sym size. */
2039- *(uint32_t *)symbase = maxva - symtab_addr;
2040+ elf_store_val(elf, uint32_t, symbase, maxva - symtab_addr);
2041
2042 #undef elf_ehdr_elm
2043 }
2044
2045 void elf_parse_binary(struct elf_binary *elf)
2046 {
2047- const elf_phdr *phdr;
2048+ ELF_HANDLE_DECL(elf_phdr) phdr;
2049 uint64_t low = -1;
2050 uint64_t high = 0;
2051 uint64_t i, count, paddr, memsz;
2052@@ -243,6 +273,9 @@ void elf_parse_binary(struct elf_binary *elf)
2053 for ( i = 0; i < count; i++ )
2054 {
2055 phdr = elf_phdr_by_index(elf, i);
2056+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(phdr), 1) )
2057+ /* input has an insane program header count field */
2058+ break;
2059 if ( !elf_phdr_is_loadable(elf, phdr) )
2060 continue;
2061 paddr = elf_uval(elf, phdr, p_paddr);
2062@@ -260,16 +293,25 @@ void elf_parse_binary(struct elf_binary *elf)
2063 __FUNCTION__, elf->pstart, elf->pend);
2064 }
2065
2066-int elf_load_binary(struct elf_binary *elf)
2067+elf_errorstatus elf_load_binary(struct elf_binary *elf)
2068 {
2069- const elf_phdr *phdr;
2070+ ELF_HANDLE_DECL(elf_phdr) phdr;
2071 uint64_t i, count, paddr, offset, filesz, memsz;
2072- char *dest;
2073+ elf_ptrval dest;
2074+ /*
2075+ * Let bizarre ELFs write the output image up to twice; this
2076+ * calculation is just to ensure our copying loop is no worse than
2077+ * O(domain_size).
2078+ */
2079+ uint64_t remain_allow_copy = (uint64_t)elf->dest_size * 2;
2080
2081 count = elf_uval(elf, elf->ehdr, e_phnum);
2082 for ( i = 0; i < count; i++ )
2083 {
2084 phdr = elf_phdr_by_index(elf, i);
2085+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(phdr), 1) )
2086+ /* input has an insane program header count field */
2087+ break;
2088 if ( !elf_phdr_is_loadable(elf, phdr) )
2089 continue;
2090 paddr = elf_uval(elf, phdr, p_paddr);
2091@@ -277,9 +319,23 @@ int elf_load_binary(struct elf_binary *elf)
2092 filesz = elf_uval(elf, phdr, p_filesz);
2093 memsz = elf_uval(elf, phdr, p_memsz);
2094 dest = elf_get_ptr(elf, paddr);
2095- elf_msg(elf, "%s: phdr %" PRIu64 " at 0x%p -> 0x%p\n",
2096- __func__, i, dest, dest + filesz);
2097- if ( elf_load_image(dest, elf->image + offset, filesz, memsz) != 0 )
2098+
2099+ /*
2100+ * We need to check that the input image doesn't have us copy
2101+ * the whole image zillions of times, as that could lead to
2102+ * O(n^2) time behaviour and possible DoS by a malicious ELF.
2103+ */
2104+ if ( remain_allow_copy < memsz )
2105+ {
2106+ elf_mark_broken(elf, "program segments total to more"
2107+ " than the input image size");
2108+ break;
2109+ }
2110+ remain_allow_copy -= memsz;
2111+
2112+ elf_msg(elf, "%s: phdr %" PRIu64 " at 0x%"ELF_PRPTRVAL" -> 0x%"ELF_PRPTRVAL"\n",
2113+ __func__, i, dest, (elf_ptrval)(dest + filesz));
2114+ if ( elf_load_image(elf, dest, ELF_IMAGE_BASE(elf) + offset, filesz, memsz) != 0 )
2115 return -1;
2116 }
2117
2118@@ -287,18 +343,18 @@ int elf_load_binary(struct elf_binary *elf)
2119 return 0;
2120 }
2121
2122-void *elf_get_ptr(struct elf_binary *elf, unsigned long addr)
2123+elf_ptrval elf_get_ptr(struct elf_binary *elf, unsigned long addr)
2124 {
2125- return elf->dest + addr - elf->pstart;
2126+ return ELF_REALPTR2PTRVAL(elf->dest_base) + addr - elf->pstart;
2127 }
2128
2129 uint64_t elf_lookup_addr(struct elf_binary * elf, const char *symbol)
2130 {
2131- const elf_sym *sym;
2132+ ELF_HANDLE_DECL(elf_sym) sym;
2133 uint64_t value;
2134
2135 sym = elf_sym_by_name(elf, symbol);
2136- if ( sym == NULL )
2137+ if ( !ELF_HANDLE_VALID(sym) )
2138 {
2139 elf_err(elf, "%s: not found: %s\n", __FUNCTION__, symbol);
2140 return -1;
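(elf_load_binary() above keeps a copy budget of twice the destination size; a segment whose memsz would exceed the remaining budget marks the image broken, bounding total copying at O(domain size). A self-contained sketch of that budget check; struct seg and segments_within_budget() are illustrative names.)

#include <stdint.h>
#include <stdbool.h>

struct seg { uint64_t memsz; };        /* illustrative segment descriptor */

static bool segments_within_budget(const struct seg *seg, unsigned nseg,
                                    uint64_t dest_size)
{
    uint64_t remain = dest_size * 2;   /* generous, but still O(dest_size) */
    unsigned i;

    for (i = 0; i < nseg; i++) {
        if (remain < seg[i].memsz)
            return false;              /* would copy more than the budget allows */
        remain -= seg[i].memsz;
    }
    return true;
}
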
2141diff --git a/xen/common/libelf/libelf-private.h b/xen/common/libelf/libelf-private.h
2142index 3ef753c..277be04 100644
2143--- a/xen/common/libelf/libelf-private.h
2144+++ b/xen/common/libelf/libelf-private.h
2145@@ -77,7 +77,7 @@
2146 #define elf_err(elf, fmt, args ... ) \
2147 elf_call_log_callback(elf, 1, fmt , ## args );
2148
2149-void elf_call_log_callback(struct elf_binary*, int iserr, const char *fmt,...);
2150+void elf_call_log_callback(struct elf_binary*, bool iserr, const char *fmt,...);
2151
2152 #define safe_strcpy(d,s) \
2153 do { strncpy((d),(s),sizeof((d))-1); \
2154@@ -86,6 +86,19 @@ do { strncpy((d),(s),sizeof((d))-1); \
2155
2156 #endif
2157
2158+#undef memcpy
2159+#undef memset
2160+#undef memmove
2161+#undef strcpy
2162+
2163+#define memcpy MISTAKE_unspecified_memcpy
2164+#define memset MISTAKE_unspecified_memset
2165+#define memmove MISTAKE_unspecified_memmove
2166+#define strcpy MISTAKE_unspecified_strcpy
2167+ /* This prevents libelf from using these undecorated versions
2168+ * of memcpy, memset, memmove and strcpy. Every call site
2169+ * must either use elf_mem*_unchecked, or elf_mem*_safe. */
2170+
2171 #endif /* __LIBELF_PRIVATE_H_ */
2172
2173 /*
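(libelf-private.h above redefines the undecorated memcpy/memset/memmove/strcpy names to deliberately undefined symbols, so any libelf call site not yet converted to the elf_mem*_safe or elf_mem*_unchecked wrappers fails at compile or link time. A minimal header-style sketch of the same poisoning trick; checked_memcpy() is a hypothetical audited wrapper.)

#include <string.h>

void *checked_memcpy(void *dst, const void *src, size_t n);  /* audited wrapper */

#undef  memcpy
#define memcpy MISTAKE_use_checked_memcpy   /* no such symbol is ever defined */

/* From this point on, a stray memcpy(dst, src, n) call cannot survive the
 * build, forcing every call site through checked_memcpy() instead. */
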
2174diff --git a/xen/common/libelf/libelf-relocate.c b/xen/common/libelf/libelf-relocate.c
2175deleted file mode 100644
2176index 7ef4b01..0000000
2177--- a/xen/common/libelf/libelf-relocate.c
2178+++ /dev/null
2179@@ -1,372 +0,0 @@
2180-/*
2181- * ELF relocation code (not used by xen kernel right now).
2182- *
2183- * This library is free software; you can redistribute it and/or
2184- * modify it under the terms of the GNU Lesser General Public
2185- * License as published by the Free Software Foundation;
2186- * version 2.1 of the License.
2187- *
2188- * This library is distributed in the hope that it will be useful,
2189- * but WITHOUT ANY WARRANTY; without even the implied warranty of
2190- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
2191- * Lesser General Public License for more details.
2192- *
2193- * You should have received a copy of the GNU Lesser General Public
2194- * License along with this library; if not, write to the Free Software
2195- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
2196- */
2197-
2198-#include "libelf-private.h"
2199-
2200-/* ------------------------------------------------------------------------ */
2201-
2202-static const char *rel_names_i386[] = {
2203- "R_386_NONE",
2204- "R_386_32",
2205- "R_386_PC32",
2206- "R_386_GOT32",
2207- "R_386_PLT32",
2208- "R_386_COPY",
2209- "R_386_GLOB_DAT",
2210- "R_386_JMP_SLOT",
2211- "R_386_RELATIVE",
2212- "R_386_GOTOFF",
2213- "R_386_GOTPC",
2214- "R_386_32PLT",
2215- "R_386_TLS_TPOFF",
2216- "R_386_TLS_IE",
2217- "R_386_TLS_GOTIE",
2218- "R_386_TLS_LE",
2219- "R_386_TLS_GD",
2220- "R_386_TLS_LDM",
2221- "R_386_16",
2222- "R_386_PC16",
2223- "R_386_8",
2224- "R_386_PC8",
2225- "R_386_TLS_GD_32",
2226- "R_386_TLS_GD_PUSH",
2227- "R_386_TLS_GD_CALL",
2228- "R_386_TLS_GD_POP",
2229- "R_386_TLS_LDM_32",
2230- "R_386_TLS_LDM_PUSH",
2231- "R_386_TLS_LDM_CALL",
2232- "R_386_TLS_LDM_POP",
2233- "R_386_TLS_LDO_32",
2234- "R_386_TLS_IE_32",
2235- "R_386_TLS_LE_32",
2236- "R_386_TLS_DTPMOD32",
2237- "R_386_TLS_DTPOFF32",
2238- "R_386_TLS_TPOFF32",
2239-};
2240-
2241-static int elf_reloc_i386(struct elf_binary *elf, int type,
2242- uint64_t addr, uint64_t value)
2243-{
2244- void *ptr = elf_get_ptr(elf, addr);
2245- uint32_t *u32;
2246-
2247- switch ( type )
2248- {
2249- case 1 /* R_386_32 */ :
2250- u32 = ptr;
2251- *u32 += elf->reloc_offset;
2252- break;
2253- case 2 /* R_386_PC32 */ :
2254- /* nothing */
2255- break;
2256- default:
2257- return -1;
2258- }
2259- return 0;
2260-}
2261-
2262-/* ------------------------------------------------------------------------ */
2263-
2264-static const char *rel_names_x86_64[] = {
2265- "R_X86_64_NONE",
2266- "R_X86_64_64",
2267- "R_X86_64_PC32",
2268- "R_X86_64_GOT32",
2269- "R_X86_64_PLT32",
2270- "R_X86_64_COPY",
2271- "R_X86_64_GLOB_DAT",
2272- "R_X86_64_JUMP_SLOT",
2273- "R_X86_64_RELATIVE",
2274- "R_X86_64_GOTPCREL",
2275- "R_X86_64_32",
2276- "R_X86_64_32S",
2277- "R_X86_64_16",
2278- "R_X86_64_PC16",
2279- "R_X86_64_8",
2280- "R_X86_64_PC8",
2281- "R_X86_64_DTPMOD64",
2282- "R_X86_64_DTPOFF64",
2283- "R_X86_64_TPOFF64",
2284- "R_X86_64_TLSGD",
2285- "R_X86_64_TLSLD",
2286- "R_X86_64_DTPOFF32",
2287- "R_X86_64_GOTTPOFF",
2288- "R_X86_64_TPOFF32",
2289-};
2290-
2291-static int elf_reloc_x86_64(struct elf_binary *elf, int type,
2292- uint64_t addr, uint64_t value)
2293-{
2294- void *ptr = elf_get_ptr(elf, addr);
2295- uint64_t *u64;
2296- uint32_t *u32;
2297- int32_t *s32;
2298-
2299- switch ( type )
2300- {
2301- case 1 /* R_X86_64_64 */ :
2302- u64 = ptr;
2303- value += elf->reloc_offset;
2304- *u64 = value;
2305- break;
2306- case 2 /* R_X86_64_PC32 */ :
2307- u32 = ptr;
2308- *u32 = value - addr;
2309- if ( *u32 != (uint32_t)(value - addr) )
2310- {
2311- elf_err(elf, "R_X86_64_PC32 overflow: 0x%" PRIx32
2312- " != 0x%" PRIx32 "\n",
2313- *u32, (uint32_t) (value - addr));
2314- return -1;
2315- }
2316- break;
2317- case 10 /* R_X86_64_32 */ :
2318- u32 = ptr;
2319- value += elf->reloc_offset;
2320- *u32 = value;
2321- if ( *u32 != value )
2322- {
2323- elf_err(elf, "R_X86_64_32 overflow: 0x%" PRIx32
2324- " != 0x%" PRIx64 "\n",
2325- *u32, value);
2326- return -1;
2327- }
2328- break;
2329- case 11 /* R_X86_64_32S */ :
2330- s32 = ptr;
2331- value += elf->reloc_offset;
2332- *s32 = value;
2333- if ( *s32 != (int64_t) value )
2334- {
2335- elf_err(elf, "R_X86_64_32S overflow: 0x%" PRIx32
2336- " != 0x%" PRIx64 "\n",
2337- *s32, (int64_t) value);
2338- return -1;
2339- }
2340- break;
2341- default:
2342- return -1;
2343- }
2344- return 0;
2345-}
2346-
2347-/* ------------------------------------------------------------------------ */
2348-
2349-static struct relocs {
2350- const char **names;
2351- int count;
2352- int (*func) (struct elf_binary * elf, int type, uint64_t addr,
2353- uint64_t value);
2354-} relocs[] =
2355-/* *INDENT-OFF* */
2356-{
2357- [EM_386] = {
2358- .names = rel_names_i386,
2359- .count = sizeof(rel_names_i386) / sizeof(rel_names_i386[0]),
2360- .func = elf_reloc_i386,
2361- },
2362- [EM_X86_64] = {
2363- .names = rel_names_x86_64,
2364- .count = sizeof(rel_names_x86_64) / sizeof(rel_names_x86_64[0]),
2365- .func = elf_reloc_x86_64,
2366- }
2367-};
2368-/* *INDENT-ON* */
2369-
2370-/* ------------------------------------------------------------------------ */
2371-
2372-static const char *rela_name(int machine, int type)
2373-{
2374- if ( machine > sizeof(relocs) / sizeof(relocs[0]) )
2375- return "unknown mach";
2376- if ( !relocs[machine].names )
2377- return "unknown mach";
2378- if ( type > relocs[machine].count )
2379- return "unknown rela";
2380- return relocs[machine].names[type];
2381-}
2382-
2383-static int elf_reloc_section(struct elf_binary *elf,
2384- const elf_shdr * rels,
2385- const elf_shdr * sect, const elf_shdr * syms)
2386-{
2387- const void *ptr, *end;
2388- const elf_shdr *shdr;
2389- const elf_rela *rela;
2390- const elf_rel *rel;
2391- const elf_sym *sym;
2392- uint64_t s_type;
2393- uint64_t r_offset;
2394- uint64_t r_info;
2395- uint64_t r_addend;
2396- int r_type, r_sym;
2397- size_t rsize;
2398- uint64_t shndx, sbase, addr, value;
2399- const char *sname;
2400- int machine;
2401-
2402- machine = elf_uval(elf, elf->ehdr, e_machine);
2403- if ( (machine >= (sizeof(relocs) / sizeof(relocs[0]))) ||
2404- (relocs[machine].func == NULL) )
2405- {
2406- elf_err(elf, "%s: can't handle machine %d\n",
2407- __FUNCTION__, machine);
2408- return -1;
2409- }
2410- if ( elf_swap(elf) )
2411- {
2412- elf_err(elf, "%s: non-native byte order, relocation not supported\n",
2413- __FUNCTION__);
2414- return -1;
2415- }
2416-
2417- s_type = elf_uval(elf, rels, sh_type);
2418- rsize = (SHT_REL == s_type) ? elf_size(elf, rel) : elf_size(elf, rela);
2419- ptr = elf_section_start(elf, rels);
2420- end = elf_section_end(elf, rels);
2421-
2422- for ( ; ptr < end; ptr += rsize )
2423- {
2424- switch ( s_type )
2425- {
2426- case SHT_REL:
2427- rel = ptr;
2428- r_offset = elf_uval(elf, rel, r_offset);
2429- r_info = elf_uval(elf, rel, r_info);
2430- r_addend = 0;
2431- break;
2432- case SHT_RELA:
2433- rela = ptr;
2434- r_offset = elf_uval(elf, rela, r_offset);
2435- r_info = elf_uval(elf, rela, r_info);
2436- r_addend = elf_uval(elf, rela, r_addend);
2437- break;
2438- default:
2439- /* can't happen */
2440- return -1;
2441- }
2442- if ( elf_64bit(elf) )
2443- {
2444- r_type = ELF64_R_TYPE(r_info);
2445- r_sym = ELF64_R_SYM(r_info);
2446- }
2447- else
2448- {
2449- r_type = ELF32_R_TYPE(r_info);
2450- r_sym = ELF32_R_SYM(r_info);
2451- }
2452-
2453- sym = elf_sym_by_index(elf, r_sym);
2454- shndx = elf_uval(elf, sym, st_shndx);
2455- switch ( shndx )
2456- {
2457- case SHN_UNDEF:
2458- sname = "*UNDEF*";
2459- sbase = 0;
2460- break;
2461- case SHN_COMMON:
2462- elf_err(elf, "%s: invalid section: %" PRId64 "\n",
2463- __FUNCTION__, shndx);
2464- return -1;
2465- case SHN_ABS:
2466- sname = "*ABS*";
2467- sbase = 0;
2468- break;
2469- default:
2470- shdr = elf_shdr_by_index(elf, shndx);
2471- if ( shdr == NULL )
2472- {
2473- elf_err(elf, "%s: invalid section: %" PRId64 "\n",
2474- __FUNCTION__, shndx);
2475- return -1;
2476- }
2477- sname = elf_section_name(elf, shdr);
2478- sbase = elf_uval(elf, shdr, sh_addr);
2479- }
2480-
2481- addr = r_offset;
2482- value = elf_uval(elf, sym, st_value);
2483- value += r_addend;
2484-
2485- if ( elf->log_callback && (elf->verbose > 1) )
2486- {
2487- uint64_t st_name = elf_uval(elf, sym, st_name);
2488- const char *name = st_name ? elf->sym_strtab + st_name : "*NONE*";
2489-
2490- elf_msg(elf,
2491- "%s: type %s [%d], off 0x%" PRIx64 ", add 0x%" PRIx64 ","
2492- " sym %s [0x%" PRIx64 "], sec %s [0x%" PRIx64 "]"
2493- " -> addr 0x%" PRIx64 " value 0x%" PRIx64 "\n",
2494- __FUNCTION__, rela_name(machine, r_type), r_type, r_offset,
2495- r_addend, name, elf_uval(elf, sym, st_value), sname, sbase,
2496- addr, value);
2497- }
2498-
2499- if ( relocs[machine].func(elf, r_type, addr, value) == -1 )
2500- {
2501- elf_err(elf, "%s: unknown/unsupported reloc type %s [%d]\n",
2502- __FUNCTION__, rela_name(machine, r_type), r_type);
2503- return -1;
2504- }
2505- }
2506- return 0;
2507-}
2508-
2509-int elf_reloc(struct elf_binary *elf)
2510-{
2511- const elf_shdr *rels, *sect, *syms;
2512- uint64_t i, count, type;
2513-
2514- count = elf_shdr_count(elf);
2515- for ( i = 0; i < count; i++ )
2516- {
2517- rels = elf_shdr_by_index(elf, i);
2518- type = elf_uval(elf, rels, sh_type);
2519- if ( (type != SHT_REL) && (type != SHT_RELA) )
2520- continue;
2521-
2522- sect = elf_shdr_by_index(elf, elf_uval(elf, rels, sh_info));
2523- syms = elf_shdr_by_index(elf, elf_uval(elf, rels, sh_link));
2524- if ( NULL == sect || NULL == syms )
2525- continue;
2526-
2527- if ( !(elf_uval(elf, sect, sh_flags) & SHF_ALLOC) )
2528- {
2529- elf_msg(elf, "%s: relocations for %s, skipping\n",
2530- __FUNCTION__, elf_section_name(elf, sect));
2531- continue;
2532- }
2533-
2534- elf_msg(elf, "%s: relocations for %s @ 0x%" PRIx64 "\n",
2535- __FUNCTION__, elf_section_name(elf, sect),
2536- elf_uval(elf, sect, sh_addr));
2537- if ( elf_reloc_section(elf, rels, sect, syms) != 0 )
2538- return -1;
2539- }
2540- return 0;
2541-}
2542-
2543-/*
2544- * Local variables:
2545- * mode: C
2546- * c-set-style: "BSD"
2547- * c-basic-offset: 4
2548- * tab-width: 4
2549- * indent-tabs-mode: nil
2550- * End:
2551- */
2552diff --git a/xen/common/libelf/libelf-tools.c b/xen/common/libelf/libelf-tools.c
2553index cb97908..e202249 100644
2554--- a/xen/common/libelf/libelf-tools.c
2555+++ b/xen/common/libelf/libelf-tools.c
2556@@ -20,201 +20,292 @@
2557
2558 /* ------------------------------------------------------------------------ */
2559
2560-uint64_t elf_access_unsigned(struct elf_binary * elf, const void *ptr,
2561- uint64_t offset, size_t size)
2562+void elf_mark_broken(struct elf_binary *elf, const char *msg)
2563 {
2564- int need_swap = elf_swap(elf);
2565+ if ( elf->broken == NULL )
2566+ elf->broken = msg;
2567+}
2568+
2569+const char *elf_check_broken(const struct elf_binary *elf)
2570+{
2571+ return elf->broken;
2572+}
2573+
2574+static bool elf_ptrval_in_range(elf_ptrval ptrval, uint64_t size,
2575+ const void *region, uint64_t regionsize)
2576+ /*
2577+ * Returns true if the putative memory area [ptrval,ptrval+size>
2578+ * is completely inside the region [region,region+regionsize>.
2579+ *
2580+ * ptrval and size are the untrusted inputs to be checked.
2581+ * region and regionsize are trusted and must be correct and valid,
2582+ * although it is OK for region to perhaps be maliciously NULL
2583+ * (but not some other malicious value).
2584+ */
2585+{
2586+ elf_ptrval regionp = (elf_ptrval)region;
2587+
2588+ if ( (region == NULL) ||
2589+ (ptrval < regionp) || /* start is before region */
2590+ (ptrval > regionp + regionsize) || /* start is after region */
2591+ (size > regionsize - (ptrval - regionp)) ) /* too big */
2592+ return 0;
2593+ return 1;
2594+}
2595+
2596+bool elf_access_ok(struct elf_binary * elf,
2597+ uint64_t ptrval, size_t size)
2598+{
2599+ if ( elf_ptrval_in_range(ptrval, size, elf->image_base, elf->size) )
2600+ return 1;
2601+ if ( elf_ptrval_in_range(ptrval, size, elf->dest_base, elf->dest_size) )
2602+ return 1;
2603+ if ( elf_ptrval_in_range(ptrval, size,
2604+ elf->caller_xdest_base, elf->caller_xdest_size) )
2605+ return 1;
2606+ elf_mark_broken(elf, "out of range access");
2607+ return 0;
2608+}
2609+
2610+void elf_memcpy_safe(struct elf_binary *elf, elf_ptrval dst,
2611+ elf_ptrval src, size_t size)
2612+{
2613+ if ( elf_access_ok(elf, dst, size) &&
2614+ elf_access_ok(elf, src, size) )
2615+ {
2616+ /* use memmove because these checks do not prove that the
2617+ * regions don't overlap and overlapping regions grant
2618+ * permission for compiler malice */
2619+ elf_memmove_unchecked(ELF_UNSAFE_PTR(dst), ELF_UNSAFE_PTR(src), size);
2620+ }
2621+}
2622+
2623+void elf_memset_safe(struct elf_binary *elf, elf_ptrval dst, int c, size_t size)
2624+{
2625+ if ( elf_access_ok(elf, dst, size) )
2626+ {
2627+ elf_memset_unchecked(ELF_UNSAFE_PTR(dst), c, size);
2628+ }
2629+}
2630+
2631+uint64_t elf_access_unsigned(struct elf_binary * elf, elf_ptrval base,
2632+ uint64_t moreoffset, size_t size)
2633+{
2634+ elf_ptrval ptrval = base + moreoffset;
2635+ bool need_swap = elf_swap(elf);
2636 const uint8_t *u8;
2637 const uint16_t *u16;
2638 const uint32_t *u32;
2639 const uint64_t *u64;
2640
2641+ if ( !elf_access_ok(elf, ptrval, size) )
2642+ return 0;
2643+
2644 switch ( size )
2645 {
2646 case 1:
2647- u8 = ptr + offset;
2648+ u8 = (const void*)ptrval;
2649 return *u8;
2650 case 2:
2651- u16 = ptr + offset;
2652+ u16 = (const void*)ptrval;
2653 return need_swap ? bswap_16(*u16) : *u16;
2654 case 4:
2655- u32 = ptr + offset;
2656+ u32 = (const void*)ptrval;
2657 return need_swap ? bswap_32(*u32) : *u32;
2658 case 8:
2659- u64 = ptr + offset;
2660+ u64 = (const void*)ptrval;
2661 return need_swap ? bswap_64(*u64) : *u64;
2662 default:
2663 return 0;
2664 }
2665 }
2666
2667-int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
2668- uint64_t offset, size_t size)
2669-{
2670- int need_swap = elf_swap(elf);
2671- const int8_t *s8;
2672- const int16_t *s16;
2673- const int32_t *s32;
2674- const int64_t *s64;
2675-
2676- switch ( size )
2677- {
2678- case 1:
2679- s8 = ptr + offset;
2680- return *s8;
2681- case 2:
2682- s16 = ptr + offset;
2683- return need_swap ? bswap_16(*s16) : *s16;
2684- case 4:
2685- s32 = ptr + offset;
2686- return need_swap ? bswap_32(*s32) : *s32;
2687- case 8:
2688- s64 = ptr + offset;
2689- return need_swap ? bswap_64(*s64) : *s64;
2690- default:
2691- return 0;
2692- }
2693-}
2694-
2695 uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr)
2696 {
2697- int elf_round = (elf_64bit(elf) ? 8 : 4) - 1;
2698+ uint64_t elf_round = (elf_64bit(elf) ? 8 : 4) - 1;
2699
2700 return (addr + elf_round) & ~elf_round;
2701 }
2702
2703 /* ------------------------------------------------------------------------ */
2704
2705-int elf_shdr_count(struct elf_binary *elf)
2706+unsigned elf_shdr_count(struct elf_binary *elf)
2707 {
2708- return elf_uval(elf, elf->ehdr, e_shnum);
2709+ unsigned count = elf_uval(elf, elf->ehdr, e_shnum);
2710+ uint64_t max = elf->size / sizeof(Elf32_Shdr);
2711+ if (max > ~(unsigned)0)
2712+ max = ~(unsigned)0; /* Xen doesn't have limits.h :-/ */
2713+ if (count > max)
2714+ {
2715+ elf_mark_broken(elf, "far too many section headers");
2716+ count = max;
2717+ }
2718+ return count;
2719 }
2720
2721-int elf_phdr_count(struct elf_binary *elf)
2722+unsigned elf_phdr_count(struct elf_binary *elf)
2723 {
2724 return elf_uval(elf, elf->ehdr, e_phnum);
2725 }
2726
2727-const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name)
2728+ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_name(struct elf_binary *elf, const char *name)
2729 {
2730 uint64_t count = elf_shdr_count(elf);
2731- const elf_shdr *shdr;
2732+ ELF_HANDLE_DECL(elf_shdr) shdr;
2733 const char *sname;
2734- int i;
2735+ unsigned i;
2736
2737 for ( i = 0; i < count; i++ )
2738 {
2739 shdr = elf_shdr_by_index(elf, i);
2740+ if ( !elf_access_ok(elf, ELF_HANDLE_PTRVAL(shdr), 1) )
2741+ /* input has an insane section header count field */
2742+ break;
2743 sname = elf_section_name(elf, shdr);
2744 if ( sname && !strcmp(sname, name) )
2745 return shdr;
2746 }
2747- return NULL;
2748+ return ELF_INVALID_HANDLE(elf_shdr);
2749 }
2750
2751-const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index)
2752+ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_index(struct elf_binary *elf, unsigned index)
2753 {
2754 uint64_t count = elf_shdr_count(elf);
2755- const void *ptr;
2756+ elf_ptrval ptr;
2757
2758 if ( index >= count )
2759- return NULL;
2760+ return ELF_INVALID_HANDLE(elf_shdr);
2761
2762- ptr = (elf->image
2763+ ptr = (ELF_IMAGE_BASE(elf)
2764 + elf_uval(elf, elf->ehdr, e_shoff)
2765 + elf_uval(elf, elf->ehdr, e_shentsize) * index);
2766- return ptr;
2767+ return ELF_MAKE_HANDLE(elf_shdr, ptr);
2768 }
2769
2770-const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index)
2771+ELF_HANDLE_DECL(elf_phdr) elf_phdr_by_index(struct elf_binary *elf, unsigned index)
2772 {
2773 uint64_t count = elf_uval(elf, elf->ehdr, e_phnum);
2774- const void *ptr;
2775+ elf_ptrval ptr;
2776
2777 if ( index >= count )
2778- return NULL;
2779+ return ELF_INVALID_HANDLE(elf_phdr);
2780
2781- ptr = (elf->image
2782+ ptr = (ELF_IMAGE_BASE(elf)
2783 + elf_uval(elf, elf->ehdr, e_phoff)
2784 + elf_uval(elf, elf->ehdr, e_phentsize) * index);
2785- return ptr;
2786+ return ELF_MAKE_HANDLE(elf_phdr, ptr);
2787 }
2788
2789-const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr)
2790+
2791+const char *elf_section_name(struct elf_binary *elf,
2792+ ELF_HANDLE_DECL(elf_shdr) shdr)
2793 {
2794- if ( elf->sec_strtab == NULL )
2795+ if ( ELF_PTRVAL_INVALID(elf->sec_strtab) )
2796 return "unknown";
2797- return elf->sec_strtab + elf_uval(elf, shdr, sh_name);
2798+
2799+ return elf_strval(elf, elf->sec_strtab + elf_uval(elf, shdr, sh_name));
2800 }
2801
2802-const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr)
2803+const char *elf_strval(struct elf_binary *elf, elf_ptrval start)
2804 {
2805- return elf->image + elf_uval(elf, shdr, sh_offset);
2806+ uint64_t length;
2807+
2808+ for ( length = 0; ; length++ ) {
2809+ if ( !elf_access_ok(elf, start + length, 1) )
2810+ return NULL;
2811+ if ( !elf_access_unsigned(elf, start, length, 1) )
2812+ /* ok */
2813+ return ELF_UNSAFE_PTR(start);
2814+ if ( length >= ELF_MAX_STRING_LENGTH )
2815+ {
2816+ elf_mark_broken(elf, "excessively long string");
2817+ return NULL;
2818+ }
2819+ }
2820 }
2821
2822-const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr)
2823+const char *elf_strfmt(struct elf_binary *elf, elf_ptrval start)
2824 {
2825- return elf->image
2826+ const char *str = elf_strval(elf, start);
2827+
2828+ if ( str == NULL )
2829+ return "(invalid)";
2830+ return str;
2831+}
2832+
2833+elf_ptrval elf_section_start(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr)
2834+{
2835+ return ELF_IMAGE_BASE(elf) + elf_uval(elf, shdr, sh_offset);
2836+}
2837+
2838+elf_ptrval elf_section_end(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr)
2839+{
2840+ return ELF_IMAGE_BASE(elf)
2841 + elf_uval(elf, shdr, sh_offset) + elf_uval(elf, shdr, sh_size);
2842 }
2843
2844-const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr)
2845+elf_ptrval elf_segment_start(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr)
2846 {
2847- return elf->image + elf_uval(elf, phdr, p_offset);
2848+ return ELF_IMAGE_BASE(elf)
2849+ + elf_uval(elf, phdr, p_offset);
2850 }
2851
2852-const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr)
2853+elf_ptrval elf_segment_end(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr)
2854 {
2855- return elf->image
2856+ return ELF_IMAGE_BASE(elf)
2857 + elf_uval(elf, phdr, p_offset) + elf_uval(elf, phdr, p_filesz);
2858 }
2859
2860-const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol)
2861+ELF_HANDLE_DECL(elf_sym) elf_sym_by_name(struct elf_binary *elf, const char *symbol)
2862 {
2863- const void *ptr = elf_section_start(elf, elf->sym_tab);
2864- const void *end = elf_section_end(elf, elf->sym_tab);
2865- const elf_sym *sym;
2866+ elf_ptrval ptr = elf_section_start(elf, elf->sym_tab);
2867+ elf_ptrval end = elf_section_end(elf, elf->sym_tab);
2868+ ELF_HANDLE_DECL(elf_sym) sym;
2869 uint64_t info, name;
2870+ const char *sym_name;
2871
2872 for ( ; ptr < end; ptr += elf_size(elf, sym) )
2873 {
2874- sym = ptr;
2875+ sym = ELF_MAKE_HANDLE(elf_sym, ptr);
2876 info = elf_uval(elf, sym, st_info);
2877 name = elf_uval(elf, sym, st_name);
2878 if ( ELF32_ST_BIND(info) != STB_GLOBAL )
2879 continue;
2880- if ( strcmp(elf->sym_strtab + name, symbol) )
2881+ sym_name = elf_strval(elf, elf->sym_strtab + name);
2882+ if ( sym_name == NULL ) /* out of range, oops */
2883+ return ELF_INVALID_HANDLE(elf_sym);
2884+ if ( strcmp(sym_name, symbol) )
2885 continue;
2886 return sym;
2887 }
2888- return NULL;
2889+ return ELF_INVALID_HANDLE(elf_sym);
2890 }
2891
2892-const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index)
2893+ELF_HANDLE_DECL(elf_sym) elf_sym_by_index(struct elf_binary *elf, unsigned index)
2894 {
2895- const void *ptr = elf_section_start(elf, elf->sym_tab);
2896- const elf_sym *sym;
2897+ elf_ptrval ptr = elf_section_start(elf, elf->sym_tab);
2898+ ELF_HANDLE_DECL(elf_sym) sym;
2899
2900- sym = ptr + index * elf_size(elf, sym);
2901+ sym = ELF_MAKE_HANDLE(elf_sym, ptr + index * elf_size(elf, sym));
2902 return sym;
2903 }
2904
2905-const char *elf_note_name(struct elf_binary *elf, const elf_note * note)
2906+const char *elf_note_name(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note)
2907 {
2908- return (void *)note + elf_size(elf, note);
2909+ return elf_strval(elf, ELF_HANDLE_PTRVAL(note) + elf_size(elf, note));
2910 }
2911
2912-const void *elf_note_desc(struct elf_binary *elf, const elf_note * note)
2913+elf_ptrval elf_note_desc(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note)
2914 {
2915- int namesz = (elf_uval(elf, note, namesz) + 3) & ~3;
2916+ unsigned namesz = (elf_uval(elf, note, namesz) + 3) & ~3;
2917
2918- return (void *)note + elf_size(elf, note) + namesz;
2919+ return ELF_HANDLE_PTRVAL(note) + elf_size(elf, note) + namesz;
2920 }
2921
2922-uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note)
2923+uint64_t elf_note_numeric(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note)
2924 {
2925- const void *desc = elf_note_desc(elf, note);
2926- int descsz = elf_uval(elf, note, descsz);
2927+ elf_ptrval desc = elf_note_desc(elf, note);
2928+ unsigned descsz = elf_uval(elf, note, descsz);
2929
2930 switch (descsz)
2931 {
2932@@ -228,11 +319,11 @@ uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note)
2933 }
2934 }
2935
2936-uint64_t elf_note_numeric_array(struct elf_binary *elf, const elf_note *note,
2937+uint64_t elf_note_numeric_array(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note,
2938 unsigned int unitsz, unsigned int idx)
2939 {
2940- const void *desc = elf_note_desc(elf, note);
2941- int descsz = elf_uval(elf, note, descsz);
2942+ elf_ptrval desc = elf_note_desc(elf, note);
2943+ unsigned descsz = elf_uval(elf, note, descsz);
2944
2945 if ( descsz % unitsz || idx >= descsz / unitsz )
2946 return 0;
2947@@ -248,24 +339,34 @@ uint64_t elf_note_numeric_array(struct elf_binary *elf, const elf_note *note,
2948 }
2949 }
2950
2951-const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note)
2952+ELF_HANDLE_DECL(elf_note) elf_note_next(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note)
2953 {
2954- int namesz = (elf_uval(elf, note, namesz) + 3) & ~3;
2955- int descsz = (elf_uval(elf, note, descsz) + 3) & ~3;
2956+ unsigned namesz = (elf_uval(elf, note, namesz) + 3) & ~3;
2957+ unsigned descsz = (elf_uval(elf, note, descsz) + 3) & ~3;
2958+
2959+ elf_ptrval ptrval = ELF_HANDLE_PTRVAL(note)
2960+ + elf_size(elf, note) + namesz + descsz;
2961
2962- return (void *)note + elf_size(elf, note) + namesz + descsz;
2963+ if ( ( ptrval <= ELF_HANDLE_PTRVAL(note) || /* wrapped or stuck */
2964+ !elf_access_ok(elf, ELF_HANDLE_PTRVAL(note), 1) ) )
2965+ ptrval = ELF_MAX_PTRVAL; /* terminate caller's loop */
2966+
2967+ return ELF_MAKE_HANDLE(elf_note, ptrval);
2968 }
2969
2970 /* ------------------------------------------------------------------------ */
2971
2972-int elf_is_elfbinary(const void *image)
2973+bool elf_is_elfbinary(const void *image_start, size_t image_size)
2974 {
2975- const Elf32_Ehdr *ehdr = image;
2976+ const Elf32_Ehdr *ehdr = image_start;
2977+
2978+ if ( image_size < sizeof(*ehdr) )
2979+ return 0;
2980
2981 return IS_ELF(*ehdr);
2982 }
2983
2984-int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr)
2985+bool elf_phdr_is_loadable(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr)
2986 {
2987 uint64_t p_type = elf_uval(elf, phdr, p_type);
2988 uint64_t p_flags = elf_uval(elf, phdr, p_flags);
2989diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
2990index e8f6508..174f8da 100644
2991--- a/xen/include/xen/libelf.h
2992+++ b/xen/include/xen/libelf.h
2993@@ -29,6 +29,11 @@
2994 #error define architectural endianness
2995 #endif
2996
2997+#include <stdbool.h>
2998+
2999+typedef int elf_errorstatus; /* 0: ok; -ve (normally -1): error */
3000+typedef int elf_negerrnoval; /* 0: ok; -EFOO: error */
3001+
3002 #undef ELFSIZE
3003 #include "elfstructs.h"
3004 #ifdef __XEN__
3005@@ -42,12 +47,98 @@
3006
3007 struct elf_binary;
3008 typedef void elf_log_callback(struct elf_binary*, void *caller_data,
3009- int iserr, const char *fmt, va_list al);
3010+ bool iserr, const char *fmt, va_list al);
3011+
3012+#endif
3013+
3014+#define ELF_MAX_STRING_LENGTH 4096
3015+#define ELF_MAX_TOTAL_NOTE_COUNT 65536
3016+
3017+/* ------------------------------------------------------------------------ */
3018+
3019+/* Macros for accessing the input image and output area. */
3020+
3021+/*
3022+ * We abstract away the pointerness of these pointers, replacing
3023+ * various void*, char* and struct* with the following:
3024+ * elf_ptrval A pointer to a byte; one can do pointer arithmetic
3025+ * on this.
3026+ * HANDLE A pointer to a struct. There is one of these types
3027+ * for each pointer type - that is, for each "structname".
3028+ * In the arguments to the various HANDLE macros, structname
3029+ * must be a single identifier which is a typedef.
3030+ * It is not permitted to do arithmetic on these
3031+ * pointers. In the current code attempts to do so will
3032+ * compile, but in the next patch this will become a
3033+ * compile error.
3034+ */
3035+
3036+typedef uintptr_t elf_ptrval;
3037+
3038+#define ELF_REALPTR2PTRVAL(realpointer) ((elf_ptrval)(realpointer))
3039+ /* Converts an actual C pointer into a PTRVAL */
3040+
3041+#define ELF_HANDLE_DECL(structname) structname##_handle
3042+ /* Provides a type declaration for a HANDLE. */
3043
3044+#ifdef __XEN__
3045+# define ELF_PRPTRVAL "lu"
3046+ /*
3047+ * PRIuPTR is misdefined in xen/include/xen/inttypes.h, on 32-bit,
3048+ * to "u", when in fact uintptr_t is an unsigned long.
3049+ */
3050+#else
3051+# define ELF_PRPTRVAL PRIuPTR
3052 #endif
3053+ /* printf format a la PRId... for a PTRVAL */
3054+
3055+#define ELF_DEFINE_HANDLE(structname) \
3056+ typedef union { \
3057+ elf_ptrval ptrval; \
3058+ const structname *typeonly; /* for sizeof, offsetof, &c only */ \
3059+ } structname##_handle;
3060+ /*
3061+ * This must be invoked for each HANDLE type to define
3062+ * the actual C type used for that kind of HANDLE.
3063+ */
3064+
3065+#define ELF_MAKE_HANDLE(structname, ptrval) ((structname##_handle){ ptrval })
3066+ /* Converts a PTRVAL to a HANDLE */
3067+
3068+#define ELF_IMAGE_BASE(elf) ((elf_ptrval)(elf)->image_base)
3069+ /* Returns the base of the image as a PTRVAL. */
3070+
3071+#define ELF_HANDLE_PTRVAL(handleval) ((handleval).ptrval)
3072+ /* Converts a HANDLE to a PTRVAL. */
3073+
3074+#define ELF_UNSAFE_PTR(ptrval) ((void*)(elf_ptrval)(ptrval))
3075+ /*
3076+ * Turns a PTRVAL into an actual C pointer. Before this is done
3077+ * the caller must have ensured that the PTRVAL does in fact point
3078+ * to a permissible location.
3079+ */
3080+
3081+/* PTRVALs can be INVALID (ie, NULL). */
3082+#define ELF_INVALID_PTRVAL ((elf_ptrval)0) /* returns NULL PTRVAL */
3083+#define ELF_INVALID_HANDLE(structname) /* returns NULL handle */ \
3084+ ELF_MAKE_HANDLE(structname, ELF_INVALID_PTRVAL)
3085+#define ELF_PTRVAL_VALID(ptrval) (!!(ptrval)) /* } */
3086+#define ELF_HANDLE_VALID(handleval) (!!(handleval).ptrval) /* } predicates */
3087+#define ELF_PTRVAL_INVALID(ptrval) (!ELF_PTRVAL_VALID((ptrval))) /* } */
3088+
3089+#define ELF_MAX_PTRVAL (~(elf_ptrval)0)
3090+ /* PTRVAL value guaranteed to compare > to any valid PTRVAL */
3091+
3092+/* For internal use by other macros here */
3093+#define ELF__HANDLE_FIELD_TYPE(handleval, elm) \
3094+ typeof((handleval).typeonly->elm)
3095+#define ELF__HANDLE_FIELD_OFFSET(handleval, elm) \
3096+ offsetof(typeof(*(handleval).typeonly),elm)
3097+
3098
3099 /* ------------------------------------------------------------------------ */
3100
3101+
3102 typedef union {
3103 Elf32_Ehdr e32;
3104 Elf64_Ehdr e64;
3105@@ -83,20 +174,32 @@ typedef union {
3106 Elf64_Note e64;
3107 } elf_note;
3108
3109+ELF_DEFINE_HANDLE(elf_ehdr)
3110+ELF_DEFINE_HANDLE(elf_shdr)
3111+ELF_DEFINE_HANDLE(elf_phdr)
3112+ELF_DEFINE_HANDLE(elf_sym)
3113+ELF_DEFINE_HANDLE(elf_note)
3114+
3115 struct elf_binary {
3116 /* elf binary */
3117- const char *image;
3118+ const void *image_base;
3119 size_t size;
3120 char class;
3121 char data;
3122
3123- const elf_ehdr *ehdr;
3124- const char *sec_strtab;
3125- const elf_shdr *sym_tab;
3126- const char *sym_strtab;
3127+ ELF_HANDLE_DECL(elf_ehdr) ehdr;
3128+ elf_ptrval sec_strtab;
3129+ ELF_HANDLE_DECL(elf_shdr) sym_tab;
3130+ uint64_t sym_strtab;
3131
3132 /* loaded to */
3133- char *dest;
3134+ /*
3135+ * dest_base and dest_size are trusted and must be correct;
3136+ * whenever dest_size is not 0, both of these must be valid
3137+ * so long as the struct elf_binary is in use.
3138+ */
3139+ char *dest_base;
3140+ size_t dest_size;
3141 uint64_t pstart;
3142 uint64_t pend;
3143 uint64_t reloc_offset;
3144@@ -104,12 +207,22 @@ struct elf_binary {
3145 uint64_t bsd_symtab_pstart;
3146 uint64_t bsd_symtab_pend;
3147
3148+ /*
3149+ * caller's other acceptable destination
3150+ *
3151+ * Again, these are trusted and must be valid (or 0) so long
3152+ * as the struct elf_binary is in use.
3153+ */
3154+ void *caller_xdest_base;
3155+ uint64_t caller_xdest_size;
3156+
3157 #ifndef __XEN__
3158 /* misc */
3159 elf_log_callback *log_callback;
3160 void *log_caller_data;
3161 #endif
3162- int verbose;
3163+ bool verbose;
3164+ const char *broken;
3165 };
3166
3167 /* ------------------------------------------------------------------------ */
3168@@ -127,88 +240,145 @@ struct elf_binary {
3169 #define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
3170 #define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)
3171
3172-#define elf_uval(elf, str, elem) \
3173- ((ELFCLASS64 == (elf)->class) \
3174- ? elf_access_unsigned((elf), (str), \
3175- offsetof(typeof(*(str)),e64.elem), \
3176- sizeof((str)->e64.elem)) \
3177- : elf_access_unsigned((elf), (str), \
3178- offsetof(typeof(*(str)),e32.elem), \
3179- sizeof((str)->e32.elem)))
3180-
3181-#define elf_sval(elf, str, elem) \
3182- ((ELFCLASS64 == (elf)->class) \
3183- ? elf_access_signed((elf), (str), \
3184- offsetof(typeof(*(str)),e64.elem), \
3185- sizeof((str)->e64.elem)) \
3186- : elf_access_signed((elf), (str), \
3187- offsetof(typeof(*(str)),e32.elem), \
3188- sizeof((str)->e32.elem)))
3189-
3190-#define elf_size(elf, str) \
3191- ((ELFCLASS64 == (elf)->class) \
3192- ? sizeof((str)->e64) : sizeof((str)->e32))
3193+#define elf_uval_3264(elf, handle, elem) \
3194+ elf_access_unsigned((elf), (handle).ptrval, \
3195+ offsetof(typeof(*(handle).typeonly),elem), \
3196+ sizeof((handle).typeonly->elem))
3197+
3198+#define elf_uval(elf, handle, elem) \
3199+ ((ELFCLASS64 == (elf)->class) \
3200+ ? elf_uval_3264(elf, handle, e64.elem) \
3201+ : elf_uval_3264(elf, handle, e32.elem))
3202+ /*
3203+ * Reads an unsigned field in a header structure in the ELF.
3204+ * handle is a HANDLE, and elem is the field name in it.
3205+ */
3206
3207-uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
3208+
3209+#define elf_size(elf, handle_or_handletype) ({ \
3210+ typeof(handle_or_handletype) elf_size__dummy; \
3211+ ((ELFCLASS64 == (elf)->class) \
3212+ ? sizeof(elf_size__dummy.typeonly->e64) \
3213+ : sizeof(elf_size__dummy.typeonly->e32)); \
3214+})
3215+ /*
3216+ * Returns the size of the substructure for the appropriate 32/64-bitness.
3217+ * str should be a HANDLE.
3218+ */
3219+
3220+uint64_t elf_access_unsigned(struct elf_binary *elf, elf_ptrval ptr,
3221 uint64_t offset, size_t size);
3222-int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
3223- uint64_t offset, size_t size);
3224+ /* Reads a field at arbitrary offset and alignment */
3225
3226 uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
3227
3228+const char *elf_strval(struct elf_binary *elf, elf_ptrval start);
3229+ /* may return NULL if the string is out of range etc. */
3230+
3231+const char *elf_strfmt(struct elf_binary *elf, elf_ptrval start);
3232+ /* like elf_strval but returns "(invalid)" instead of NULL */
3233+
3234+void elf_memcpy_safe(struct elf_binary*, elf_ptrval dst, elf_ptrval src, size_t);
3235+void elf_memset_safe(struct elf_binary*, elf_ptrval dst, int c, size_t);
3236+ /*
3237+ * Versions of memcpy and memset which arrange never to write
3238+ * outside permitted areas.
3239+ */
3240+
3241+bool elf_access_ok(struct elf_binary * elf,
3242+ uint64_t ptrval, size_t size);
3243+
3244+#define elf_store_val(elf, type, ptr, val) \
3245+ ({ \
3246+ typeof(type) elf_store__val = (val); \
3247+ elf_ptrval elf_store__targ = ptr; \
3248+ if (elf_access_ok((elf), elf_store__targ, \
3249+ sizeof(elf_store__val))) { \
3250+ elf_memcpy_unchecked((void*)elf_store__targ, &elf_store__val, \
3251+ sizeof(elf_store__val)); \
3252+ } \
3253+ }) \
3254+ /* Stores a value at a particular PTRVAL. */
3255+
3256+#define elf_store_field(elf, hdr, elm, val) \
3257+ (elf_store_val((elf), ELF__HANDLE_FIELD_TYPE(hdr, elm), \
3258+ ELF_HANDLE_PTRVAL(hdr) + ELF__HANDLE_FIELD_OFFSET(hdr, elm), \
3259+ (val)))
3260+ /* Stores a 32/64-bit field. hdr is a HANDLE and elm is the field name. */
3261+
3262+
3263 /* ------------------------------------------------------------------------ */
3264 /* xc_libelf_tools.c */
3265
3266-int elf_shdr_count(struct elf_binary *elf);
3267-int elf_phdr_count(struct elf_binary *elf);
3268+unsigned elf_shdr_count(struct elf_binary *elf);
3269+unsigned elf_phdr_count(struct elf_binary *elf);
3270
3271-const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
3272-const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
3273-const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
3274+ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_name(struct elf_binary *elf, const char *name);
3275+ELF_HANDLE_DECL(elf_shdr) elf_shdr_by_index(struct elf_binary *elf, unsigned index);
3276+ELF_HANDLE_DECL(elf_phdr) elf_phdr_by_index(struct elf_binary *elf, unsigned index);
3277
3278-const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
3279-const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
3280-const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
3281+const char *elf_section_name(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr); /* might return NULL if inputs are invalid */
3282+elf_ptrval elf_section_start(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr);
3283+elf_ptrval elf_section_end(struct elf_binary *elf, ELF_HANDLE_DECL(elf_shdr) shdr);
3284
3285-const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
3286-const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
3287+elf_ptrval elf_segment_start(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr);
3288+elf_ptrval elf_segment_end(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr);
3289
3290-const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
3291-const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
3292+ELF_HANDLE_DECL(elf_sym) elf_sym_by_name(struct elf_binary *elf, const char *symbol);
3293+ELF_HANDLE_DECL(elf_sym) elf_sym_by_index(struct elf_binary *elf, unsigned index);
3294
3295-const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
3296-const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
3297-uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
3298-uint64_t elf_note_numeric_array(struct elf_binary *, const elf_note *,
3299+const char *elf_note_name(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note); /* may return NULL */
3300+elf_ptrval elf_note_desc(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note);
3301+uint64_t elf_note_numeric(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note);
3302+uint64_t elf_note_numeric_array(struct elf_binary *, ELF_HANDLE_DECL(elf_note),
3303 unsigned int unitsz, unsigned int idx);
3304-const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
3305
3306-int elf_is_elfbinary(const void *image);
3307-int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
3308+/*
3309+ * If you use elf_note_next in a loop, you must put a nontrivial upper
3310+ * bound on the returned value as part of your loop condition. In
3311+ * some cases elf_note_next will substitute ELF_MAX_PTRVAL as return
3312+ * value to indicate that the iteration isn't going well (for example,
3313+ * the putative "next" value would be earlier in memory). In this
3314+ * case the caller's loop must terminate. Checking against the
3315+ * end of the notes segment with a strict inequality is sufficient.
3316+ */
3317+ELF_HANDLE_DECL(elf_note) elf_note_next(struct elf_binary *elf, ELF_HANDLE_DECL(elf_note) note);
3318+
3319+/* (Only) checks that the image has the right magic number. */
3320+bool elf_is_elfbinary(const void *image_start, size_t image_size);
3321+
3322+bool elf_phdr_is_loadable(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr);
3323
3324 /* ------------------------------------------------------------------------ */
3325 /* xc_libelf_loader.c */
3326
3327-int elf_init(struct elf_binary *elf, const char *image, size_t size);
3328+elf_errorstatus elf_init(struct elf_binary *elf, const char *image, size_t size);
3329+ /*
3330+ * image and size must be correct. They will be recorded in
3331+ * *elf, and must remain valid while the elf is in use.
3332+ */
3333 #ifdef __XEN__
3334 void elf_set_verbose(struct elf_binary *elf);
3335 #else
3336 void elf_set_log(struct elf_binary *elf, elf_log_callback*,
3337- void *log_caller_pointer, int verbose);
3338+ void *log_caller_pointer, bool verbose);
3339 #endif
3340
3341 void elf_parse_binary(struct elf_binary *elf);
3342-int elf_load_binary(struct elf_binary *elf);
3343+elf_errorstatus elf_load_binary(struct elf_binary *elf);
3344
3345-void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
3346+elf_ptrval elf_get_ptr(struct elf_binary *elf, unsigned long addr);
3347 uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
3348
3349 void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart); /* private */
3350
3351+void elf_mark_broken(struct elf_binary *elf, const char *msg);
3352+const char *elf_check_broken(const struct elf_binary *elf); /* NULL means OK */
3353+
3354 /* ------------------------------------------------------------------------ */
3355 /* xc_libelf_relocate.c */
3356
3357-int elf_reloc(struct elf_binary *elf);
3358+elf_errorstatus elf_reloc(struct elf_binary *elf);
3359
3360 /* ------------------------------------------------------------------------ */
3361 /* xc_libelf_dominfo.c */
3362@@ -232,9 +402,9 @@ struct xen_elfnote {
3363
3364 struct elf_dom_parms {
3365 /* raw */
3366- const char *guest_info;
3367- const void *elf_note_start;
3368- const void *elf_note_end;
3369+ elf_ptrval guest_info;
3370+ elf_ptrval elf_note_start;
3371+ elf_ptrval elf_note_end;
3372 struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
3373
3374 /* parsed */
3375@@ -242,8 +412,8 @@ struct elf_dom_parms {
3376 char guest_ver[16];
3377 char xen_ver[16];
3378 char loader[16];
3379- int pae;
3380- int bsd_symtab;
3381+ int pae; /* some kind of enum apparently */
3382+ bool bsd_symtab;
3383 uint64_t virt_base;
3384 uint64_t virt_entry;
3385 uint64_t virt_hypercall;
3386@@ -273,10 +443,44 @@ int elf_xen_parse_features(const char *features,
3387 uint32_t *required);
3388 int elf_xen_parse_note(struct elf_binary *elf,
3389 struct elf_dom_parms *parms,
3390- const elf_note *note);
3391+ ELF_HANDLE_DECL(elf_note) note);
3392 int elf_xen_parse_guest_info(struct elf_binary *elf,
3393 struct elf_dom_parms *parms);
3394 int elf_xen_parse(struct elf_binary *elf,
3395 struct elf_dom_parms *parms);
3396
3397+static inline void *elf_memcpy_unchecked(void *dest, const void *src, size_t n)
3398+ { return memcpy(dest, src, n); }
3399+static inline void *elf_memmove_unchecked(void *dest, const void *src, size_t n)
3400+ { return memmove(dest, src, n); }
3401+static inline void *elf_memset_unchecked(void *s, int c, size_t n)
3402+ { return memset(s, c, n); }
3403+ /*
3404+ * Unsafe versions of memcpy, memmove memset which take actual C
3405+ * pointers. These are just like the real functions.
3406+ * We provide these so that in libelf-private.h we can #define
3407+ * memcpy, memset and memmove to undefined MISTAKE things.
3408+ */
3409+
3410+
3411+/* Advances past amount bytes of the current destination area. */
3412+static inline void ELF_ADVANCE_DEST(struct elf_binary *elf, uint64_t amount)
3413+{
3414+ if ( elf->dest_base == NULL )
3415+ {
3416+ elf_mark_broken(elf, "advancing in null image");
3417+ }
3418+ else if ( elf->dest_size >= amount )
3419+ {
3420+ elf->dest_base += amount;
3421+ elf->dest_size -= amount;
3422+ }
3423+ else
3424+ {
3425+ elf->dest_size = 0;
3426+ elf_mark_broken(elf, "advancing past end (image very short?)");
3427+ }
3428+}
3429+
3430+
3431 #endif /* __XEN_LIBELF_H__ */
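
A minimal sketch of how a caller might use the handle-based note iterator introduced by this libelf rework, observing the bounded-loop rule documented above for elf_note_next (assumes the xen/libelf.h declarations shown in this hunk; walk_notes and its arguments are illustrative only):

    #include <xen/libelf.h>   /* header path assumed; declarations shown above */

    /*
     * Walk the notes in [start, end) and look at each note's name.
     * The loop bound is a strict inequality against the segment end,
     * which is sufficient because elf_note_next() substitutes
     * ELF_MAX_PTRVAL when the putative "next" note is implausible.
     */
    static void walk_notes(struct elf_binary *elf,
                           ELF_HANDLE_DECL(elf_note) start, elf_ptrval end)
    {
        ELF_HANDLE_DECL(elf_note) note;

        for ( note = start;
              ELF_HANDLE_PTRVAL(note) < end;
              note = elf_note_next(elf, note) )
        {
            const char *name = elf_note_name(elf, note);  /* may return NULL */

            if ( name == NULL )
                break;
        }
    }
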
diff --git a/main/xen/xsa56.patch b/main/xen/xsa56.patch
deleted file mode 100644
index 1368ac3514..0000000000
--- a/main/xen/xsa56.patch
+++ /dev/null
@@ -1,50 +0,0 @@
1libxc: limit cpu values when setting vcpu affinity
2
3When support for pinning more than 64 cpus was added, the check for cpu
4out-of-range values was removed. This can lead to subsequent
5out-of-bounds cpumap array accesses in case the cpu number is higher
6than the actual count.
7
8This patch restores the check.
9
10This is CVE-2013-2072 / XSA-56
11
12Signed-off-by: Petr Matousek <pmatouse@redhat.com>
13
14diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
15index e220f68..e611b24 100644
16--- a/tools/python/xen/lowlevel/xc/xc.c
17+++ b/tools/python/xen/lowlevel/xc/xc.c
18@@ -228,6 +228,7 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
19 int vcpu = 0, i;
20 xc_cpumap_t cpumap;
21 PyObject *cpulist = NULL;
22+ int nr_cpus;
23
24 static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
25
26@@ -235,6 +236,10 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
27 &dom, &vcpu, &cpulist) )
28 return NULL;
29
30+ nr_cpus = xc_get_max_cpus(self->xc_handle);
31+ if ( nr_cpus == 0 )
32+ return pyxc_error_to_exception(self->xc_handle);
33+
34 cpumap = xc_cpumap_alloc(self->xc_handle);
35 if(cpumap == NULL)
36 return pyxc_error_to_exception(self->xc_handle);
37@@ -244,6 +249,13 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
38 for ( i = 0; i < PyList_Size(cpulist); i++ )
39 {
40 long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
41+ if ( cpu < 0 || cpu >= nr_cpus )
42+ {
43+ free(cpumap);
44+ errno = EINVAL;
45+ PyErr_SetFromErrno(xc_error_obj);
46+ return NULL;
47+ }
48 cpumap[cpu / 8] |= 1 << (cpu % 8);
49 }
50 }
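
The change above restores a range check before the cpu number is used to index the byte-granular cpumap. A self-contained sketch of that pattern (cpumap_set and its parameters are illustrative, not the libxc API):

    #include <errno.h>
    #include <stdint.h>

    /* Set bit 'cpu' in a cpumap covering 'nr_cpus' cpus; refuse out-of-range
     * values rather than writing past the end of the array. */
    static int cpumap_set(uint8_t *cpumap, int nr_cpus, long cpu)
    {
        if ( cpu < 0 || cpu >= nr_cpus )
            return -EINVAL;                 /* the check this patch restores */
        cpumap[cpu / 8] |= 1 << (cpu % 8);
        return 0;
    }
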
diff --git a/main/xen/xsa57.patch b/main/xen/xsa57.patch
deleted file mode 100644
index 178b818890..0000000000
--- a/main/xen/xsa57.patch
+++ /dev/null
@@ -1,333 +0,0 @@
1libxl: Restrict permissions on PV console device xenstore nodes
2
3Matthew Daley has observed that the PV console protocol places sensitive host
4state into guest-writeable xenstore locations; these include:
5
6 - The pty used to communicate between the console backend daemon and its
7 client, allowing the guest administrator to read and write arbitrary host
8 files.
9 - The output file, allowing the guest administrator to write arbitrary host
10 files or to target arbitrary qemu chardevs which include sockets, udp, pty,
11 pipes etc (see -chardev in qemu(1) for a more complete list).
12 - The maximum buffer size, allowing the guest administrator to consume more
13 resources than the host administrator has configured.
14 - The backend to use (qemu vs xenconsoled), potentially allowing the guest
15 administrator to confuse host software.
16
17So we arrange to make the sensitive keys in the xenstore frontend directory
18read only for the guest. This is safe since the xenstore permissions model,
19unlike POSIX directory permissions, does not allow the guest to remove and
20recreate a node if it has write access to the containing directory.
21
22There are a few associated wrinkles:
23
24 - The primary PV console is "special". Its xenstore node is not under the
25 usual /devices/ subtree and it does not use the customary xenstore state
26 machine protocol. Unfortunately its directory is used for other things,
27 including the vnc-port node, which we do not want the guest to be able to
28 write to. Rather than trying to track down all the possible secondary uses
29 of this directory, just make it r/o to the guest. All newly created
30 subdirectories inherit these permissions and so are now safe by default.
31
32 - The other serial consoles do use the customary xenstore state machine and
33 therefore need write access to at least the "protocol" and "state" nodes,
34 however they may also want to use arbitrary "feature-foo" nodes (although
35 I'm not aware of any) and therefore we cannot simply lock down the entire
36 frontend directory. Instead we add support to libxl__device_generic_add for
37 frontend keys which are explicitly read only and use that to lock down the
38 sensitive keys.
39
40 - Minios' console frontend wants to write the "type" node, which it has no
41 business doing since this is a host/toolstack level decision. This fails
42 now that the node has become read only to the PV guest. Since the toolstack
43 already writes this node, just remove the attempt to set it.
44
45This is CVE-XXXX-XXX / XSA-57
46
47Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
48
49Conflicts:
50 tools/libxl/libxl.c (no vtpm, free front_ro on error in
51 libxl__device_console_add)
52
53diff --git a/extras/mini-os/console/xenbus.c b/extras/mini-os/console/xenbus.c
54index 77de82a..e65baf7 100644
55--- a/extras/mini-os/console/xenbus.c
56+++ b/extras/mini-os/console/xenbus.c
57@@ -122,12 +122,6 @@ again:
58 goto abort_transaction;
59 }
60
61- err = xenbus_printf(xbt, nodename, "type", "%s", "ioemu");
62- if (err) {
63- message = "writing type";
64- goto abort_transaction;
65- }
66-
67 snprintf(path, sizeof(path), "%s/state", nodename);
68 err = xenbus_switch_state(xbt, path, XenbusStateConnected);
69 if (err) {
70diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
71index a6e9601..32d788a 100644
72--- a/tools/libxl/libxl.c
73+++ b/tools/libxl/libxl.c
74@@ -1920,8 +1920,9 @@ static void device_disk_add(libxl__egc *egc, uint32_t domid,
75 flexarray_append(front, disk->is_cdrom ? "cdrom" : "disk");
76
77 libxl__device_generic_add(gc, t, device,
78- libxl__xs_kvs_of_flexarray(gc, back, back->count),
79- libxl__xs_kvs_of_flexarray(gc, front, front->count));
80+ libxl__xs_kvs_of_flexarray(gc, back, back->count),
81+ libxl__xs_kvs_of_flexarray(gc, front, front->count),
82+ NULL);
83
84 rc = libxl__xs_transaction_commit(gc, &t);
85 if (!rc) break;
86@@ -2633,8 +2634,9 @@ void libxl__device_nic_add(libxl__egc *egc, uint32_t domid,
87 flexarray_append(front, libxl__sprintf(gc,
88 LIBXL_MAC_FMT, LIBXL_MAC_BYTES(nic->mac)));
89 libxl__device_generic_add(gc, XBT_NULL, device,
90- libxl__xs_kvs_of_flexarray(gc, back, back->count),
91- libxl__xs_kvs_of_flexarray(gc, front, front->count));
92+ libxl__xs_kvs_of_flexarray(gc, back, back->count),
93+ libxl__xs_kvs_of_flexarray(gc, front, front->count),
94+ NULL);
95
96 aodev->dev = device;
97 aodev->action = DEVICE_CONNECT;
98@@ -2830,7 +2832,7 @@ int libxl__device_console_add(libxl__gc *gc, uint32_t domid,
99 libxl__device_console *console,
100 libxl__domain_build_state *state)
101 {
102- flexarray_t *front;
103+ flexarray_t *front, *ro_front;
104 flexarray_t *back;
105 libxl__device device;
106 int rc;
107@@ -2845,6 +2847,11 @@ int libxl__device_console_add(libxl__gc *gc, uint32_t domid,
108 rc = ERROR_NOMEM;
109 goto out;
110 }
111+ ro_front = flexarray_make(16, 1);
112+ if (!ro_front) {
113+ rc = ERROR_NOMEM;
114+ goto out;
115+ }
116 back = flexarray_make(16, 1);
117 if (!back) {
118 rc = ERROR_NOMEM;
119@@ -2871,21 +2878,24 @@ int libxl__device_console_add(libxl__gc *gc, uint32_t domid,
120
121 flexarray_append(front, "backend-id");
122 flexarray_append(front, libxl__sprintf(gc, "%d", console->backend_domid));
123- flexarray_append(front, "limit");
124- flexarray_append(front, libxl__sprintf(gc, "%d", LIBXL_XENCONSOLE_LIMIT));
125- flexarray_append(front, "type");
126+
127+ flexarray_append(ro_front, "limit");
128+ flexarray_append(ro_front, libxl__sprintf(gc, "%d", LIBXL_XENCONSOLE_LIMIT));
129+ flexarray_append(ro_front, "type");
130 if (console->consback == LIBXL__CONSOLE_BACKEND_XENCONSOLED)
131- flexarray_append(front, "xenconsoled");
132+ flexarray_append(ro_front, "xenconsoled");
133 else
134- flexarray_append(front, "ioemu");
135- flexarray_append(front, "output");
136- flexarray_append(front, console->output);
137+ flexarray_append(ro_front, "ioemu");
138+ flexarray_append(ro_front, "output");
139+ flexarray_append(ro_front, console->output);
140+ flexarray_append(ro_front, "tty");
141+ flexarray_append(ro_front, "");
142
143 if (state) {
144- flexarray_append(front, "port");
145- flexarray_append(front, libxl__sprintf(gc, "%"PRIu32, state->console_port));
146- flexarray_append(front, "ring-ref");
147- flexarray_append(front, libxl__sprintf(gc, "%lu", state->console_mfn));
148+ flexarray_append(ro_front, "port");
149+ flexarray_append(ro_front, libxl__sprintf(gc, "%"PRIu32, state->console_port));
150+ flexarray_append(ro_front, "ring-ref");
151+ flexarray_append(ro_front, libxl__sprintf(gc, "%lu", state->console_mfn));
152 } else {
153 flexarray_append(front, "state");
154 flexarray_append(front, libxl__sprintf(gc, "%d", 1));
155@@ -2894,11 +2904,13 @@ int libxl__device_console_add(libxl__gc *gc, uint32_t domid,
156 }
157
158 libxl__device_generic_add(gc, XBT_NULL, &device,
159- libxl__xs_kvs_of_flexarray(gc, back, back->count),
160- libxl__xs_kvs_of_flexarray(gc, front, front->count));
161+ libxl__xs_kvs_of_flexarray(gc, back, back->count),
162+ libxl__xs_kvs_of_flexarray(gc, front, front->count),
163+ libxl__xs_kvs_of_flexarray(gc, ro_front, ro_front->count));
164 rc = 0;
165 out_free:
166 flexarray_free(back);
167+ flexarray_free(ro_front);
168 flexarray_free(front);
169 out:
170 return rc;
171@@ -2982,8 +2994,9 @@ int libxl__device_vkb_add(libxl__gc *gc, uint32_t domid,
172 flexarray_append(front, libxl__sprintf(gc, "%d", 1));
173
174 libxl__device_generic_add(gc, XBT_NULL, &device,
175- libxl__xs_kvs_of_flexarray(gc, back, back->count),
176- libxl__xs_kvs_of_flexarray(gc, front, front->count));
177+ libxl__xs_kvs_of_flexarray(gc, back, back->count),
178+ libxl__xs_kvs_of_flexarray(gc, front, front->count),
179+ NULL);
180 rc = 0;
181 out_free:
182 flexarray_free(back);
183@@ -3096,8 +3109,9 @@ int libxl__device_vfb_add(libxl__gc *gc, uint32_t domid, libxl_device_vfb *vfb)
184 flexarray_append_pair(front, "state", libxl__sprintf(gc, "%d", 1));
185
186 libxl__device_generic_add(gc, XBT_NULL, &device,
187- libxl__xs_kvs_of_flexarray(gc, back, back->count),
188- libxl__xs_kvs_of_flexarray(gc, front, front->count));
189+ libxl__xs_kvs_of_flexarray(gc, back, back->count),
190+ libxl__xs_kvs_of_flexarray(gc, front, front->count),
191+ NULL);
192 rc = 0;
193 out_free:
194 flexarray_free(front);
195diff --git a/tools/libxl/libxl_device.c b/tools/libxl/libxl_device.c
196index c3283f1..1c04a21 100644
197--- a/tools/libxl/libxl_device.c
198+++ b/tools/libxl/libxl_device.c
199@@ -84,11 +84,12 @@ out:
200 }
201
202 int libxl__device_generic_add(libxl__gc *gc, xs_transaction_t t,
203- libxl__device *device, char **bents, char **fents)
204+ libxl__device *device, char **bents, char **fents, char **ro_fents)
205 {
206 libxl_ctx *ctx = libxl__gc_owner(gc);
207 char *frontend_path, *backend_path;
208 struct xs_permissions frontend_perms[2];
209+ struct xs_permissions ro_frontend_perms[2];
210 struct xs_permissions backend_perms[2];
211 int create_transaction = t == XBT_NULL;
212
213@@ -100,22 +101,37 @@ int libxl__device_generic_add(libxl__gc *gc, xs_transaction_t t,
214 frontend_perms[1].id = device->backend_domid;
215 frontend_perms[1].perms = XS_PERM_READ;
216
217- backend_perms[0].id = device->backend_domid;
218- backend_perms[0].perms = XS_PERM_NONE;
219- backend_perms[1].id = device->domid;
220- backend_perms[1].perms = XS_PERM_READ;
221+ ro_frontend_perms[0].id = backend_perms[0].id = device->backend_domid;
222+ ro_frontend_perms[0].perms = backend_perms[0].perms = XS_PERM_NONE;
223+ ro_frontend_perms[1].id = backend_perms[1].id = device->domid;
224+ ro_frontend_perms[1].perms = backend_perms[1].perms = XS_PERM_READ;
225
226 retry_transaction:
227 if (create_transaction)
228 t = xs_transaction_start(ctx->xsh);
229 /* FIXME: read frontend_path and check state before removing stuff */
230
231- if (fents) {
232+ if (fents || ro_fents) {
233 xs_rm(ctx->xsh, t, frontend_path);
234 xs_mkdir(ctx->xsh, t, frontend_path);
235- xs_set_permissions(ctx->xsh, t, frontend_path, frontend_perms, ARRAY_SIZE(frontend_perms));
236+ /* Console 0 is a special case. It doesn't use the regular PV
237+ * state machine but also the frontend directory has
238+ * historically contained other information, such as the
239+ * vnc-port, which we don't want the guest fiddling with.
240+ */
241+ if (device->kind == LIBXL__DEVICE_KIND_CONSOLE && device->devid == 0)
242+ xs_set_permissions(ctx->xsh, t, frontend_path,
243+ ro_frontend_perms, ARRAY_SIZE(ro_frontend_perms));
244+ else
245+ xs_set_permissions(ctx->xsh, t, frontend_path,
246+ frontend_perms, ARRAY_SIZE(frontend_perms));
247 xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/backend", frontend_path), backend_path, strlen(backend_path));
248- libxl__xs_writev(gc, t, frontend_path, fents);
249+ if (fents)
250+ libxl__xs_writev_perms(gc, t, frontend_path, fents,
251+ frontend_perms, ARRAY_SIZE(frontend_perms));
252+ if (ro_fents)
253+ libxl__xs_writev_perms(gc, t, frontend_path, ro_fents,
254+ ro_frontend_perms, ARRAY_SIZE(ro_frontend_perms));
255 }
256
257 if (bents) {
258diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
259index 13fa509..ae96a74 100644
260--- a/tools/libxl/libxl_internal.h
261+++ b/tools/libxl/libxl_internal.h
262@@ -516,6 +516,11 @@ _hidden char **libxl__xs_kvs_of_flexarray(libxl__gc *gc, flexarray_t *array, int
263 /* treats kvs as pairs of keys and values and writes each to dir. */
264 _hidden int libxl__xs_writev(libxl__gc *gc, xs_transaction_t t,
265 const char *dir, char **kvs);
266+/* as writev but also sets the permissions on each path */
267+_hidden int libxl__xs_writev_perms(libxl__gc *gc, xs_transaction_t t,
268+ const char *dir, char *kvs[],
269+ struct xs_permissions *perms,
270+ unsigned int num_perms);
271 /* _atonce creates a transaction and writes all keys at once */
272 _hidden int libxl__xs_writev_atonce(libxl__gc *gc,
273 const char *dir, char **kvs);
274@@ -930,7 +935,7 @@ _hidden int libxl__device_console_add(libxl__gc *gc, uint32_t domid,
275 libxl__domain_build_state *state);
276
277 _hidden int libxl__device_generic_add(libxl__gc *gc, xs_transaction_t t,
278- libxl__device *device, char **bents, char **fents);
279+ libxl__device *device, char **bents, char **fents, char **ro_fents);
280 _hidden char *libxl__device_backend_path(libxl__gc *gc, libxl__device *device);
281 _hidden char *libxl__device_frontend_path(libxl__gc *gc, libxl__device *device);
282 _hidden int libxl__parse_backend_path(libxl__gc *gc, const char *path,
283diff --git a/tools/libxl/libxl_pci.c b/tools/libxl/libxl_pci.c
284index 48986f3..d373b4d 100644
285--- a/tools/libxl/libxl_pci.c
286+++ b/tools/libxl/libxl_pci.c
287@@ -106,7 +106,8 @@ int libxl__create_pci_backend(libxl__gc *gc, uint32_t domid,
288
289 libxl__device_generic_add(gc, XBT_NULL, &device,
290 libxl__xs_kvs_of_flexarray(gc, back, back->count),
291- libxl__xs_kvs_of_flexarray(gc, front, front->count));
292+ libxl__xs_kvs_of_flexarray(gc, front, front->count),
293+ NULL);
294
295 out:
296 if (back)
297diff --git a/tools/libxl/libxl_xshelp.c b/tools/libxl/libxl_xshelp.c
298index 52af484..d7eaa66 100644
299--- a/tools/libxl/libxl_xshelp.c
300+++ b/tools/libxl/libxl_xshelp.c
301@@ -41,8 +41,10 @@ char **libxl__xs_kvs_of_flexarray(libxl__gc *gc, flexarray_t *array, int length)
302 return kvs;
303 }
304
305-int libxl__xs_writev(libxl__gc *gc, xs_transaction_t t,
306- const char *dir, char *kvs[])
307+int libxl__xs_writev_perms(libxl__gc *gc, xs_transaction_t t,
308+ const char *dir, char *kvs[],
309+ struct xs_permissions *perms,
310+ unsigned int num_perms)
311 {
312 libxl_ctx *ctx = libxl__gc_owner(gc);
313 char *path;
314@@ -56,11 +58,19 @@ int libxl__xs_writev(libxl__gc *gc, xs_transaction_t t,
315 if (path && kvs[i + 1]) {
316 int length = strlen(kvs[i + 1]);
317 xs_write(ctx->xsh, t, path, kvs[i + 1], length);
318+ if (perms)
319+ xs_set_permissions(ctx->xsh, t, path, perms, num_perms);
320 }
321 }
322 return 0;
323 }
324
325+int libxl__xs_writev(libxl__gc *gc, xs_transaction_t t,
326+ const char *dir, char *kvs[])
327+{
328+ return libxl__xs_writev_perms(gc, t, dir, kvs, NULL, 0);
329+}
330+
331 int libxl__xs_writev_atonce(libxl__gc *gc,
332 const char *dir, char *kvs[])
333 {
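
The heart of the fix is the permission pair applied to the read-only frontend keys: the backend domain owns the node and the guest gets read access only. A minimal sketch using the xenstore client API (the xs.h header path and the helper name are assumptions; error handling elided):

    #include <stdbool.h>
    #include <xs.h>

    /* Make 'path' owned by backend_domid with guest 'domid' limited to
     * read access, mirroring ro_frontend_perms above. */
    static bool make_guest_readonly(struct xs_handle *xsh, xs_transaction_t t,
                                    const char *path,
                                    unsigned int backend_domid, unsigned int domid)
    {
        struct xs_permissions perms[2] = {
            { .id = backend_domid, .perms = XS_PERM_NONE },  /* owner */
            { .id = domid,         .perms = XS_PERM_READ },  /* guest: r/o */
        };

        return xs_set_permissions(xsh, t, path, perms, 2);
    }
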
diff --git a/main/xen/xsa58-4.2.patch b/main/xen/xsa58-4.2.patch
deleted file mode 100644
index 1ea3aaa97d..0000000000
--- a/main/xen/xsa58-4.2.patch
+++ /dev/null
@@ -1,129 +0,0 @@
1x86: fix page refcount handling in page table pin error path
2
3In the original patch 7 of the series addressing XSA-45 I mistakenly
4took the addition of the call to get_page_light() in alloc_page_type()
5to cover two decrements that would happen: One for the PGT_partial bit
6that is getting set along with the call, and the other for the page
7reference the caller holds (and would be dropping on its error path).
8But of course the additional page reference is tied to the PGT_partial
9bit, and hence any caller of a function that may leave
10->arch.old_guest_table non-NULL for error cleanup purposes has to make
11sure a respective page reference gets retained.
12
13Similar issues were then also spotted elsewhere: In effect all callers
14of get_page_type_preemptible() need to deal with errors in similar
15ways. To make sure error handling can work this way without leaking
16page references, a respective assertion gets added to that function.
17
18This is CVE-2013-1432 / XSA-58.
19
20Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
21Signed-off-by: Jan Beulich <jbeulich@suse.com>
22Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
23Reviewed-by: Tim Deegan <tim@xen.org>
24
25--- a/xen/arch/x86/domain.c
26+++ b/xen/arch/x86/domain.c
27@@ -941,6 +941,10 @@ int arch_set_info_guest(
28 if ( v->vcpu_id == 0 )
29 d->vm_assist = c(vm_assist);
30
31+ rc = put_old_guest_table(current);
32+ if ( rc )
33+ return rc;
34+
35 if ( !compat )
36 rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
37 #ifdef CONFIG_COMPAT
38@@ -980,18 +984,24 @@ int arch_set_info_guest(
39 }
40 else
41 {
42- /*
43- * Since v->arch.guest_table{,_user} are both NULL, this effectively
44- * is just a call to put_old_guest_table().
45- */
46 if ( !compat )
47- rc = vcpu_destroy_pagetables(v);
48+ rc = put_old_guest_table(v);
49 if ( !rc )
50 rc = get_page_type_preemptible(cr3_page,
51 !compat ? PGT_root_page_table
52 : PGT_l3_page_table);
53- if ( rc == -EINTR )
54+ switch ( rc )
55+ {
56+ case -EINTR:
57 rc = -EAGAIN;
58+ case -EAGAIN:
59+ case 0:
60+ break;
61+ default:
62+ if ( cr3_page == current->arch.old_guest_table )
63+ cr3_page = NULL;
64+ break;
65+ }
66 }
67 if ( rc )
68 /* handled below */;
69@@ -1018,6 +1028,11 @@ int arch_set_info_guest(
70 pagetable_get_page(v->arch.guest_table);
71 v->arch.guest_table = pagetable_null();
72 break;
73+ default:
74+ if ( cr3_page == current->arch.old_guest_table )
75+ cr3_page = NULL;
76+ case 0:
77+ break;
78 }
79 }
80 if ( !rc )
81--- a/xen/arch/x86/mm.c
82+++ b/xen/arch/x86/mm.c
83@@ -718,7 +718,8 @@ static int get_page_and_type_from_pagenr
84 get_page_type_preemptible(page, type) :
85 (get_page_type(page, type) ? 0 : -EINVAL));
86
87- if ( unlikely(rc) && partial >= 0 )
88+ if ( unlikely(rc) && partial >= 0 &&
89+ (!preemptible || page != current->arch.old_guest_table) )
90 put_page(page);
91
92 return rc;
93@@ -2638,6 +2639,7 @@ int put_page_type_preemptible(struct pag
94
95 int get_page_type_preemptible(struct page_info *page, unsigned long type)
96 {
97+ ASSERT(!current->arch.old_guest_table);
98 return __get_page_type(page, type, 1);
99 }
100
101@@ -2848,7 +2850,7 @@ static void put_superpage(unsigned long
102
103 #endif
104
105-static int put_old_guest_table(struct vcpu *v)
106+int put_old_guest_table(struct vcpu *v)
107 {
108 int rc;
109
110@@ -3253,7 +3255,8 @@ long do_mmuext_op(
111 rc = -EAGAIN;
112 else if ( rc != -EAGAIN )
113 MEM_LOG("Error while pinning mfn %lx", page_to_mfn(page));
114- put_page(page);
115+ if ( page != curr->arch.old_guest_table )
116+ put_page(page);
117 break;
118 }
119
120--- a/xen/include/asm-x86/mm.h
121+++ b/xen/include/asm-x86/mm.h
122@@ -374,6 +374,7 @@ void put_page_type(struct page_info *pag
123 int get_page_type(struct page_info *page, unsigned long type);
124 int put_page_type_preemptible(struct page_info *page);
125 int get_page_type_preemptible(struct page_info *page, unsigned long type);
126+int put_old_guest_table(struct vcpu *);
127 int get_page_from_l1e(
128 l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
129 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
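
The rule the hunks above enforce reduces to: on an error path, drop the caller's page reference only if the page was not parked in old_guest_table for deferred cleanup, otherwise the same reference would be dropped twice. A self-contained toy illustration (all types and names here are illustrative, not the Xen mm API):

    /* Toy model of the xsa58 error-path rule. */
    struct page { int refcount; };
    struct vcpu { struct page *old_guest_table; };

    static void put_page(struct page *pg)
    {
        pg->refcount--;
    }

    /* Called when acquiring a type reference on 'pg' failed. */
    static void error_path(struct vcpu *curr, struct page *pg)
    {
        if ( pg != curr->old_guest_table )   /* the check the patch adds */
            put_page(pg);                    /* safe: no deferred drop pending */
    }
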