drivers/char/tpm/xen-tpmfront.c
/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
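/* Per-device frontend state, shared with the interrupt handler and TPM core. */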
struct tpm_private {
        struct tpm_chip *chip;
        struct xenbus_device *dev;

        struct vtpm_shared_page *shr;

        unsigned int evtchn;
        int ring_ref;
        domid_t backend_id;
};
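/*
 * Status bits reported to the TPM core; derived from the vtpm_shared_page
 * state rather than read from real hardware registers.
 */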
enum status_bits {
        VTPM_STATUS_RUNNING  = 0x1,
        VTPM_STATUS_IDLE     = 0x2,
        VTPM_STATUS_RESULT   = 0x4,
        VTPM_STATUS_CANCELED = 0x8,
};
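/* Translate the backend's shared-page state into TPM status bits. */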
static u8 vtpm_status(struct tpm_chip *chip)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        switch (priv->shr->state) {
        case VTPM_STATE_IDLE:
                return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
        case VTPM_STATE_FINISH:
                return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
        case VTPM_STATE_SUBMIT:
        case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
                return VTPM_STATUS_RUNNING;
        default:
                return 0;
        }
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return status & VTPM_STATUS_CANCELED;
}

static void vtpm_cancel(struct tpm_chip *chip)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        priv->shr->state = VTPM_STATE_CANCEL;
        wmb();
        notify_remote_via_evtchn(priv->evtchn);
}
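/*
 * Command/response data starts after the shared page header and the
 * array of extra-page grant references.
 */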
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
        return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}
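/*
 * Copy a command into the shared page, tell the backend to process it,
 * and wait until the backend reports completion or the ordinal's
 * timeout expires.
 */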
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        struct vtpm_shared_page *shr = priv->shr;
        unsigned int offset = shr_data_offset(shr);

        u32 ordinal;
        unsigned long duration;

        if (offset > PAGE_SIZE)
                return -EINVAL;

        if (offset + count > PAGE_SIZE)
                return -EINVAL;

        /* Wait for completion of any existing command or cancellation */
        if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
                        &chip->vendor.read_queue, true) < 0) {
                vtpm_cancel(chip);
                return -ETIME;
        }

        memcpy(offset + (u8 *)shr, buf, count);
        shr->length = count;
        barrier();
        shr->state = VTPM_STATE_SUBMIT;
        wmb();
        notify_remote_via_evtchn(priv->evtchn);

        ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
        duration = tpm_calc_ordinal_duration(chip, ordinal);

        if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
                        &chip->vendor.read_queue, true) < 0) {
                /* got a signal or timeout, try to cancel */
                vtpm_cancel(chip);
                return -ETIME;
        }

        return count;
}
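/*
 * Copy the backend's response out of the shared page, clamping the
 * length to both the page and the caller's buffer.
 */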
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        struct vtpm_shared_page *shr = priv->shr;
        unsigned int offset = shr_data_offset(shr);
        size_t length = shr->length;

        if (shr->state == VTPM_STATE_IDLE)
                return -ECANCELED;

        /* In theory the wait at the end of _send makes this one unnecessary */
        if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
                        &chip->vendor.read_queue, true) < 0) {
                vtpm_cancel(chip);
                return -ETIME;
        }

        if (offset > PAGE_SIZE)
                return -EIO;

        if (offset + length > PAGE_SIZE)
                length = PAGE_SIZE - offset;

        if (length > count)
                length = count;

        memcpy(buf, offset + (u8 *)shr, length);

        return length;
}

static const struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
        NULL,
};

static struct attribute_group vtpm_attr_grp = {
        .attrs = vtpm_attrs,
};

static const struct tpm_vendor_specific tpm_vtpm = {
        .status = vtpm_status,
        .recv = vtpm_recv,
        .send = vtpm_send,
        .cancel = vtpm_cancel,
        .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
        .req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
        .req_canceled      = vtpm_req_canceled,
        .attr_group = &vtpm_attr_grp,
        .miscdev = {
                .fops = &vtpm_ops,
        },
};
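/* Event channel handler: wake any waiter once the backend has finished. */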
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
        struct tpm_private *priv = dev_id;

        switch (priv->shr->state) {
        case VTPM_STATE_IDLE:
        case VTPM_STATE_FINISH:
                wake_up_interruptible(&priv->chip->vendor.read_queue);
                break;
        case VTPM_STATE_SUBMIT:
        case VTPM_STATE_CANCEL:
        default:
                break;
        }
        return IRQ_HANDLED;
}
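/* Register with the TPM core and link the chip to our private data. */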
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
        struct tpm_chip *chip;

        chip = tpm_register_hardware(dev, &tpm_vtpm);
        if (!chip)
                return -ENODEV;

        init_waitqueue_head(&chip->vendor.read_queue);

        priv->chip = chip;
        TPM_VPRIV(chip) = priv;

        return 0;
}
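/*
 * Allocate and grant the shared page, set up the event channel, and
 * publish both to the backend via xenstore.
 */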
/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
        struct xenbus_transaction xbt;
        const char *message = NULL;
        int rv;

        priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!priv->shr) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }

        rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
        if (rv < 0)
                return rv;

        priv->ring_ref = rv;

        rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
        if (rv)
                return rv;

        rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
                                       "tpmif", priv);
        if (rv <= 0) {
                xenbus_dev_fatal(dev, rv, "allocating TPM irq");
                return rv;
        }
        priv->chip->vendor.irq = rv;

 again:
        rv = xenbus_transaction_start(&xbt);
        if (rv) {
                xenbus_dev_fatal(dev, rv, "starting transaction");
                return rv;
        }

        rv = xenbus_printf(xbt, dev->nodename,
                        "ring-ref", "%u", priv->ring_ref);
        if (rv) {
                message = "writing ring-ref";
                goto abort_transaction;
        }

        rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                        priv->evtchn);
        if (rv) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
        if (rv) {
                message = "writing feature-protocol-v2";
                goto abort_transaction;
        }

        rv = xenbus_transaction_end(xbt, 0);
        if (rv == -EAGAIN)
                goto again;
        if (rv) {
                xenbus_dev_fatal(dev, rv, "completing transaction");
                return rv;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_error(dev, rv, "%s", message);

        return rv;
}
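/*
 * Tear down what setup_ring() built: end the grant (which also releases
 * the shared page), unbind the irq handler, and free the private data.
 */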
static void ring_free(struct tpm_private *priv)
{
        if (!priv)
                return;

        if (priv->ring_ref)
                gnttab_end_foreign_access(priv->ring_ref, 0,
                                (unsigned long)priv->shr);
        else
                free_page((unsigned long)priv->shr);

        if (priv->chip && priv->chip->vendor.irq)
                unbind_from_irqhandler(priv->chip->vendor.irq, priv);

        kfree(priv);
}
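/* Allocate per-device state, register the chip, and connect the ring. */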
static int tpmfront_probe(struct xenbus_device *dev,
                const struct xenbus_device_id *id)
{
        struct tpm_private *priv;
        int rv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
                return -ENOMEM;
        }

        rv = setup_chip(&dev->dev, priv);
        if (rv) {
                kfree(priv);
                return rv;
        }

        rv = setup_ring(dev, priv);
        if (rv) {
                tpm_remove_hardware(&dev->dev);
                ring_free(priv);
                return rv;
        }

        tpm_get_timeouts(priv->chip);

        dev_set_drvdata(&dev->dev, priv->chip);

        return rv;
}

static int tpmfront_remove(struct xenbus_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
        struct tpm_private *priv = TPM_VPRIV(chip);
        tpm_remove_hardware(&dev->dev);
        ring_free(priv);
        TPM_VPRIV(chip) = NULL;
        return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
        /* A suspend/resume/migrate will interrupt a vTPM anyway */
        tpmfront_remove(dev);
        return tpmfront_probe(dev, NULL);
}
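/*
 * React to backend state changes: require the v2 vTPM protocol before
 * connecting, and tear down when the backend closes.
 */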
static void backend_changed(struct xenbus_device *dev,
                enum xenbus_state backend_state)
{
        int val;

        switch (backend_state) {
        case XenbusStateInitialised:
        case XenbusStateConnected:
                if (dev->state == XenbusStateConnected)
                        break;

                if (xenbus_scanf(XBT_NIL, dev->otherend,
                                "feature-protocol-v2", "%d", &val) < 0)
                        val = 0;
                if (!val) {
                        xenbus_dev_fatal(dev, -EINVAL,
                                        "vTPM protocol 2 required");
                        return;
                }
                xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosing:
        case XenbusStateClosed:
                device_unregister(&dev->dev);
                xenbus_frontend_closed(dev);
                break;
        default:
                break;
        }
}

static const struct xenbus_device_id tpmfront_ids[] = {
        { "vtpm" },
        { "" }
};
MODULE_ALIAS("xen:vtpm");

static DEFINE_XENBUS_DRIVER(tpmfront, ,
                .probe = tpmfront_probe,
                .remove = tpmfront_remove,
                .resume = tpmfront_resume,
                .otherend_changed = backend_changed,
        );

static int __init xen_tpmfront_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
        xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");