1 /* ldc.c: Logical Domain Channel link-layer protocol driver.
3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4 */
6 #include <linux/kernel.h>
7 #include <linux/export.h>
8 #include <linux/slab.h>
9 #include <linux/spinlock.h>
10 #include <linux/delay.h>
11 #include <linux/errno.h>
12 #include <linux/string.h>
13 #include <linux/scatterlist.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/init.h>
17 #include <linux/bitmap.h>
18 #include <linux/hash.h>
19 #include <linux/iommu-common.h>
21 #include <asm/hypervisor.h>
22 #include <asm/iommu.h>
25 #include <asm/mdesc.h>
27 #define DRV_MODULE_NAME "ldc"
28 #define PFX DRV_MODULE_NAME ": "
29 #define DRV_MODULE_VERSION "1.1"
30 #define DRV_MODULE_RELDATE "July 22, 2008"
32 #define COOKIE_PGSZ_CODE 0xf000000000000000ULL
33 #define COOKIE_PGSZ_CODE_SHIFT 60ULL
35 static DEFINE_PER_CPU(unsigned int, ldc_pool_hash);
37 static char version[] =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39 #define LDC_PACKET_SIZE 64
41 /* Packet header layout for unreliable and reliable mode frames.
42 * When in RAW mode, packets are simply straight 64-byte payloads
43 * with no headers.
44 */
57 #define LDC_VERS 0x01 /* Link Version */
58 #define LDC_RTS 0x02 /* Request To Send */
59 #define LDC_RTR 0x03 /* Ready To Receive */
60 #define LDC_RDX 0x04 /* Ready for Data eXchange */
61 #define LDC_CTRL_MSK 0x0f
65 #define LDC_FRAG_MASK 0xc0
66 #define LDC_START 0x40
72 u8 u_data[LDC_PACKET_SIZE - 8];
76 u8 r_data[LDC_PACKET_SIZE - 8 - 8];
86 /* Ordered from largest major version to smallest. */
87 static struct ldc_version ver_arr[] = {
88 { .major = 1, .minor = 0 },
91 #define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE)
92 #define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
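/* Illustrative sketch (not part of the driver): how the per-mode payload
 * size falls out of the header layout above.  RAW frames carry no header,
 * non-raw frames lose 8 bytes to the common header, and stream mode loses
 * another 8 bytes to the ackid word.  ldc_alloc() below computes the same
 * values; the LDC_MODE_* constants come from <asm/ldc.h>.
 */
static inline unsigned int example_mode_mss(u8 mode)
{
	switch (mode) {
	case LDC_MODE_RAW:
		return LDC_PACKET_SIZE;		/* 64 bytes */
	case LDC_MODE_UNRELIABLE:
		return LDC_PACKET_SIZE - 8;	/* 56 bytes */
	case LDC_MODE_STREAM:
		return LDC_PACKET_SIZE - 8 - 8;	/* 48 bytes */
	default:
		return 0;			/* unknown mode */
	}
}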
97 int (*write)(struct ldc_channel *, const void *, unsigned int);
98 int (*read)(struct ldc_channel *, void *, unsigned int);
101 static const struct ldc_mode_ops raw_ops;
102 static const struct ldc_mode_ops nonraw_ops;
103 static const struct ldc_mode_ops stream_ops;
105 int ldom_domaining_enabled;
108 /* Protects ldc_unmap. */
110 struct ldc_mtable_entry *page_table;
111 struct iommu_table iommu_table;
115 /* Protects all operations that depend upon channel state. */
124 struct ldc_packet *tx_base;
125 unsigned long tx_head;
126 unsigned long tx_tail;
127 unsigned long tx_num_entries;
130 unsigned long tx_acked;
132 struct ldc_packet *rx_base;
133 unsigned long rx_head;
134 unsigned long rx_tail;
135 unsigned long rx_num_entries;
141 unsigned long chan_state;
143 struct ldc_channel_config cfg;
146 const struct ldc_mode_ops *mops;
148 struct ldc_iommu iommu;
150 struct ldc_version ver;
153 #define LDC_HS_CLOSED 0x00
154 #define LDC_HS_OPEN 0x01
155 #define LDC_HS_GOTVERS 0x02
156 #define LDC_HS_SENTRTR 0x03
157 #define LDC_HS_GOTRTR 0x04
158 #define LDC_HS_COMPLETE 0x10
161 #define LDC_FLAG_ALLOCED_QUEUES 0x01
162 #define LDC_FLAG_REGISTERED_QUEUES 0x02
163 #define LDC_FLAG_REGISTERED_IRQS 0x04
164 #define LDC_FLAG_RESET 0x10
169 #define LDC_IRQ_NAME_MAX 32
170 char rx_irq_name[LDC_IRQ_NAME_MAX];
171 char tx_irq_name[LDC_IRQ_NAME_MAX];
173 struct hlist_head mh_list;
175 struct hlist_node list;
178 #define ldcdbg(TYPE, f, a...) \
179 do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
180 printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
181 } while (0)
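/* Example (assuming the LDC_DEBUG_* bits defined alongside
 * struct ldc_channel_config in <asm/ldc.h>): a client that sets
 * cfg.debug = LDC_DEBUG_STATE | LDC_DEBUG_HS gets the state machine
 * and handshake traces emitted below, e.g. "ID[3] SEND VER INFO ...".
 */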
183 static const char *state_to_str(u8 state)
186 case LDC_STATE_INVALID:
190 case LDC_STATE_BOUND:
192 case LDC_STATE_READY:
194 case LDC_STATE_CONNECTED:
201 static void ldc_set_state(struct ldc_channel *lp, u8 state)
203 ldcdbg(STATE, "STATE (%s) --> (%s)\n",
204 state_to_str(lp->state),
205 state_to_str(state));
210 static unsigned long __advance(unsigned long off, unsigned long num_entries)
212 off += LDC_PACKET_SIZE;
213 if (off == (num_entries * LDC_PACKET_SIZE))
219 static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
221 return __advance(off, lp->rx_num_entries);
224 static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
226 return __advance(off, lp->tx_num_entries);
229 static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
230 unsigned long *new_tail)
232 struct ldc_packet *p;
235 t = tx_advance(lp, lp->tx_tail);
236 if (t == lp->tx_head)
242 return p + (lp->tx_tail / LDC_PACKET_SIZE);
245 /* When we are in reliable or stream mode, we have to track the next packet
246 * we haven't gotten an ACK for in the TX queue using tx_acked. We have
247 * to be careful not to stomp over the queue past that point. During
248 * the handshake, we don't have TX data packets pending in the queue
249 * and that's why handshake_get_tx_packet() need not be mindful of
250 * lp->tx_acked.
251 */
252 static unsigned long head_for_data(struct ldc_channel *lp)
254 if (lp->cfg.mode == LDC_MODE_STREAM)
259 static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
261 unsigned long limit, tail, new_tail, diff;
264 limit = head_for_data(lp);
266 new_tail = tx_advance(lp, tail);
267 if (new_tail == limit)
270 if (limit > new_tail)
271 diff = limit - new_tail;
274 ((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
275 diff /= LDC_PACKET_SIZE;
278 if (diff * mss < size)
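/* Worked example (assuming sparc64's default 8K PAGE_SIZE, so a default
 * queue holds 8192 / 64 = 128 slots): one slot between tail and limit is
 * always left unused so that a full queue is distinguishable from an
 * empty one, leaving at most 127 packets in flight.  In stream mode
 * (mss = 48) a single write may therefore carry up to 127 * 48 = 6096
 * payload bytes before tx_has_space_for() refuses.
 */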
284 static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
285 unsigned long *new_tail)
287 struct ldc_packet *p;
290 h = head_for_data(lp);
291 t = tx_advance(lp, lp->tx_tail);
298 return p + (lp->tx_tail / LDC_PACKET_SIZE);
301 static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
303 unsigned long orig_tail = lp->tx_tail;
307 while (limit-- > 0) {
310 err = sun4v_ldc_tx_set_qtail(lp->id, tail);
314 if (err != HV_EWOULDBLOCK) {
315 lp->tx_tail = orig_tail;
321 lp->tx_tail = orig_tail;
325 /* This just updates the head value in the hypervisor using
326 * a polling loop with a timeout. The caller takes care of
327 * updating software state representing the head change, if any.
328 */
329 static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
333 while (limit-- > 0) {
336 err = sun4v_ldc_rx_set_qhead(lp->id, head);
340 if (err != HV_EWOULDBLOCK)
349 static int send_tx_packet(struct ldc_channel *lp,
350 struct ldc_packet *p,
351 unsigned long new_tail)
353 BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
355 return set_tx_tail(lp, new_tail);
358 static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
360 void *data, int dlen,
361 unsigned long *new_tail)
363 struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
366 memset(p, 0, sizeof(*p));
371 memcpy(p->u.u_data, data, dlen);
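/* Handshake overview, summarized from the handlers below: the side that
 * calls ldc_connect() sends a VERS INFO frame, and the peers exchange
 * VERS ACK/NACK until a common version is agreed upon (process_ver_*()).
 * RTS and RTR then seed the sequence numbers, and RDX completes the
 * handshake, moving hs_state to LDC_HS_COMPLETE and the channel to
 * LDC_STATE_CONNECTED.  RAW mode skips all of this; ldc_rx()/ldc_tx()
 * mark the channel up as soon as the hypervisor reports LDC_CHANNEL_UP.
 */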
376 static int start_handshake(struct ldc_channel *lp)
378 struct ldc_packet *p;
379 struct ldc_version *ver;
380 unsigned long new_tail;
384 ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
385 ver->major, ver->minor);
387 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
388 ver, sizeof(*ver), &new_tail);
389 if (p) {
390 int err = send_tx_packet(lp, p, new_tail);
391 if (!err)
392 lp->flags &= ~LDC_FLAG_RESET;
398 static int send_version_nack(struct ldc_channel *lp,
399 u16 major, u16 minor)
401 struct ldc_packet *p;
402 struct ldc_version ver;
403 unsigned long new_tail;
408 p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
409 &ver, sizeof(ver), &new_tail);
411 ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
412 ver.major, ver.minor);
414 return send_tx_packet(lp, p, new_tail);
419 static int send_version_ack(struct ldc_channel *lp,
420 struct ldc_version *vp)
422 struct ldc_packet *p;
423 unsigned long new_tail;
425 p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
426 vp, sizeof(*vp), &new_tail);
428 ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
429 vp->major, vp->minor);
431 return send_tx_packet(lp, p, new_tail);
436 static int send_rts(struct ldc_channel *lp)
438 struct ldc_packet *p;
439 unsigned long new_tail;
441 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
444 p->env = lp->cfg.mode;
448 ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
451 return send_tx_packet(lp, p, new_tail);
456 static int send_rtr(struct ldc_channel *lp)
458 struct ldc_packet *p;
459 unsigned long new_tail;
461 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
464 p->env = lp->cfg.mode;
467 ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
470 return send_tx_packet(lp, p, new_tail);
475 static int send_rdx(struct ldc_channel *lp)
477 struct ldc_packet *p;
478 unsigned long new_tail;
480 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
484 p->seqid = ++lp->snd_nxt;
485 p->u.r.ackid = lp->rcv_nxt;
487 ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
488 p->env, p->seqid, p->u.r.ackid);
490 return send_tx_packet(lp, p, new_tail);
495 static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
497 struct ldc_packet *p;
498 unsigned long new_tail;
501 p = data_get_tx_packet(lp, &new_tail);
504 memset(p, 0, sizeof(*p));
505 p->type = data_pkt->type;
507 p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
508 p->seqid = lp->snd_nxt + 1;
509 p->u.r.ackid = lp->rcv_nxt;
511 ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
512 p->type, p->ctrl, p->seqid, p->u.r.ackid);
514 err = send_tx_packet(lp, p, new_tail);
521 static int ldc_abort(struct ldc_channel *lp)
523 unsigned long hv_err;
525 ldcdbg(STATE, "ABORT\n");
527 /* We report but do not act upon the hypervisor errors because
528 * there really isn't much we can do if they fail at this point.
529 */
530 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
532 printk(KERN_ERR PFX "ldc_abort: "
533 "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
534 lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);
536 hv_err = sun4v_ldc_tx_get_state(lp->id,
541 printk(KERN_ERR PFX "ldc_abort: "
542 "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
545 hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
547 printk(KERN_ERR PFX "ldc_abort: "
548 "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
549 lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);
551 /* Refetch the RX queue state as well, because we could be invoked
552 * here in the queue processing context.
553 */
554 hv_err = sun4v_ldc_rx_get_state(lp->id,
559 printk(KERN_ERR PFX "ldc_abort: "
560 "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
566 static struct ldc_version *find_by_major(u16 major)
568 struct ldc_version *ret = NULL;
571 for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
572 struct ldc_version *v = &ver_arr[i];
573 if (v->major <= major) {
581 static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
583 struct ldc_version *vap;
586 ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
587 vp->major, vp->minor);
589 if (lp->hs_state == LDC_HS_GOTVERS) {
590 lp->hs_state = LDC_HS_OPEN;
591 memset(&lp->ver, 0, sizeof(lp->ver));
594 vap = find_by_major(vp->major);
596 err = send_version_nack(lp, 0, 0);
597 } else if (vap->major != vp->major) {
598 err = send_version_nack(lp, vap->major, vap->minor);
600 struct ldc_version ver = *vp;
601 if (ver.minor > vap->minor)
602 ver.minor = vap->minor;
603 err = send_version_ack(lp, &ver);
606 lp->hs_state = LDC_HS_GOTVERS;
610 return ldc_abort(lp);
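/* Negotiation example (ver_arr above only carries {1, 0}): a peer
 * offering VERS INFO {1, 3} gets an ACK for {1, 0}, the minor being
 * clamped to ours; a peer offering {2, 0} gets a NACK carrying {1, 0},
 * inviting it to retry lower (see process_ver_nack() below); and a peer
 * whose major is below every entry gets NACK {0, 0}, which makes the
 * other side abort the handshake.
 */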
615 static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
617 ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
618 vp->major, vp->minor);
620 if (lp->hs_state == LDC_HS_GOTVERS) {
621 if (lp->ver.major != vp->major ||
622 lp->ver.minor != vp->minor)
623 return ldc_abort(lp);
626 lp->hs_state = LDC_HS_GOTVERS;
629 return ldc_abort(lp);
633 static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
635 struct ldc_version *vap;
636 struct ldc_packet *p;
637 unsigned long new_tail;
639 if (vp->major == 0 && vp->minor == 0)
640 return ldc_abort(lp);
642 vap = find_by_major(vp->major);
644 return ldc_abort(lp);
646 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
647 vap, sizeof(*vap),
648 &new_tail);
649 if (!p)
650 return ldc_abort(lp);
652 return send_tx_packet(lp, p, new_tail);
655 static int process_version(struct ldc_channel *lp,
656 struct ldc_packet *p)
658 struct ldc_version *vp;
660 vp = (struct ldc_version *) p->u.u_data;
664 return process_ver_info(lp, vp);
667 return process_ver_ack(lp, vp);
670 return process_ver_nack(lp, vp);
673 return ldc_abort(lp);
677 static int process_rts(struct ldc_channel *lp,
678 struct ldc_packet *p)
680 ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
681 p->stype, p->seqid, p->env);
683 if (p->stype != LDC_INFO ||
684 lp->hs_state != LDC_HS_GOTVERS ||
685 p->env != lp->cfg.mode)
686 return ldc_abort(lp);
688 lp->snd_nxt = p->seqid;
689 lp->rcv_nxt = p->seqid;
690 lp->hs_state = LDC_HS_SENTRTR;
691 if (send_rtr(lp))
692 return ldc_abort(lp);
697 static int process_rtr(struct ldc_channel *lp,
698 struct ldc_packet *p)
700 ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
701 p->stype, p->seqid, p->env);
703 if (p->stype != LDC_INFO ||
704 p->env != lp->cfg.mode)
705 return ldc_abort(lp);
707 lp->snd_nxt = p->seqid;
708 lp->hs_state = LDC_HS_COMPLETE;
709 ldc_set_state(lp, LDC_STATE_CONNECTED);
715 static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
717 return lp->rcv_nxt + 1 == seqid;
720 static int process_rdx(struct ldc_channel *lp,
721 struct ldc_packet *p)
723 ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
724 p->stype, p->seqid, p->env, p->u.r.ackid);
726 if (p->stype != LDC_INFO ||
727 !(rx_seq_ok(lp, p->seqid)))
728 return ldc_abort(lp);
730 lp->rcv_nxt = p->seqid;
732 lp->hs_state = LDC_HS_COMPLETE;
733 ldc_set_state(lp, LDC_STATE_CONNECTED);
738 static int process_control_frame(struct ldc_channel *lp,
739 struct ldc_packet *p)
743 return process_version(lp, p);
746 return process_rts(lp, p);
749 return process_rtr(lp, p);
752 return process_rdx(lp, p);
755 return ldc_abort(lp);
759 static int process_error_frame(struct ldc_channel *lp,
760 struct ldc_packet *p)
762 return ldc_abort(lp);
765 static int process_data_ack(struct ldc_channel *lp,
766 struct ldc_packet *ack)
768 unsigned long head = lp->tx_acked;
769 u32 ackid = ack->u.r.ackid;
772 struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
774 head = tx_advance(lp, head);
776 if (p->seqid == ackid) {
780 if (head == lp->tx_tail)
781 return ldc_abort(lp);
787 static void send_events(struct ldc_channel *lp, unsigned int event_mask)
789 if (event_mask & LDC_EVENT_RESET)
790 lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
791 if (event_mask & LDC_EVENT_UP)
792 lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
793 if (event_mask & LDC_EVENT_DATA_READY)
794 lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
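/* Note that send_events() is called by ldc_rx()/ldc_tx() only after
 * lp->lock has been dropped, so the cfg.event callback may safely
 * re-enter the channel, e.g. by calling ldc_read() in response to
 * LDC_EVENT_DATA_READY.
 */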
797 static irqreturn_t ldc_rx(int irq, void *dev_id)
799 struct ldc_channel *lp = dev_id;
800 unsigned long orig_state, flags;
801 unsigned int event_mask;
803 spin_lock_irqsave(&lp->lock, flags);
805 orig_state = lp->chan_state;
807 /* We should probably check for hypervisor errors here and
808 * reset the LDC channel if we get one.
809 */
810 sun4v_ldc_rx_get_state(lp->id,
815 ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
816 orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
820 if (lp->cfg.mode == LDC_MODE_RAW &&
821 lp->chan_state == LDC_CHANNEL_UP) {
822 lp->hs_state = LDC_HS_COMPLETE;
823 ldc_set_state(lp, LDC_STATE_CONNECTED);
825 event_mask |= LDC_EVENT_UP;
827 orig_state = lp->chan_state;
830 /* If we are in reset state, flush the RX queue and ignore
833 if (lp->flags & LDC_FLAG_RESET) {
834 (void) __set_rx_head(lp, lp->rx_tail);
838 /* Once we finish the handshake, we let the ldc_read()
839 * paths do all of the control frame and state management.
840 * Just trigger the callback.
841 */
842 if (lp->hs_state == LDC_HS_COMPLETE) {
844 if (lp->chan_state != orig_state) {
845 unsigned int event = LDC_EVENT_RESET;
847 if (lp->chan_state == LDC_CHANNEL_UP)
848 event = LDC_EVENT_UP;
852 if (lp->rx_head != lp->rx_tail)
853 event_mask |= LDC_EVENT_DATA_READY;
858 if (lp->chan_state != orig_state)
861 while (lp->rx_head != lp->rx_tail) {
862 struct ldc_packet *p;
866 p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
870 err = process_control_frame(lp, p);
876 event_mask |= LDC_EVENT_DATA_READY;
881 err = process_error_frame(lp, p);
893 new += LDC_PACKET_SIZE;
894 if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
898 err = __set_rx_head(lp, new);
899 if (err < 0)
900 (void) ldc_abort(lp);
903 if (lp->hs_state == LDC_HS_COMPLETE)
904 goto handshake_complete;
908 spin_unlock_irqrestore(&lp->lock, flags);
910 send_events(lp, event_mask);
915 static irqreturn_t ldc_tx(int irq, void *dev_id)
917 struct ldc_channel *lp = dev_id;
918 unsigned long flags, orig_state;
919 unsigned int event_mask = 0;
921 spin_lock_irqsave(&lp->lock, flags);
923 orig_state = lp->chan_state;
925 /* We should probably check for hypervisor errors here and
926 * reset the LDC channel if we get one.
927 */
928 sun4v_ldc_tx_get_state(lp->id,
933 ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
934 orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
936 if (lp->cfg.mode == LDC_MODE_RAW &&
937 lp->chan_state == LDC_CHANNEL_UP) {
938 lp->hs_state = LDC_HS_COMPLETE;
939 ldc_set_state(lp, LDC_STATE_CONNECTED);
941 event_mask |= LDC_EVENT_UP;
944 spin_unlock_irqrestore(&lp->lock, flags);
946 send_events(lp, event_mask);
951 /* XXX ldc_alloc() and ldc_free() need to run under a mutex so
952 * XXX that addition to and removal from the ldc_channel_list are
953 * XXX atomic; otherwise the __ldc_channel_exists() check is
954 * XXX totally pointless as another thread can slip into ldc_alloc()
955 * XXX and add a channel with the same ID. There also needs to be
956 * XXX a spinlock for ldc_channel_list.
957 */
958 static HLIST_HEAD(ldc_channel_list);
960 static int __ldc_channel_exists(unsigned long id)
962 struct ldc_channel *lp;
964 hlist_for_each_entry(lp, &ldc_channel_list, list) {
971 static int alloc_queue(const char *name, unsigned long num_entries,
972 struct ldc_packet **base, unsigned long *ra)
974 unsigned long size, order;
977 size = num_entries * LDC_PACKET_SIZE;
978 order = get_order(size);
980 q = (void *) __get_free_pages(GFP_KERNEL, order);
982 printk(KERN_ERR PFX "Alloc of %s queue failed with "
983 "size=%lu order=%lu\n", name, size, order);
987 memset(q, 0, PAGE_SIZE << order);
995 static void free_queue(unsigned long num_entries, struct ldc_packet *q)
997 unsigned long size, order;
1002 size = num_entries * LDC_PACKET_SIZE;
1003 order = get_order(size);
1005 free_pages((unsigned long)q, order);
1008 static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
1010 u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
1011 /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
1013 cookie &= ~COOKIE_PGSZ_CODE;
1015 return (cookie >> (13ULL + (szcode * 3ULL)));
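/* Worked example (assuming pagesize_code() below maps 8K pages to code 0,
 * matching the sun4v encoding): make_cookie() builds
 * (pgsz_code << 60) | (index << PAGE_SHIFT) | page_offset, so index 5
 * with offset 0x40 yields cookie 0xa040; masking off COOKIE_PGSZ_CODE
 * and shifting right by 13 + 3 * 0 = 13 recovers index 5, the offset
 * bits falling away in the shift.
 */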
1018 struct ldc_demap_arg {
1019 struct ldc_iommu *ldc_iommu;
1024 static void ldc_demap(void *arg, unsigned long entry, unsigned long npages)
1026 struct ldc_demap_arg *ldc_demap_arg = arg;
1027 struct ldc_iommu *iommu = ldc_demap_arg->ldc_iommu;
1028 unsigned long id = ldc_demap_arg->id;
1029 u64 cookie = ldc_demap_arg->cookie;
1030 struct ldc_mtable_entry *base;
1031 unsigned long i, shift;
1033 shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
1034 base = iommu->page_table + entry;
1035 for (i = 0; i < npages; i++) {
1037 sun4v_ldc_revoke(id, cookie + (i << shift),
1043 /* XXX Make this configurable... XXX */
1044 #define LDC_IOTABLE_SIZE (8 * 1024)
1046 struct iommu_tbl_ops ldc_iommu_ops = {
1047 .cookie_to_index = ldc_cookie_to_index,
1051 static void setup_ldc_pool_hash(void)
1054 static bool do_once;
1059 for_each_possible_cpu(i)
1060 per_cpu(ldc_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
1064 static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
1066 unsigned long sz, num_tsb_entries, tsbsize, order;
1067 struct ldc_iommu *ldc_iommu = &lp->iommu;
1068 struct iommu_table *iommu = &ldc_iommu->iommu_table;
1069 struct ldc_mtable_entry *table;
1070 unsigned long hv_err;
1073 num_tsb_entries = LDC_IOTABLE_SIZE;
1074 tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
1075 setup_ldc_pool_hash();
1076 spin_lock_init(&ldc_iommu->lock);
1078 sz = num_tsb_entries / 8;
1079 sz = (sz + 7UL) & ~7UL;
1080 iommu->map = kzalloc(sz, GFP_KERNEL);
1082 printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
1085 iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
1086 &ldc_iommu_ops, false, 1);
1088 order = get_order(tsbsize);
1090 table = (struct ldc_mtable_entry *)
1091 __get_free_pages(GFP_KERNEL, order);
1094 printk(KERN_ERR PFX "Alloc of MTE table failed, "
1095 "size=%lu order=%lu\n", tsbsize, order);
1099 memset(table, 0, PAGE_SIZE << order);
1101 ldc_iommu->page_table = table;
1103 hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
1107 goto out_free_table;
1112 free_pages((unsigned long) table, order);
1113 ldc_iommu->page_table = NULL;
1122 static void ldc_iommu_release(struct ldc_channel *lp)
1124 struct ldc_iommu *ldc_iommu = &lp->iommu;
1125 struct iommu_table *iommu = &ldc_iommu->iommu_table;
1126 unsigned long num_tsb_entries, tsbsize, order;
1128 (void) sun4v_ldc_set_map_table(lp->id, 0, 0);
1130 num_tsb_entries = iommu->poolsize * iommu->nr_pools;
1131 tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
1132 order = get_order(tsbsize);
1134 free_pages((unsigned long) ldc_iommu->page_table, order);
1135 ldc_iommu->page_table = NULL;
1141 struct ldc_channel *ldc_alloc(unsigned long id,
1142 const struct ldc_channel_config *cfgp,
1146 struct ldc_channel *lp;
1147 const struct ldc_mode_ops *mops;
1148 unsigned long dummy1, dummy2, hv_err;
1153 if (!ldom_domaining_enabled)
1162 switch (cfgp->mode) {
1165 mss = LDC_PACKET_SIZE;
1168 case LDC_MODE_UNRELIABLE:
1170 mss = LDC_PACKET_SIZE - 8;
1173 case LDC_MODE_STREAM:
1175 mss = LDC_PACKET_SIZE - 8 - 8;
1182 if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
1185 hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
1187 if (hv_err == HV_ECHANNEL)
1191 if (__ldc_channel_exists(id))
1196 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
1201 spin_lock_init(&lp->lock);
1205 err = ldc_iommu_init(name, lp);
1214 lp->cfg.mtu = LDC_DEFAULT_MTU;
1216 if (lp->cfg.mode == LDC_MODE_STREAM) {
1217 mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
1220 goto out_free_iommu;
1222 lp->mssbuf = mssbuf;
1225 lp->event_arg = event_arg;
1227 /* XXX allow setting via ldc_channel_config to override defaults
1228 * XXX or use some formula based upon mtu
1229 */
1230 lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
1231 lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
1233 err = alloc_queue("TX", lp->tx_num_entries,
1234 &lp->tx_base, &lp->tx_ra);
1236 goto out_free_mssbuf;
1238 err = alloc_queue("RX", lp->rx_num_entries,
1239 &lp->rx_base, &lp->rx_ra);
1243 lp->flags |= LDC_FLAG_ALLOCED_QUEUES;
1245 lp->hs_state = LDC_HS_CLOSED;
1246 ldc_set_state(lp, LDC_STATE_INIT);
1248 INIT_HLIST_NODE(&lp->list);
1249 hlist_add_head(&lp->list, &ldc_channel_list);
1251 INIT_HLIST_HEAD(&lp->mh_list);
1253 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
1254 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
1256 err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
1257 lp->rx_irq_name, lp);
1261 err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
1262 lp->tx_irq_name, lp);
1264 free_irq(lp->cfg.rx_irq, lp);
1271 free_queue(lp->tx_num_entries, lp->tx_base);
1277 ldc_iommu_release(lp);
1283 return ERR_PTR(err);
1285 EXPORT_SYMBOL(ldc_alloc);
1287 void ldc_unbind(struct ldc_channel *lp)
1289 if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
1290 free_irq(lp->cfg.rx_irq, lp);
1291 free_irq(lp->cfg.tx_irq, lp);
1292 lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
1295 if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
1296 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1297 sun4v_ldc_rx_qconf(lp->id, 0, 0);
1298 lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
1300 if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
1301 free_queue(lp->tx_num_entries, lp->tx_base);
1302 free_queue(lp->rx_num_entries, lp->rx_base);
1303 lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
1306 ldc_set_state(lp, LDC_STATE_INIT);
1308 EXPORT_SYMBOL(ldc_unbind);
1310 void ldc_free(struct ldc_channel *lp)
1313 hlist_del(&lp->list);
1315 ldc_iommu_release(lp);
1319 EXPORT_SYMBOL(ldc_free);
1321 /* Bind the channel. This registers the LDC queues with
1322 * the hypervisor and puts the channel into a pseudo-listening
1323 * state. This does not initiate a handshake; ldc_connect() does
1324 * that.
1325 */
1326 int ldc_bind(struct ldc_channel *lp)
1328 unsigned long hv_err, flags;
1331 if (lp->state != LDC_STATE_INIT)
1334 spin_lock_irqsave(&lp->lock, flags);
1336 enable_irq(lp->cfg.rx_irq);
1337 enable_irq(lp->cfg.tx_irq);
1339 lp->flags |= LDC_FLAG_REGISTERED_IRQS;
1342 hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
1346 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
1350 hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
1354 hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
1358 lp->flags |= LDC_FLAG_REGISTERED_QUEUES;
1360 hv_err = sun4v_ldc_tx_get_state(lp->id,
1368 lp->tx_acked = lp->tx_head;
1370 lp->hs_state = LDC_HS_OPEN;
1371 ldc_set_state(lp, LDC_STATE_BOUND);
1373 spin_unlock_irqrestore(&lp->lock, flags);
1378 lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
1379 sun4v_ldc_rx_qconf(lp->id, 0, 0);
1382 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1385 lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
1386 free_irq(lp->cfg.tx_irq, lp);
1387 free_irq(lp->cfg.rx_irq, lp);
1389 spin_unlock_irqrestore(&lp->lock, flags);
1393 EXPORT_SYMBOL(ldc_bind);
1395 int ldc_connect(struct ldc_channel *lp)
1397 unsigned long flags;
1400 if (lp->cfg.mode == LDC_MODE_RAW)
1403 spin_lock_irqsave(&lp->lock, flags);
1405 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
1406 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
1407 lp->hs_state != LDC_HS_OPEN)
1408 err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
1410 err = start_handshake(lp);
1412 spin_unlock_irqrestore(&lp->lock, flags);
1416 EXPORT_SYMBOL(ldc_connect);
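/* A minimal usage sketch (hypothetical client code, not part of this
 * driver): allocate a channel, bind it, then kick off the handshake.
 * The channel ID and IRQs are placeholders; real consumers take them
 * from the machine description.  Note that ldc_alloc() rejects a NULL
 * event callback or event_arg.
 */
struct example_state {
	struct ldc_channel *lp;
};

static void example_event(void *arg, int event)
{
	struct example_state *s = arg;

	if (event == LDC_EVENT_UP)
		printk(KERN_INFO PFX "example: channel %lu is up\n",
		       s->lp->id);
	/* LDC_EVENT_DATA_READY would be handled by calling ldc_read(). */
}

static int __maybe_unused example_open_channel(struct example_state *s,
					       unsigned long id,
					       unsigned int rx_irq,
					       unsigned int tx_irq)
{
	struct ldc_channel_config cfg = {
		.event	= example_event,
		.mtu	= 0,		/* 0 selects LDC_DEFAULT_MTU */
		.mode	= LDC_MODE_UNRELIABLE,
		.rx_irq	= rx_irq,
		.tx_irq	= tx_irq,
	};
	struct ldc_channel *lp;
	int err;

	lp = ldc_alloc(id, &cfg, s, "example");
	if (IS_ERR(lp))
		return PTR_ERR(lp);
	s->lp = lp;

	err = ldc_bind(lp);
	if (!err)
		err = ldc_connect(lp);	/* sends the first VERS INFO */
	if (err)
		ldc_free(lp);
	return err;
}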
1418 int ldc_disconnect(struct ldc_channel *lp)
1420 unsigned long hv_err, flags;
1423 if (lp->cfg.mode == LDC_MODE_RAW)
1426 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
1427 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
1430 spin_lock_irqsave(&lp->lock, flags);
1433 hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
1437 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
1441 hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
1445 hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
1449 ldc_set_state(lp, LDC_STATE_BOUND);
1450 lp->hs_state = LDC_HS_OPEN;
1451 lp->flags |= LDC_FLAG_RESET;
1453 spin_unlock_irqrestore(&lp->lock, flags);
1458 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1459 sun4v_ldc_rx_qconf(lp->id, 0, 0);
1460 free_irq(lp->cfg.tx_irq, lp);
1461 free_irq(lp->cfg.rx_irq, lp);
1462 lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
1463 LDC_FLAG_REGISTERED_QUEUES);
1464 ldc_set_state(lp, LDC_STATE_INIT);
1466 spin_unlock_irqrestore(&lp->lock, flags);
1470 EXPORT_SYMBOL(ldc_disconnect);
1472 int ldc_state(struct ldc_channel *lp)
1476 EXPORT_SYMBOL(ldc_state);
1478 static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
1480 struct ldc_packet *p;
1481 unsigned long new_tail;
1484 if (size > LDC_PACKET_SIZE)
1487 p = data_get_tx_packet(lp, &new_tail);
1491 memcpy(p, buf, size);
1493 err = send_tx_packet(lp, p, new_tail);
1500 static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
1502 struct ldc_packet *p;
1503 unsigned long hv_err, new;
1506 if (size < LDC_PACKET_SIZE)
1509 hv_err = sun4v_ldc_rx_get_state(lp->id,
1514 return ldc_abort(lp);
1516 if (lp->chan_state == LDC_CHANNEL_DOWN ||
1517 lp->chan_state == LDC_CHANNEL_RESETTING)
1520 if (lp->rx_head == lp->rx_tail)
1523 p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
1524 memcpy(buf, p, LDC_PACKET_SIZE);
1526 new = rx_advance(lp, lp->rx_head);
1529 err = __set_rx_head(lp, new);
1533 err = LDC_PACKET_SIZE;
1538 static const struct ldc_mode_ops raw_ops = {
1543 static int write_nonraw(struct ldc_channel *lp, const void *buf,
1546 unsigned long hv_err, tail;
1547 unsigned int copied;
1551 hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
1553 if (unlikely(hv_err))
1556 if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
1557 return ldc_abort(lp);
1559 if (!tx_has_space_for(lp, size))
1565 while (copied < size) {
1566 struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
1567 u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
1573 p->stype = LDC_INFO;
1576 data_len = size - copied;
1577 if (data_len > lp->mss)
1580 BUG_ON(data_len > LDC_LEN);
1582 p->env = (data_len |
1583 (copied == 0 ? LDC_START : 0) |
1584 (data_len == size - copied ? LDC_STOP : 0));
1588 ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
1595 memcpy(data, buf, data_len);
1599 tail = tx_advance(lp, tail);
1602 err = set_tx_tail(lp, tail);
1611 static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
1612 struct ldc_packet *first_frag)
1617 lp->rcv_nxt = first_frag->seqid - 1;
1619 err = send_data_nack(lp, p);
1623 err = __set_rx_head(lp, lp->rx_tail);
1625 return ldc_abort(lp);
1630 static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
1632 if (p->stype & LDC_ACK) {
1633 int err = process_data_ack(lp, p);
1637 if (p->stype & LDC_NACK)
1638 return ldc_abort(lp);
1643 static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
1645 unsigned long dummy;
1648 ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
1649 cur_head, lp->rx_head, lp->rx_tail);
1650 while (limit-- > 0) {
1651 unsigned long hv_err;
1653 hv_err = sun4v_ldc_rx_get_state(lp->id,
1658 return ldc_abort(lp);
1660 if (lp->chan_state == LDC_CHANNEL_DOWN ||
1661 lp->chan_state == LDC_CHANNEL_RESETTING)
1664 if (cur_head != lp->rx_tail) {
1665 ldcdbg(DATA, "DATA WAIT DONE "
1666 "head[%lx] tail[%lx] chan_state[%lx]\n",
1667 dummy, lp->rx_tail, lp->chan_state);
1676 static int rx_set_head(struct ldc_channel *lp, unsigned long head)
1678 int err = __set_rx_head(lp, head);
1681 return ldc_abort(lp);
1687 static void send_data_ack(struct ldc_channel *lp)
1689 unsigned long new_tail;
1690 struct ldc_packet *p;
1692 p = data_get_tx_packet(lp, &new_tail);
1696 memset(p, 0, sizeof(*p));
1700 p->seqid = lp->snd_nxt + 1;
1701 p->u.r.ackid = lp->rcv_nxt;
1703 err = send_tx_packet(lp, p, new_tail);
1709 static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
1711 struct ldc_packet *first_frag;
1712 unsigned long hv_err, new;
1715 hv_err = sun4v_ldc_rx_get_state(lp->id,
1720 return ldc_abort(lp);
1722 if (lp->chan_state == LDC_CHANNEL_DOWN ||
1723 lp->chan_state == LDC_CHANNEL_RESETTING)
1726 if (lp->rx_head == lp->rx_tail)
1733 struct ldc_packet *p;
1736 BUG_ON(new == lp->rx_tail);
1737 p = lp->rx_base + (new / LDC_PACKET_SIZE);
1739 ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
1749 if (unlikely(!rx_seq_ok(lp, p->seqid))) {
1750 err = rx_bad_seq(lp, p, first_frag);
1755 if (p->type & LDC_CTRL) {
1756 err = process_control_frame(lp, p);
1762 lp->rcv_nxt = p->seqid;
1764 if (!(p->type & LDC_DATA)) {
1765 new = rx_advance(lp, new);
1768 if (p->stype & (LDC_ACK | LDC_NACK)) {
1769 err = data_ack_nack(lp, p);
1773 if (!(p->stype & LDC_INFO)) {
1774 new = rx_advance(lp, new);
1775 err = rx_set_head(lp, new);
1781 pkt_len = p->env & LDC_LEN;
1783 /* Every initial packet starts with the START bit set.
1785 * Singleton packets will have both START+STOP set.
1787 * Fragments will have START set in the first frame, STOP
1788 * set in the last frame, and neither bit set in middle
1789 * frames of the packet.
1791 * Therefore if we are at the beginning of a packet and
1792 * we don't see START, or we are in the middle of a fragmented
1793 * packet and do see START, we are unsynchronized and should
1794 * flush the RX queue.
1795 */
1796 if ((first_frag == NULL && !(p->env & LDC_START)) ||
1797 (first_frag != NULL && (p->env & LDC_START))) {
1799 new = rx_advance(lp, new);
1801 err = rx_set_head(lp, new);
1811 if (pkt_len > size - copied) {
1812 /* User didn't give us a big enough buffer,
1813 * what to do? This is a pretty serious error.
1815 * Since we haven't updated the RX ring head to
1816 * consume any of the packets, signal the error
1817 * to the user and just leave the RX ring alone.
1819 * This seems the best behavior because this allows
1820 * a user of the LDC layer to start with a small
1821 * RX buffer for ldc_read() calls and use -EMSGSIZE
1822 * as a cue to enlarge its read buffer.
1823 */
1828 /* Ok, we are gonna eat this one. */
1829 new = rx_advance(lp, new);
1832 (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
1833 p->u.u_data : p->u.r.r_data), pkt_len);
1837 if (p->env & LDC_STOP)
1841 if (new == lp->rx_tail) {
1842 err = rx_data_wait(lp, new);
1849 err = rx_set_head(lp, new);
1851 if (err && first_frag)
1852 lp->rcv_nxt = first_frag->seqid - 1;
1856 if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
1863 static const struct ldc_mode_ops nonraw_ops = {
1864 .write = write_nonraw,
1865 .read = read_nonraw,
1868 static int write_stream(struct ldc_channel *lp, const void *buf,
1871 if (size > lp->cfg.mtu)
1873 return write_nonraw(lp, buf, size);
1876 static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
1878 if (!lp->mssbuf_len) {
1879 int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
1883 lp->mssbuf_len = err;
1887 if (size > lp->mssbuf_len)
1888 size = lp->mssbuf_len;
1889 memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);
1891 lp->mssbuf_off += size;
1892 lp->mssbuf_len -= size;
1897 static const struct ldc_mode_ops stream_ops = {
1898 .write = write_stream,
1899 .read = read_stream,
1902 int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
1904 unsigned long flags;
1913 spin_lock_irqsave(&lp->lock, flags);
1915 if (lp->hs_state != LDC_HS_COMPLETE)
1918 err = lp->mops->write(lp, buf, size);
1920 spin_unlock_irqrestore(&lp->lock, flags);
1924 EXPORT_SYMBOL(ldc_write);
1926 int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
1928 unsigned long flags;
1937 spin_lock_irqsave(&lp->lock, flags);
1939 if (lp->hs_state != LDC_HS_COMPLETE)
1942 err = lp->mops->read(lp, buf, size);
1944 spin_unlock_irqrestore(&lp->lock, flags);
1948 EXPORT_SYMBOL(ldc_read);
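/* Sketch of a DATA_READY consumer (hypothetical caller code): ldc_read()
 * returns the number of bytes delivered, 0 when the RX queue is empty,
 * -EAGAIN while the channel is down or resetting, and -EMSGSIZE from
 * non-raw mode when the buffer is too small for the pending packet.
 */
static void __maybe_unused example_drain(struct ldc_channel *lp)
{
	u8 buf[LDC_PACKET_SIZE];
	int err;

	while ((err = ldc_read(lp, buf, sizeof(buf))) > 0) {
		/* consume 'err' bytes from buf here */
	}
	if (err < 0 && err != -EAGAIN)
		printk(KERN_ERR PFX "ID[%lu] example read error %d\n",
		       lp->id, err);
}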
1950 static u64 pagesize_code(void)
1952 switch (PAGE_SIZE) {
1954 case (8ULL * 1024ULL):
1956 case (64ULL * 1024ULL):
1958 case (512ULL * 1024ULL):
1960 case (4ULL * 1024ULL * 1024ULL):
1962 case (32ULL * 1024ULL * 1024ULL):
1964 case (256ULL * 1024ULL * 1024ULL):
1969 static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
1971 return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
1972 (index << PAGE_SHIFT) |
1977 static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
1978 unsigned long npages)
1982 entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_table, npages,
1983 NULL, __this_cpu_read(ldc_pool_hash));
1984 if (unlikely(entry < 0))
1987 return iommu->page_table + entry;
1990 static u64 perm_to_mte(unsigned int map_perm)
1994 mte_base = pagesize_code();
1996 if (map_perm & LDC_MAP_SHADOW) {
1997 if (map_perm & LDC_MAP_R)
1998 mte_base |= LDC_MTE_COPY_R;
1999 if (map_perm & LDC_MAP_W)
2000 mte_base |= LDC_MTE_COPY_W;
2002 if (map_perm & LDC_MAP_DIRECT) {
2003 if (map_perm & LDC_MAP_R)
2004 mte_base |= LDC_MTE_READ;
2005 if (map_perm & LDC_MAP_W)
2006 mte_base |= LDC_MTE_WRITE;
2007 if (map_perm & LDC_MAP_X)
2008 mte_base |= LDC_MTE_EXEC;
2010 if (map_perm & LDC_MAP_IO) {
2011 if (map_perm & LDC_MAP_R)
2012 mte_base |= LDC_MTE_IOMMU_R;
2013 if (map_perm & LDC_MAP_W)
2014 mte_base |= LDC_MTE_IOMMU_W;
2020 static int pages_in_region(unsigned long base, long len)
2025 unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;
2027 len -= (new - base);
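/* Example: a 16-byte region that starts 8 bytes before a page boundary
 * spans two pages, so pages_in_region() returns 2 for it, while the
 * same 16 bytes fully inside one page count as a single page.
 */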
2035 struct cookie_state {
2036 struct ldc_mtable_entry *page_table;
2037 struct ldc_trans_cookie *cookies;
2044 static void fill_cookies(struct cookie_state *sp, unsigned long pa,
2045 unsigned long off, unsigned long len)
2048 unsigned long tlen, new = pa + PAGE_SIZE;
2051 sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;
2055 tlen = PAGE_SIZE - off;
2059 this_cookie = make_cookie(sp->pte_idx,
2060 pagesize_code(), off);
2064 if (this_cookie == sp->prev_cookie) {
2065 sp->cookies[sp->nc - 1].cookie_size += tlen;
2067 sp->cookies[sp->nc].cookie_addr = this_cookie;
2068 sp->cookies[sp->nc].cookie_size = tlen;
2071 sp->prev_cookie = this_cookie + tlen;
2080 static int sg_count_one(struct scatterlist *sg)
2082 unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
2083 long len = sg->length;
2085 if ((sg->offset | len) & (8UL - 1))
2088 return pages_in_region(base + sg->offset, len);
2091 static int sg_count_pages(struct scatterlist *sg, int num_sg)
2097 for (i = 0; i < num_sg; i++) {
2098 int err = sg_count_one(sg + i);
2107 int ldc_map_sg(struct ldc_channel *lp,
2108 struct scatterlist *sg, int num_sg,
2109 struct ldc_trans_cookie *cookies, int ncookies,
2110 unsigned int map_perm)
2112 unsigned long i, npages;
2113 struct ldc_mtable_entry *base;
2114 struct cookie_state state;
2115 struct ldc_iommu *iommu;
2118 if (map_perm & ~LDC_MAP_ALL)
2121 err = sg_count_pages(sg, num_sg);
2131 base = alloc_npages(iommu, npages);
2136 state.page_table = iommu->page_table;
2137 state.cookies = cookies;
2138 state.mte_base = perm_to_mte(map_perm);
2139 state.prev_cookie = ~(u64)0;
2140 state.pte_idx = (base - iommu->page_table);
2143 for (i = 0; i < num_sg; i++)
2144 fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
2145 sg[i].offset, sg[i].length);
2149 EXPORT_SYMBOL(ldc_map_sg);
2151 int ldc_map_single(struct ldc_channel *lp,
2152 void *buf, unsigned int len,
2153 struct ldc_trans_cookie *cookies, int ncookies,
2154 unsigned int map_perm)
2156 unsigned long npages, pa;
2157 struct ldc_mtable_entry *base;
2158 struct cookie_state state;
2159 struct ldc_iommu *iommu;
2161 if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
2165 if ((pa | len) & (8UL - 1))
2168 npages = pages_in_region(pa, len);
2172 base = alloc_npages(iommu, npages);
2177 state.page_table = iommu->page_table;
2178 state.cookies = cookies;
2179 state.mte_base = perm_to_mte(map_perm);
2180 state.prev_cookie = ~(u64)0;
2181 state.pte_idx = (base - iommu->page_table);
2183 fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
2184 BUG_ON(state.nc > ncookies);
2188 EXPORT_SYMBOL(ldc_map_single);
2191 static void free_npages(unsigned long id, struct ldc_iommu *iommu,
2192 u64 cookie, u64 size)
2194 unsigned long npages;
2195 struct ldc_demap_arg demap_arg;
2197 demap_arg.ldc_iommu = iommu;
2198 demap_arg.cookie = cookie;
2201 npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
2202 iommu_tbl_range_free(&iommu->iommu_table, cookie, npages, true,
2207 void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
2210 struct ldc_iommu *iommu = &lp->iommu;
2212 unsigned long flags;
2214 spin_lock_irqsave(&iommu->lock, flags);
2215 for (i = 0; i < ncookies; i++) {
2216 u64 addr = cookies[i].cookie_addr;
2217 u64 size = cookies[i].cookie_size;
2219 free_npages(lp->id, iommu, addr, size);
2221 spin_unlock_irqrestore(&iommu->lock, flags);
2223 EXPORT_SYMBOL(ldc_unmap);
2225 int ldc_copy(struct ldc_channel *lp, int copy_dir,
2226 void *buf, unsigned int len, unsigned long offset,
2227 struct ldc_trans_cookie *cookies, int ncookies)
2229 unsigned int orig_len;
2233 if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
2234 printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
2240 if ((ra | len | offset) & (8UL - 1)) {
2241 printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
2242 "ra[%lx] len[%x] offset[%lx]\n",
2243 lp->id, ra, len, offset);
2247 if (lp->hs_state != LDC_HS_COMPLETE ||
2248 (lp->flags & LDC_FLAG_RESET)) {
2249 printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
2250 "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
2255 for (i = 0; i < ncookies; i++) {
2256 unsigned long cookie_raddr = cookies[i].cookie_addr;
2257 unsigned long this_len = cookies[i].cookie_size;
2258 unsigned long actual_len;
2260 if (unlikely(offset)) {
2261 unsigned long this_off = offset;
2263 if (this_off > this_len)
2264 this_off = this_len;
2267 this_len -= this_off;
2270 cookie_raddr += this_off;
2277 unsigned long hv_err;
2279 hv_err = sun4v_ldc_copy(lp->id, copy_dir,
2281 this_len, &actual_len);
2282 if (unlikely(hv_err)) {
2283 printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
2286 if (lp->hs_state != LDC_HS_COMPLETE ||
2287 (lp->flags & LDC_FLAG_RESET))
2293 cookie_raddr += actual_len;
2296 if (actual_len == this_len)
2299 this_len -= actual_len;
2306 /* It is caller policy what to do about short copies.
2307 * For example, a networking driver can declare the
2308 * packet a runt and drop it.
2309 */
2311 return orig_len - len;
2313 EXPORT_SYMBOL(ldc_copy);
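/* Sketch of pulling a peer's exported buffer into local memory
 * (hypothetical caller code; the cookies would have arrived in a
 * higher-level message from the peer).  Per the checks above, the
 * local buffer's real address, the length and the offset must all
 * be multiples of 8.
 */
static int __maybe_unused example_pull(struct ldc_channel *lp,
				       struct ldc_trans_cookie *cookies,
				       int ncookies, void *buf,
				       unsigned int len)
{
	int copied = ldc_copy(lp, LDC_COPY_IN, buf, len, 0,
			      cookies, ncookies);

	if (copied < 0)
		return copied;
	/* Short copies are caller policy; this example treats them
	 * as errors.
	 */
	return copied == len ? 0 : -EIO;
}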
2315 void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
2316 struct ldc_trans_cookie *cookies, int *ncookies,
2317 unsigned int map_perm)
2322 if (len & (8UL - 1))
2323 return ERR_PTR(-EINVAL);
2325 buf = kzalloc(len, GFP_KERNEL);
2327 return ERR_PTR(-ENOMEM);
2329 err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
2332 return ERR_PTR(err);
2338 EXPORT_SYMBOL(ldc_alloc_exp_dring);
2340 void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
2341 struct ldc_trans_cookie *cookies, int ncookies)
2343 ldc_unmap(lp, cookies, ncookies);
2346 EXPORT_SYMBOL(ldc_free_exp_dring);
2348 static int __init ldc_init(void)
2350 unsigned long major, minor;
2351 struct mdesc_handle *hp;
2360 mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
2362 if (mp == MDESC_NODE_NULL)
2365 v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
2371 if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
2372 printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
2376 printk(KERN_INFO "%s", version);
2379 printk(KERN_INFO PFX "Domaining disabled.\n");
2382 ldom_domaining_enabled = 1;
2390 core_initcall(ldc_init);