]> git.saurik.com Git - apple/xnu.git/blob - osfmk/mach/flipc_cb.h
662cc24f9262a783417522081346cef539969d8a
[apple/xnu.git] / osfmk / mach / flipc_cb.h
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 *
33 */
34 /*
35 * HISTORY
36 *
37 * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez
38 * Import of Mac OS X kernel (~semeria)
39 *
40 * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez
41 * Import of OSF Mach kernel (~mburg)
42 *
43 * Revision 1.1.11.1 1996/09/17 16:34:42 bruel
44 * fixed types.
45 * [96/09/17 bruel]
46 *
47 * Revision 1.1.6.1 1995/06/13 18:20:10 sjs
48 * Merged from flipc_shared.
49 * [95/06/07 sjs]
50 *
51 * Revision 1.1.3.14 1995/05/19 00:58:14 sjs
52 * Added send_ready to shared area, used for fast check if there is something
53 * to do (and prevents the cache from getting stirred).
54 * [95/05/18 sjs]
55 *
56 * Revision 1.1.3.13 1995/05/16 20:46:28 randys
57 * Export performance valid information through performance
58 * structure rather than kernel configuration section.
59 * [95/05/16 randys]
60 *
61 * Added performance (FLIPC_PERF) config information to
62 * kernel_configuration section of comm buffer, so that user
63 * programs can find out if this information is being gathered.
64 * [95/05/16 randys]
65 *
66 * Revision 1.1.3.12 1995/05/15 14:26:54 randys
67 * Updated comments on use of acquire pointer (it's completely
68 * ignored if dpb is set) and added macros for testing !dpb and
69 * enabled at the same time.
70 * [95/05/11 randys]
71 *
72 * Change pme_process_ptr ==> sme_process_ptr (since it's being read
73 * by AIL now).
74 * [95/05/11 randys]
75 *
76 * Added private copied of release and process pointers.
77 * [95/05/11 randys]
78 *
79 * Rearrange endpoint structure to separate data with importantly
80 * different access patterns into different cache lines. This
81 * involved duplicating some (effectively constant) entries, and
82 * creating two versions of some macros.
83 * [95/05/11 randys]
84 *
85 * Revision 1.1.3.11 1995/05/08 16:06:33 randys
86 * Added comment explaining that an endpoint bufferlist must always
87 * have valid buffer pointers in all of its entries, to keep
88 * FLIPC_endpoint_buffer_available from going off the deep end. No
89 * code changes.
90 * [95/04/18 randys]
91 *
92 * Revision 1.1.3.10 1995/04/05 21:21:52 randys
93 * Added a field to the buffer control structure holding the
94 * scheduling policy chosen for the allocations lock.
95 * [95/04/05 randys]
96 *
97 * Revision 1.1.3.9 1995/03/23 20:35:19 randys
98 * Added comments indicating duplication of declarations of
99 * flipc_cb_base & flipc_cb_size in this file and in flipc_usermsg.h
100 * Modified declaration of flipc_cb_size to be unsigned long.
101 * [95/03/21 randys]
102 *
103 * Revision 1.1.3.8 1995/02/23 21:32:42 randys
104 * Added space for kernel configuration in communications buffer
105 * control structure.
106 * [95/02/22 randys]
107 *
108 * Revision 1.1.3.7 1995/02/21 17:22:58 randys
109 * Re-indented code to four space indentation
110 * [1995/02/21 16:25:32 randys]
111 *
112 * Revision 1.1.3.6 1995/02/13 22:57:29 randys
113 * Replaced all of NEXT_{ACQUIRE,RELEASE,PROCESS}_PTR macros with a
114 * single NEXT_BUFFERLIST_PTR macro.
115 * [95/02/03 randys]
116 *
117 * Revision 1.1.3.5 1995/01/26 21:01:44 randys
118 * Add performance structure into CB.
119 * [1995/01/24 21:14:31 randys]
120 *
121 * Added flag in epgroup structure to note that epgroup
122 * has a semaphore associated with it.
123 * [1995/01/19 23:02:13 randys]
124 *
125 * Add a space in the comm buffer header for the null_destination
126 * the ME sets up for the AIL. Get rid of
127 * FLIPC_ADDRESS_ENDPOINT_PTR (it isn't used)
128 * [1995/01/19 20:22:30 randys]
129 *
130 * Up the comm buffer size to 1 megabyte
131 * [1995/01/17 22:23:27 randys]
132 *
133 * Revision 1.1.3.4 1995/01/12 21:19:01 randys
134 * Minor commenting changes from dlb
135 * [1995/01/06 18:18:12 randys]
136 *
137 * Revision 1.1.3.3 1994/12/22 16:23:57 randys
138 * Fixed calculation of number of buffers on an endpoint
139 * to take size of buffer pointers into account.
140 * [1994/12/21 16:19:55 randys]
141 *
142 * Revision 1.1.3.2 1994/12/20 19:01:56 randys
143 * Moved definition of flipc_simple_lock to flipc_cb.h
144 * [1994/12/20 17:34:41 randys]
145 *
146 * Added a simple lock in the comm buffer to use for the
147 * allocations lock, along with directions as to how
148 * to use it (not like a normal simple lock).
149 * [1994/12/20 15:27:25 randys]
150 *
151 * Added error log into communications buffer control
152 * structure, and changed FLIPC_ADDRESS_ENDPOINT_PTR to
153 * correctly compute the endpoint pointer based on the
154 * new ctl structure layout.
155 * [1994/12/19 23:47:45 randys]
156 *
157 * Added filename in comment at top of each file
158 * [1994/12/19 20:28:20 randys]
159 *
160 * Add version field to epgroup to check races on buffer acquire
161 * from epgroup.
162 * [1994/12/19 18:05:04 randys]
163 *
164 * Revision 1.1.3.1 1994/12/12 17:46:12 randys
165 * Putting initial flipc implementation under flipc_shared
166 * [1994/12/12 16:27:46 randys]
167 *
168 * Revision 1.1.1.2 1994/12/11 23:11:18 randys
169 * Initial flipc code checkin
170 *
171 * $EndLog$
172 */
173
174 /*
175 * mach/flipc_cb.h
176 *
177 * This file is intended to be the data structure layout for the flipc
 178  * communications buffer, both for the KKT implementation and
179 * for the eventual paragon implementation. This file should include
180 * all of the information necessary for either humans or machines to
181 * understand the data structure layout.
182 *
183 * The communications buffer is the wired section of memory used for
184 * communication between the flipc applications interface layer and
 185  * the flipc message engine. No structures in it are visible to the
186 * user; the applications interface layer mediates all user access to
187 * the CB.
188 */
189
190 #ifndef _MACH_FLIPC_CB_H_
191 #define _MACH_FLIPC_CB_H_
192
193 #include <mach/flipc_types.h>
194
195 /*
196 * Flipc naming and argument ordering conventions (this applies mainly to
197 * user-interface.h, but seems inappropriate in a user-visible header file):
198 *
199 * All objects prefixed with "flipc"; uppercase for user-visible
200 * objects, lower case for internal ones.
201 *
202 * Types created with typedef will have _t suffixes.
203 *
204 * Words will be separated by '_'.
205 *
206 * Macro definitions will be all in caps.
207 *
208 * Enum members will have their initial letter (after Flipc) capitalized.
209 *
210 *
211 * For user-visible routines:
212 *
213 * The first word following the "flipc" will be the flipc object type that
214 * that routine operates on (specifically "domain", "epgroup",
215 * "endpoint", or "buffer").
216 *
217 * The object named by the first word of the call will, if an argument
218 * to the call, be the first argument.
219 *
220 * Output variables passed as pointers in the arglist will come last.
221 */
222
223 /*
224 * The kinds of objects that exist in the communications buffer are:
225 *
226 * Endpoints -- Used for sending or receiving.
227 * Buffers -- Composed of a buffer header and buffer data.
228 * Endpoint groups -- Used for collecting multiple numbers of endpoints
229 * together for a select like operation.
230 */
231
232 /*
233 * We can't use general pointers inside the communications buffer,
234 * since the address space on either side of the interface is
235 * different. The places where we could use pointers are:
236 *
237 * *) From endpoint sets to endpoints.
238 * *) From endpoints to buffers.
239 *
240 * The kinds of pointers we could use are:
241 * *) Byte offset from the beginning of the comm buffer. This
242 * is simple, but has the disadvantage of allowing the user to
243 * play games with pointing endpoint buffer pointers into data
244 * space, & etc.
245 * *) Rigid arrays of each type of object, with the object
246 * "pointer" being an index into the array. This avoids the
247 * above problem, but complicates memory allocation (forces
248 * allocation to be contiguous, which may force pre-deciding
249 * how much space each of the above types will take).
250 *
251 * Though we appear to be going for the rigid allocation for each type
252 * of data structure, I'm still going to do the "simple offset"
253 * solution to maintain maximum flexibility into the future.
254 * The single exception to this is that FLIPC addresses will be composed of
255 * node number and endpoint number, where the endpoint number will be
256 * the index into the endpoint array.
257 */
258
/*
 * "Pointer" within the communications buffer: a byte offset from the
 * base of the buffer (see the discussion above).  Offsets are used
 * instead of real pointers because the ME and the AIL map the buffer
 * at different virtual addresses.
 */
typedef unsigned long flipc_cb_ptr;
/* Distinguished null value, which doesn't point anywhere into the CB.
   -1 rather than 0, since offset 0 (the buffer base) is valid. */
#define FLIPC_CBPTR_NULL ((flipc_cb_ptr) -1)
262
263 /*
264 * Synchronization between message engine and application.
265 *
266 * In general, it isn't reasonable to allow locking and unlocking of
267 * data structures between message engine and communications buffer,
268 * as this requires the message engine to trust arbitrary user
269 * threads. The solution is to arrange all data structures so that
270 * they may be accessed by both parties without locking. The way that
271 * this is usually done is that specific variables are considered to
272 * be owned by one of the ME or the AIL, and the other party is
273 * allowed to read the variable but not to modify it. With this
274 * arrangement, implementing things like producer/consumer circular
275 * queues is possible; each agent (ME or AIL) goes around the list
276 * doing its thing, and avoids passing the pointer showing where the
277 * other agent is working.
278 *
279 * Following the above, we may divide structure members into five
280 * classes, and define prefixes for these five classes.
281 *
282 * Description Prefix
283 * -------------------------------
284 * Private to AIL pail_
285 * Private to ME pme_
286 * AIL owned, read by ME sail_
287 * ME owned, read by AIL sme_
288 * Shared in other way shrd_
289 *
290 * Shared variables may change their ownership based on their own
291 * or someone elses value (these variables may be thought of as
292 * being handed back and forth between the two entities) or on a
293 * configuration option of the structure (not handed back and forth,
294 * but still based on another variables value).
295 *
296 * In addition, I am going to put variables that are set at endpoint
297 * allocation and cleared at deallocation (but read by both sides) in
298 * a separate class; they are "AIL owned, read by ME" but are
299 * effectively constant over the synchronization protocols we care
300 * about.
301 *
302 * Constant after allocation const_
303 *
304 * Note that this ignores memory consistency issues (when the two
305 * agents are actually on two separate processors). These issues need
306 * to be explored in more detail; for now suffice it to say that the
307 * above methods work given a sequentially consistent memory model or
308 * a processor consistent memory model.
309 *
310 * Also note that an optimizing compiler may reorder our memory
311 * accesses, playing merry hell with the inter-node synchronization
312 * protocols (the compiler doesn't know about the other node, after
313 * all). To avoid this, all structure members used for
314 * synchronization will be marked volatile; this will force the
315 * compiler to keep the order and number of accesses intact. This
316 * will also force the compiler *not* to optimize way accesses to
317 * these variables, so it is wise to explicitly load the variable into
318 * a temporary once if you need to do multiple computations with it,
319 * and store it back afterwards when you are done.
320 */
321
322 /*
323 * Memory allocation:
324 *
325 * For maximum simplicity in the first implementation, we need to know
326 * at comm buffer allocation time how many endpoints, endpoint_sets,
327 * and buffers we will want total, until the end of time. This
 328  * massively simplifies memory allocation; there will be a single array
329 * of each type of data and the communication buffer will be taken up
330 * by the concatenation of these arrays (with some fiddling to make
331 * sure that no data crosses a page boundary).
332 *
333 * For each data type there will be a free list to which pieces of
334 * data will be added to or removed from as needed. Each data type
335 * will have a pointer in it to allow it to be linked onto the free
336 * list.
337 */
338
339 /*
340 * Multiple thread access to data structures:
341 *
342 * There are several points in the communications buffer (notably
343 * endpoint accesses) when multiple application threads will be
344 * attempting operations on data structures at the same time. To
345 * multiplex these operations, we need a per-data structure lock.
346 * Lock attributes:
347 * *) This lock will not be kernel based, as such a lock would be
348 * too heavyweight to use for arbitrary sending and receiving
349 * operations).
350 * *) Because it is not kernel based, it may not be used to
351 * multiplex accesses from threads at different kernel
352 * priority levels. Deadlock would result if a low-priority
353 * thread gained the lock and then was prempted by a
354 * high-priority thread that wanted to acquire it.
355 * *) Architecture-dependent interfaces need to be designed to
356 * atomically lock and unlock this data structure.
357 *
358 * These are "simple locks" and are defined in flipc_dep.h.
359 */
360
361 /*
362 * Lock type. This placement (in flipc_cb.h) is a little bit of a
363 * hack, as it really should be defined with the machine dependent lock
364 * macros. But then the machine independent lock macros have problems
365 * because they have to include it both before and after the prototypes.
366 * So rather than split the machine dependent stuff into multiple
367 * files, I'll define it here and hope that this definition works for
368 * whatever architectures we're on.
369 */
370 typedef unsigned long flipc_simple_lock;
371
372 /*
373 * Ownership of data structures.
374 *
375 * Please note that this is a can of worms, and that I (Randys)
 376  * consider this (and its interactions with endpoint group membership)
377 * the likeliest place for design bugs in FLIPC. Any and all should
378 * take this as an open invitation and challenge to find bugs in what
379 * follows.
380 *
381 * Rules:
382 *
383 * *) If you've disabled a structure and synched with the
384 * appropriate side of the ME, the ME won't touch it.
385 *
386 * *) If you've taken a send endpoint off of the send endpoint
387 * list and sync'd with the ME, the ME won't touch it.
388 *
389 *[The rest of this applies to the AIL only; the above rules are the
390 * only ones the ME respects. ]
391 *
392 * *) Within the AIL, a disabled structure is owned by:
393 * *) The routine that disabled it, before it is put on
394 * the free list.
395 * *) The routine that dequeued it from the free list,
396 * before it is enabled.
397 * Taking of the simple lock is not required for ownership in
398 * these cases. Taking of the simple lock is not required for
399 * the act of *enabling* the structure (you have ownership and
400 * are giving it away), however it is required for the act of
401 * disabling the structure (since it is the only valid way to
402 * take ownership of an enabled structure, and you can't
403 * modify the enabled bit without having ownership).
404 *
405 * *) The simple lock in a structure always needs to be valid, as
406 * simple locks may be taken while the structure is in any
 407  * state.  Similarly, the enabled bit must always be valid,
408 * both because it's what the ME checks, and because it may be
409 * checked by the AIL while the structure is free.
410 *
411 * *) Holding the simple lock on an enabled structure imparts
412 * ownership of that structure. You are allowed to take the
413 * simple lock of a disabled structure, but ownership is not
414 * gained by doing so.
415 *
416 * *) You are allowed to read the enabled/disabled bit without
417 * owning the structure (if the structure is disabled, there
418 * may be no way to gain the ownership).
419 *
420 * *) Owning a structure allows you to do what you want with it,
421 * except:
422 * *) As mentioned above, the simple lock and
423 * enabled/disabled bit must always be valid.
424 * *) The ownership of the endpoint group related members
425 * of an endpoint structure is special; see below.
426 * *) The allocations lock must be held to manipulate the
427 * next send endpoint field of any endpoint.
428 *
429 * *) If an endpoint is on an endpoint group, the ownership of
 430  * the endpoint group related members of the structure
 431  * (sail_endpoint_group and pail_next_eg_endpoint) go with the
 432  * ownership of the endpoint group, not the endpoint. For
433 * this purpose only, membership is defined atomically as the
434 * sail_endpoint_group pointer being set to an endpoint group.
435 * Thus one may remove an endpoint from an endpoint group
436 * without owning the endpoint (change the sail_endpoint_group
437 * pointer last). One requires both locks to add an endpoint
438 * to an endpoint group, however.
439 *
440 * (Part of the motivation for this is that removal and
441 * addition of endpoints to endpoint groups requires
442 * modifications of pointers in other endpoint structures).
443 *
444 * *) No structure may be put on the free list if marked with any
445 * association to any other structure. Specifically, endpoint
446 * groups may have no endpoints belonging to them, and
447 * endpoints may not belong to an endpoint group or have
448 * buffers belonging to them.
449 *
450 * *) One consequence of the above is that endpoint groups may
451 * not be marked as disabled while they have any endpoints on
452 * them, as freeing an endpoint requires it to be removed from
453 * its endpoint group, and if ownership of the endpoint group
454 * cannot be gained, that is impossible.
455 *
456 * *) In theory, endpoints *may* be marked disabled while they
457 * are still on endpoint groups. In practice, they are not.
458 * This is relied on by the code which frees endpoint groups,
459 * in a non-obvious way. Specifically, that code assumes that
460 * there is no way that a call to free endpoint will return
461 * with the endpoint still on the endpoint group. Since the
462 * only way for free endpoint to fail is if the endpoint is
463 * inactive, and since the endpoint is set inactive only after
464 * free endpoint (presumably a different one) confirms that it
465 * isn't on any endpoint group, this assumption is true.
466 *
467 * Got that? Take home lesson: don't allow endpoints to be
468 * marked disabled while still on endpoint groups until you
469 * *do* get that, and are willing to take the responsibility
470 * of changing it so that it works under your new scheme.
471 *
472 * *) Ownership of the freelist(s) are gained by holding the
473 * allocations lock for the buffer, and *only* in that way.
474 * No modification of freelist, send endpoint list, or send
475 * side ME sync bits is valid without holding the allocations
476 * lock. In other words, while you can read things in the
477 * main communications buffer control structure at will, you
478 * may not change them without owning the allocations lock.
479 *
480 * *) The state where a structure is disabled but off of the
481 * freelist may be valid as an intermediate (while an AIL
482 * routine is orchestrating a transition) but is not a valid
483 * static state. This state must not survive the return to
484 * application code of the thread that disabled the structure.
485 */
486
487 /*
488 * Flipc data buffer management.
489 *
490 * A buffer (whether being used for sending or receiving) may be in
491 * one of three states:
492 *
493 * READY -- Buffer held by application.
494 * PROCESSING -- Buffer held by endpoint, unprocessed. For receive endpoints,
495 * this means that the buffer is empty, waiting to be filled by
 496  * an incoming message. For send endpoints, this means that the
497 * buffer is full, waiting to be sent out.
498 * COMPLETED -- Buffer held by the endpoint, processed. For receive
499 * endpoints, this means that the buffer is full, with newly
500 * received data in it. For send endpoints, this means that the
 501  * buffer is empty (*), with its data having been sent out.
502 *
503 * (*) In point of fact the data hasn't been touched, though bits
504 * may have been fiddled with in the header data structure. But
505 * it's been sent.
506 * FREE -- The buffer is in the pool of free buffers, and may be
507 * allocated to any newly created endpoint.
508 *
509 * The transition diagram between these states is relatively simple:
510 *
511 *
512 * release
513 * /-----------------\|
514 * +----------+ -+----------+
515 * | READY | |PROCESSING|<- - - - - -
516 * +----------+_ +----------+ \
517 * ^ |\ - - - - - - - - / | | \endpoint allocate
518 * | (processed) \endpoint \
519 * | | \ free |
520 * | acquire / ------\
521 * | \ |
522 * | / (processed) >+----------+
523 * +----------+ | FREE |
524 * |COMPLETED |< - - - - - - - - - - +----------+
525 * +----------+ endpoint allocate / ^
526 * | ^- - - - - - - - - - - - - - - - - - - - - - - |
527 * | /
528 * \ endpoint free /
529 * ------------------------------------------------------/
530 *
531 * (If it doesn't look simple, imagine it without the FREE state; that
532 * state doesn't enter into almost any buffer manipulations)
533 *
534 * For send buffers, release==send, acquire==allocate, and
535 * processed==the sending done by the message engine. For receive buffers,
536 * release==release, acquire==receive, and process==the actual
537 * arrival of the message handled by the messaging engine.
538 *
539 * The choice of path from the PROCESSING state is an endpoint
540 * specific configuration option; a particular endpoint may leave a
541 * processed buffer on the endpoint, or it may release it back to the
542 * application by dropping it from the endpoint.
543 *
544 * Buffers are assigned the PROCESSING state on a newly allocated
545 * receive endpoint (to be ready to receive messages) and the
546 * COMPLETED state on a newly allocated send endpoint.
547 *
548 * The state (other than FREE) that a particular buffer is in is
549 * determined by its place on a circular queue of buffer pointers that
550 * is part of the endpoint structure. Buffers owned by the
551 * application (READY) are not pointed to by pointers on this queue.
552 * The buffer is released to the message engine by placement of a
553 * pointer to it on this queue. When the message engine is done
554 * processing the buffer, it sets a flag in the buffer header. If the
555 * endpoint is so configured, it then removes the buffer pointer from
556 * the queue; otherwise the AIL acquires the buffer (and removes the
557 * pointer from the queue) when it chooses.
558 *
559 * . . . . . .
560 * . .
561 * . .
562 * . . AIL releasing
563 * . . ^
564 * . +-------+--/
565 * . | |
566 * . |Buffers|
567 * . | to be |
568 * . |Sent or|
569 * . |Receivd|
570 * . | Into | ^ ME processing
571 * . +-------+ --/
572 * . | |
573 * . AIL | Sent | (These buffers have a flag set to indicate
574 * .Acquiring| or | that they have been processed. This
575 * . |Filled | section is optional; the endpoint may be
576 * . |buffers| configured to drop buffers after processing)
577 * . ^ | |
578 * . \--+-------+
579 * . .
580 * . .
581 * . . . . . .
582 *
583 *
584 * The AIL will refuse to acquire a buffer that has not yet been
585 * processed by the ME. Acquire will not work at all on endpoints
586 * that have been configured to drop buffers on completion.
587 *
588 * The buffer_available primitive is coded to avoid doing a
589 * (potentially costly) acquiring of the endpoint flipc lock. Since
590 * telling where there is a buffer available requires two operations
591 * (comparison of the acquire and release pointers to see if there are
592 * any buffers on the endpoint, and then indirection of the acquire
 593  * pointer to see if that buffer has been processed yet), there is a
594 * potential race that will admit the possibility of indirecting
595 * through an invalid pointer. For this reason, for the life of an
596 * endpoint, it is a requirement that all buffer pointers on the
597 * bufferlist point *somewhere* (ie. to some existing buffer), so that
598 * this indirection will not cause an access error. The
599 * buffer_available primitive may return the wrong result, but (as
600 * long as the incorrectness is transitory), this is acceptable.
601 */
602
/* Buffer states (see the state/transition discussion above).  The
   numeric encodings are chosen so that FLIPC_BUFFER_PROCESSED_P can
   test for a "processed" state (flipc_Completed or flipc_Ready, both
   of which have bit 0x2 set) with a single bitwise AND. */
typedef enum {
    flipc_Free = 0x0, flipc_Processing = 0x1,
    flipc_Completed = 0x2, flipc_Ready = 0x3
} flipc_buffer_state_t;
/* Nonzero iff state is flipc_Completed or flipc_Ready. */
#define FLIPC_BUFFER_PROCESSED_P(state) ((state) & 0x2)
610
611 /*
612 * Data header/buffer layout.
613 *
614 * For this implementation, and probably for all time, the header
 615  * immediately precedes the data in memory, and the messaging engine
616 * will send both header and data. Our priority is message dispatch
617 * speed rather than raw bandwidth (this is the small message side of
618 * a transfer mechanism), so we don't mind that we are throwing away
619 * some bandwidth by taking up transferred space with header data.
620 *
621 * The data size will be the maximum size allowed by the underlying
622 * transport, minus the header size (available at run time). The user
623 * will be given a pointer to the data buffer, and will use this both
624 * for copying data in and out, and as an argument to the underlying
625 * flipc routines. The flipc routines will access appropriately.
626 *
627 * The header structure follows; the user data type will be offset and
628 * cast appropriately to access this.
629 */
630
/*
 * Buffer header.  Immediately precedes the buffer data in memory (see
 * layout comments above); the messaging engine transmits header and
 * data together.
 */
typedef struct flipc_data_buffer {
    union {
        FLIPC_address_t destination; /* Destination address while the
                                        buffer is being sent. */
        flipc_cb_ptr free;           /* Next header on the free list
                                        while the buffer is free. */
    } u;

    /* Current state of this buffer.  ME owned if flipc_Processing, AIL
       owned otherwise.  May not ever assume the state flipc_Ready in an
       optimized implementation.  Volatile because it is used for ME/AIL
       synchronization: accesses must not be reordered or elided by the
       compiler (see synchronization comments above). */
    volatile flipc_buffer_state_t shrd_state;
} *flipc_data_buffer_t;
641
642 /*
643 * Endpoint structure.
644 *
645 * An endpoint is the data structure used for communicating buffers,
646 * either send or receive. Note that all actual circular lists of
647 * buffer pointers on the endpoints are in their own array that gets
648 * partitioned out to the various endpoints. This is because we want
649 * the endpoint structures themselves to be fixed size for easy
 650  * indexing upon receipt of a message. This large scale array will be
651 * of size (max_buffers_per_endpoint) * (number_of_endpoints). Both
652 * of these values are set during the domain initialization call.
653 *
654 * Note that the pointers contained in the buffer lists are pointers to
655 * buffer *headers*, not to the data.
656 */
657
658 /*
659 * This structure is divided into four cache lines, separated by their
660 * usage type:
661 *
662 * *) Private data that the AIL scribbles on.
663 * *) Data the AIL writes (regularly) that the ME reads
 664  * (occasionally). The canonical example is the release pointer.
665 * *) Private data that the ME scribbles on.
 666  * *) Data the ME writes (regularly) that the AIL reads (occasionally).
667 * The canonical example is the process pointer.
668 *
669 * There are a couple of other categories of stuff, that can be shoehorned
670 * into the above:
671 * *) Constant data that both sides read regularly. This can be
672 * duplicated in the two private areas (actually, it can be
673 * duplicated in any two areas that stay in the cache of the
674 * respective processors).
675 * *) Stuff that is not accessed on the critical path; it can go
676 * almost anywhere (probably in one of the two ping-ponging
677 * cache lines).
678 * *) Stuff that is read-only for a single processor goes in that
679 * processors private data section.
680 *
681 * Duplicate entries have a "p" or a "a" suffixed to the name to
682 * indicate that fact. Note that these will usually, but not always,
683 * be "const" variables--they may be "const" variables only from the
684 * critical path viewpoint.
685 *
686 * We take cache line length as being 8 * sizeof(int).
687 */
688
689 typedef struct flipc_endpoint {
690
691 /* ===Private AIL data=== */
692 /* Type of endpoint (send, recv, etc). Duplicated in private
693 ME section. */
694 FLIPC_endpoint_type_t constda_type;
695
696 /* This next value is two variables squeezed into a single word to
697 * save on memory accesses (since they are almost always read at
698 * the same time. The two variables are:
699 *
700 * const_drop_processed_buffers -- Should the message engine drop
701 * buffers after processing them (as opposed to leaving them on
702 * the endpoint)?
703 *
704 * sail_enabled (volatile) -- Is the endpoint enabled? This isn't
705 * marked constant because it is used for synchronization on
706 * endpoint deallocation.
707 *
 708  * Note that to reduce tests and branches, these two variables
709 * are represented by two bits in the word (bit 0 and bit 16). It
710 * is illegal to have bits other than 0 and 16 set in this word.
711 * This assumption is used in ENABLED_AND_NOT_DPB_P, and is enforced
712 * in DOE_CONSTRUCT (assumed to not be performance critical) below.
713 *
714 * Duplicated in private ME section.
715 */
716
717 volatile unsigned long sailda_dpb_or_enabled;
718
719 #define EXTRACT_DPB(dpb_or_enabled) ((dpb_or_enabled) >> 16)
720 #define EXTRACT_ENABLED(dpb_or_enabled) ((dpb_or_enabled) & 0xffff)
721 #define DISABLED_OR_DPB_P(dpb_or_enabled) ((dpb_or_enabled) ^ 0x1)
722 #define DOE_CONSTRUCT(dpb, enabled) \
723 (((dpb) ? 0x10000 : 0) | ((enabled) ? 0x1 : 0))
724
    flipc_simple_lock pail_lock;        /* Simple lock for serializing
                                           multiple thread access to
                                           structure.  AIL owned. */
    /* First element in buffer list array that is ours.  Constant
       from communications buffer initialization. */
    flipc_cb_ptr constda_my_buffer_list;
    /* First element after my_buffer_list that is *not* in my buffer
       list.  Constant from communications buffer initialization. */
    flipc_cb_ptr constda_next_buffer_list;

    /* First location that has a valid buffer pointer in it.  This may
       contain a pointer to a buffer available for acquisition, or it
       may contain a pointer to a buffer that is still being
       processed; the buffer header or process_ptr needs to be checked
       to be sure.  This location is AIL owned.  It is ignored by all
       (including the ME and initialization code) if
       drop_processed_buffers, above, is set. */
    volatile flipc_cb_ptr shrd_acquire_ptr;

    /* AIL private copy of the process pointer.  This hopefully means
       that the AIL won't need to read the real process pointer (and
       fault in a cache line) very often. */
    flipc_cb_ptr pail_process_ptr;

    /* Padding so the next (AIL-written, ME-read) group starts on its
       own cache line. */
    unsigned int pad_pail_7;
750
751 /* ===End of cache line===*/
 /* ===AIL writes, ME occasionally reads=== */
753
    /* Next location at which the AIL may insert a buffer pointer. */
    volatile flipc_cb_ptr sail_release_ptr;
    /* Padding out to a full cache line so ME reads of the release
       pointer don't false-share with neighboring members. */
    unsigned int pad_sail_1;
    unsigned int pad_sail_2;
    unsigned int pad_sail_3;
    unsigned int pad_sail_4;
    unsigned int pad_sail_5;
    unsigned int pad_sail_6;
    unsigned int pad_sail_7;
763
764 /* ===End of cache line===*/
765 /* ===Private ME data=== */
766 /* See above comments (in private ail section). */
767
    /* Endpoint type; constant after allocation, ME's copy. */
    FLIPC_endpoint_type_t constdm_type;
    /* ME copy of the combined DPB/enabled word; see the
       sailda_dpb_or_enabled counterpart above. */
    volatile unsigned long saildm_dpb_or_enabled;

    volatile unsigned long sme_overruns;    /* For a receive endpoint, counter for
                                               the number of messages that have
                                               arrived when there hasn't been
                                               space.  ME owned. */
    unsigned long pail_overruns_seen;   /* A count of the number of overruns
                                           that the AIL has noted and doesn't
                                           want to be bothered with again.
                                           The user only sees the difference
                                           between the previous count and this. */

    /*
     * For send endpoints; linked into a list that is used by the ME
     * to find stuff to do.  Also used for the endpoint free list.
     * Null if at end of list.  Not "const" because it's used as a
     * synchronization variable during setup and teardown
     * of send endpoints.
     */
    volatile flipc_cb_ptr sail_next_send_endpoint;

    /* Constant buffer list pointers for the ME.  See the private AIL
       comments above. */
    flipc_cb_ptr constdm_my_buffer_list;
    flipc_cb_ptr constdm_next_buffer_list;

    /* Private ME copy of the release pointer.  This hopefully means
       that the ME won't have to read (and fault in a cache line) the
       release pointer very often. */

    flipc_cb_ptr pme_release_ptr;
799 /* ===End of cache line===*/
800
 /* ===ME writes, AIL occasionally reads=== */
802 /*
803 * For endpoint group membership.
804 */
    flipc_cb_ptr pail_next_eg_endpoint; /* Next endpoint in endpoint group.
                                           AIL owned. */
    flipc_cb_ptr sail_epgroup;          /* Direct pointer to endpoint group that
                                           we are part of.  FLIPC_CBPTR_NULL
                                           if none.  AIL owned. */

    /* First location that has a buffer pointer available for
       processing.  If this value is equal to the release_ptr there
       are no buffers available for processing. */
    volatile flipc_cb_ptr sme_process_ptr;
    /* Padding out to the end of the cache line. */
    unsigned int pad_sme_3;
    unsigned int pad_sme_4;
    unsigned int pad_sme_5;
    unsigned int pad_sme_6;
    unsigned int pad_sme_7;
820
821 /* ===End of cache line===*/
822 /* ===END=== */
823
824 /* The following macros may have possible performance loss in
825 multiple accesses (or indirection, but a good compiler will get
826 around that). We need to have versions for each processor so
827 that the constant reads are done from the right copy. */
828
829 /* General bufferlist pointer increment macro, with versions
830 for ME and AIL. */
831
/* Advance a buffer-list pointer by one entry, wrapping from the end of
   this endpoint's buffer list back to its start.  The "suf" argument
   selects which processor's constant copy of the list bounds is read
   (dm = message engine, da = application interface layer).
   NOTE(review): the stride is sizeof(flipc_data_buffer_t) although the
   list nominally holds flipc_cb_ptr entries (BUFFERS_ON_ENDPOINT_AIL
   below divides by sizeof(flipc_cb_ptr)) -- confirm the two sizes agree
   in the buffer-list layout. */
#define NEXT_BUFFERLIST_PTR(bufferlist_ptr, endpoint, suf) \
    (((bufferlist_ptr) + sizeof(flipc_data_buffer_t) \
      == ((endpoint)->const ## suf ## _next_buffer_list)) ? \
     ((endpoint)->const ## suf ## _my_buffer_list) : \
     (bufferlist_ptr) + sizeof(flipc_data_buffer_t))
/* ME flavor: reads the constdm_* bounds. */
#define NEXT_BUFFERLIST_PTR_ME(bufferlist_ptr, endpoint) \
    NEXT_BUFFERLIST_PTR(bufferlist_ptr, endpoint, dm)
/* AIL flavor: reads the constda_* bounds. */
#define NEXT_BUFFERLIST_PTR_AIL(bufferlist_ptr, endpoint) \
    NEXT_BUFFERLIST_PTR(bufferlist_ptr, endpoint, da)
841
842 /* Macros for each of "can I release onto this buffer?" "Can I
843 acquire from this buffer?" and "Can I process an element on
844 this buffer?" The first two presume they are being executed on
 the main processor, the third on the co-processor.
846 All have three arguments:
847 *) A variable which will be set to the release, acquire, or
848 process pointer after the macro *if* the operation is ok.
849 *) A temporary variable used inside the function.
850 *) The endpoint.
851
852 We presume the acquire macro won't be called if drop processed
853 buffers is enabled; the process and release macros deal
854 appropriately with that issue. */
855
856 /* In general these macros will:
857 *) Not read a volatile structure member more than once.
 *) If a variable's owner is the other processor, these macros
 will check a local copy of the variable first before checking
 the other processor's.
861 *) Will only update the local copy if the remote copy really is
862 different from the local one.
863 */
864
865 /* This macro implements the synchronization check; local cbptr is
866 the pointer owned by the local processor which we want to compare
867 with a pointer on the remote processor which we have a copy
868 of locally. Reads the remote pointer zero or one times; other
869 reads are as necessary.
870
871 The algorithm is:
872 *) If the local copy says our pointer and the remote value aren't equal,
873 we're done.
874 *) Otherwise, check the remote copy. If it says the values aren't
875 equal, update the local copy. */
876
/* "Synchronized not-equal" check: true iff local_cbptr differs from
   the remote pointer.  Consults the local copy (copy_rmt_cbptr) first
   so the remote pointer (rmt_cbptr) -- which may cost a cache-line
   fault -- is read at most once, and refreshes the local copy as a
   side effect when the remote read shows the values differ.  tmp_cbptr
   is a caller-supplied scratch variable. */
#define ENDPOINT_SYNCNE_CHECK(local_cbptr, copy_rmt_cbptr, \
                              rmt_cbptr, tmp_cbptr) \
    ((local_cbptr) != (copy_rmt_cbptr) \
     || ((((tmp_cbptr) = (rmt_cbptr)) != (local_cbptr)) \
         && (((copy_rmt_cbptr) = (tmp_cbptr)), 1)))
882
/* True iff a buffer may be acquired from the endpoint (AIL side);
   sets acquire_cbptr to the current acquire pointer.  Per the comment
   above, must not be used when drop-processed-buffers is enabled. */
#define ENDPOINT_ACQUIRE_OK(acquire_cbptr, tmp_cbptr, endpoint) \
    ((acquire_cbptr) = (endpoint)->shrd_acquire_ptr, \
     ENDPOINT_SYNCNE_CHECK(acquire_cbptr, (endpoint)->pail_process_ptr, \
                           (endpoint)->sme_process_ptr, tmp_cbptr))
887
/* True iff the ME has an element to process on the endpoint; sets
   process_cbptr to the current process pointer.  ME side: compares
   against its private copy of the release pointer first. */
#define ENDPOINT_PROCESS_OK(process_cbptr, tmp_cbptr, endpoint) \
    ((process_cbptr) = (endpoint)->sme_process_ptr, \
     ENDPOINT_SYNCNE_CHECK(process_cbptr, (endpoint)->pme_release_ptr, \
                           (endpoint)->sail_release_ptr, tmp_cbptr))
892
/* True iff the AIL may release a buffer onto the endpoint when
   drop-processed-buffers is NOT enabled: the slot after the release
   pointer must not have caught up with the acquire pointer.  Sets
   release_cbptr to the current release pointer. */
#define NODPB_ENDPOINT_RELEASE_OK(release_cbptr, tmp_cbptr, endpoint) \
    ((release_cbptr) = (endpoint)->sail_release_ptr, \
     (tmp_cbptr) = (endpoint)->shrd_acquire_ptr, \
     (NEXT_BUFFERLIST_PTR_AIL(release_cbptr, endpoint) \
      != (tmp_cbptr)))
898
899 /* Don't use NEXT_BUFFERLIST_PTR here to save a temporary variable. */
/* Drop-processed-buffers variant of the release check: compares the
   incremented release pointer (wrap handled inline rather than via
   NEXT_BUFFERLIST_PTR, to save a temporary) against the ME process
   pointer using the synchronized check.  Sets release_cbptr to the
   current release pointer. */
#define DPB_ENDPOINT_RELEASE_OK(release_cbptr, tmp_cbptr, endpoint) \
    (release_cbptr = (endpoint)->sail_release_ptr, \
     ((release_cbptr + sizeof(flipc_data_buffer_t) == \
       (endpoint)->constda_next_buffer_list) \
      ? ENDPOINT_SYNCNE_CHECK((endpoint)->constda_my_buffer_list, \
                              (endpoint)->pail_process_ptr, \
                              (endpoint)->sme_process_ptr, \
                              tmp_cbptr) \
      : ENDPOINT_SYNCNE_CHECK(release_cbptr + sizeof(flipc_data_buffer_t), \
                              (endpoint)->pail_process_ptr, \
                              (endpoint)->sme_process_ptr, \
                              tmp_cbptr)))
912
913 /* This next is tricky; remember that acquire_ptr points
914 to an actual bufferptr on the list, whereas release_ptr does
915 not. This macro is only used in FLIPC_endpoint_query, and so
916 doesn't need to have an ME version. */
917
/* Count of buffers currently on the endpoint, AIL view, handling
   wraparound of the circular list.  Remember (per the comment above)
   that acquire_ptr points at an actual bufferptr slot while
   release_ptr does not.  NOTE(review): this divides by
   sizeof(flipc_cb_ptr) while the increment macros above step by
   sizeof(flipc_data_buffer_t) -- confirm the two sizes agree or this
   count is scaled incorrectly. */
#define BUFFERS_ON_ENDPOINT_AIL(acquire_ptr, release_ptr, endpoint) \
    ((release_ptr) > (acquire_ptr) \
     ? ((release_ptr) - (acquire_ptr)) / sizeof(flipc_cb_ptr) \
     : ((((release_ptr) - (endpoint)->constda_my_buffer_list) \
         + ((endpoint)->constda_next_buffer_list - acquire_ptr)) \
        / sizeof(flipc_cb_ptr)))
924 } *flipc_endpoint_t;
925
926
927 /*
928 * Endpoint groups.
929 *
930 * Used to represent a group of endpoints, for linking sending/receiving
931 * with semaphores & etc. Note that there needs to be a private data
932 * structure kept by the kernel that associates with each epgroup
933 * a semaphore to be used for wakeups on that endpoint set.
934 */
935
/* A set of endpoints treated as one waitable unit; the kernel keeps a
   per-epgroup semaphore for wakeups on the set (see comment above). */
typedef struct flipc_epgroup {
    flipc_simple_lock pail_lock;        /* Lock to synchronize threads (at the
                                           same priority level) accessing this
                                           structure. */
    volatile unsigned long sail_enabled;    /* Set if structure is active. */
    unsigned long const_semaphore_associated;   /* Flag to indicate whether or not
                                                   there is a semaphore associated
                                                   with this endpoint group in the
                                                   kernel flipc routines. */
    volatile unsigned long sail_wakeup_req;     /* Incremented when a thread wants
                                                   to be woken. */
    volatile unsigned long pme_wakeup_del;      /* Incremented when the ME delivers
                                                   a wakeup. */
    unsigned long pail_version;         /* Incremented when epgroup membership
                                           is changed; checked when retrieving
                                           a buffer from an epgroup. */
    unsigned long sail_msgs_per_wakeup; /* How many messages need to arrive
                                           before the ME delivers a wakeup. */
    unsigned long pme_msgs_since_wakeup;    /* How many messages have arrived
                                               since the last wakeup.  ME
                                               owned. */

    flipc_cb_ptr pail_first_endpoint;   /* First endpoint in the group.  The
                                           other endpoints are linked along
                                           behind him.  AIL owned. */
    flipc_cb_ptr pail_free;             /* Used to link this epgroup onto
                                           the freelist. */
} *flipc_epgroup_t;
964
965 /*
966 * Communication buffer control structure.
967 *
968 * This is in the communications buffer itself. Note that any changes
969 * in this structure require it to be locked with the allocation lock,
970 * as access to this structure is shared by all threads using the CB.
971 */
972
973 /*
974 * Individual data type layout.
975 *
976 * All we need here is a pointer to the start of each type of data
977 * struct, the number of those data structures in the communications
978 * buffer, and a pointer to the beginning of the freelist for that data
979 * structure.
980 *
981 * Note that the composite buffer list doesn't have a freelist associated
982 * with it, since each section of the buffer list is tightly bound to an
983 * endpoint, and is allocated and freed with that endpoint. We still
984 * need the start and number information, though.
985 */
/* Per-type allocation bookkeeping in the communications buffer: the
   array of structures of one type, its length, and its freelist. */
struct flipc_cb_type_ctl {
    flipc_cb_ptr start;     /* Where the array of this type of
                               data structure starts. */
    unsigned long number;   /* How many of them we've got. */
    flipc_cb_ptr free;      /* Where the beginning of the freelist
                               is. */
};
993
994 /*
995 * Synchronization with message engine.
996 *
997 * At certain times (specifically during structure allocation/free or
998 * additions to the send list) you want to know that the messaging
999 * engine has picked up your changes. However, the message engine has
1000 * (effectively) two threads, one for each of the send and receive
1001 * sides. The mechanisms used for synchronizations with the two sides
1002 * differ. In an eventual co-processor implementation (with a single
1003 * thread), only the send side mechanism will be used.
1004 *
 * To request a cached state flush by the send side of the messaging
1006 * engine, you flip the request_sync bit and it responds by flipping
1007 * the response_sync bit. The send ME checks this bit once every trip
1008 * through the send endpoints.
1009 *
1010 * On the receive side, since receives take very little time and do
1011 * not block (unlike sends) when we want to make sure the ME is
1012 * holding no cached receive side state, we simply spin until we see
1013 * that the ME receive side is no longer operating. It sets a
1014 * variable whenever it is in the process of receiving a message.
1015 */
1016
1017 /*
1018 * Proper manipulation of the send endpoint list.
1019 *
1020 * Note that synchronizing with the message engine over access to the
1021 * send endpoint list is especially tricky. There is no problem with
1022 * writing new values in all of the locations required to take a send
1023 * endpoint off of the list. However, we must be very sure before
1024 * modifying the pointer *in* the send endpoint that the ME isn't
1025 * currently working in that send endpoint (else it could be sent off
1026 * into the void). Two options here:
1027 *
1028 * *) Synchronize (using the below variables) for each send
1029 * endpoint removed, after the removal but before the
1030 * modification of the data in the internal structure.
1031 * *) If we can always be sure that the send endpoint link in the
1032 * endpoint structure has a valid value, we can simply let the
1033 * chips fall where they may. It will be null while free, and
1034 * have a value that points back into the send buffer list
 * when reallocated. I'm not going to do this; it's sleazy
1036 * and will partially mess up fairness based on ME send
1037 * endpoint round-robinning.
1038 */
1039
1040 /*
 * This entire structure is protected by a kernel-level lock so there
1042 * is no conflict between threads accessing it. See flipc_kfr.c for
1043 * details on this lock; how it is implemented and used depends on what
1044 * kernel base we are on.
1045 */
1046
1047 /*
1048 * Note that the last element of this structure is variable sized, so this
1049 * structure itself is also variable sized.
1050 */
typedef struct flipc_comm_buffer_ctl {
    /* Kernel flipc configuration that the user must match in order to
       work with this kernel.  Checked as soon as the comm buffer is
       mapped. */
    struct {
        unsigned int real_time_primitives:1;
        unsigned int message_engine_in_kernel:1;
        unsigned int no_bus_locking:1;  /* One way check -- if the kernel doesn't
                                           have this and the user does, that's
                                           an error. */
    } kernel_configuration;
    volatile unsigned long send_ready;  /* A send(s) is ready to go */

    /* These first three structures are constant after communications
       buffer initialization. */
    unsigned long data_buffer_size;     /* Size of the data buffers. */
    unsigned long local_node_address;   /* Local node number. */
    FLIPC_address_t null_destination;   /* Local null destination value. */

#if REAL_TIME_PRIMITIVES
    /* The scheduling policy used by the task initializing flipc for
       the allocations lock. */
    int allocations_lock_policy;
#else
    /* A poor substitute for a kernel level allocations lock.
       Note that this *cannot* be used as a regular simple lock;
       instead, try to acquire it, call sleep(1), try again, etc.
       Spinning on this lock will probably waste lots of cycles. */
    flipc_simple_lock pail_alloc_lock;
#endif

    /* All of the members of these structures except for the free
       pointer are constant after initialization.  The free pointer is
       AIL owned and private. */
    struct flipc_cb_type_ctl endpoint;
    struct flipc_cb_type_ctl epgroup;
    struct flipc_cb_type_ctl bufferlist;
    struct flipc_cb_type_ctl data_buffer;

    /* Global synchronization with the message engine.  On the KKT
       implementation we need one synchronizer for each thread. */

    /* Send side: */
    volatile unsigned long sail_request_sync;   /* request_sync = !request_sync when
                                                   the AIL wants to synchronize with
                                                   the CB. */
    volatile unsigned long sme_respond_sync;    /* respond_sync = !respond_sync when
                                                   the ME has noticed the sync
                                                   request.  By responding to the
                                                   sync, the ME is stating that it
                                                   has no communications buffer state
                                                   that was cached previous to it
                                                   noticing the sync. */

    /* Receive side. */
    volatile unsigned long sme_receive_in_progress;     /* Set by the ME before it
                                                           looks at any data
                                                           structures; cleared
                                                           afterwards.  A simple spin
                                                           in user space on this
                                                           variable will suffice, as
                                                           the time that the message
                                                           engine could be receiving
                                                           is low. */

    /* Send endpoint list starts here. */
    volatile flipc_cb_ptr sail_send_endpoint_list;  /* Null if no send endpoints. */

    /* Keep track of whatever performance information we choose. */
    struct FLIPC_domain_performance_info performance;

    /* Keep track of various kinds of error information here. */
    struct FLIPC_domain_errors sme_error_log;

} *flipc_comm_buffer_ctl_t;
1126
1127
1128 /*
1129 * The communications buffer.
1130 *
1131 * The only restriction on the layout of the communications buffer is
1132 * that the buffers themselves may not cross page boundaries. So we
1133 * will place the data buffers at the end of the communications
1134 * buffer, and the other objects at the beginning, and there may be a
1135 * little bit of extra space in the middle.
1136 *
1137 * Note that this layout may change in future versions of FLIPC.
1138 *
1139 * +---------------------------+
1140 * | flipc_comm_buffer_ctl |
1141 * +---------------------------+
1142 * | |
1143 * | Endpoints |
1144 * | |
1145 * +---------------------------+
1146 * | |
1147 * | Endpoint Groups |
1148 * | |
1149 * +---------------------------+
1150 * | |
1151 * | Combined Buffer Lists |
1152 * | |
1153 * +---------------------------+
1154 * | |
1155 * | (Possible empty space) |
1156 * | |
1157 * +---------------------------+
1158 * | |
1159 * | Data Buffers |
1160 * | |
1161 * +---------------------------+
1162 */
1163
/* The size of the region the kernel will reserve for the comm
   buffer.  The AIL needs to know this to know how much to map.
   (NOTE: despite earlier wording mentioning "pages", 0x100000 appears
   to be a byte count -- 1 MB; confirm against the mapping code.) */
1166 #define COMM_BUFFER_SIZE 0x100000
1167
1168 /*
1169 * These variables are set, in a per-address space context, to the base
1170 * and length of the communications buffer. The ME needs to do bounds
1171 * checking to make sure it isn't overrunning anything. Note that the
1172 * existence of these variables implies that an application will only
1173 * open a single domain.
1174 *
1175 * These declarations are duplicated in flipc/flipc_usermsg.h, and
1176 * should be kept in sync with that file.
1177 */
/* Base address and length of the mapped communications buffer in this
   address space (see comment above: one domain per application).
   NOTE(review): these are definitions, not extern declarations, in a
   header; under C's "common"/tentative-definition model multiple
   inclusions merge, but -fno-common builds would reject this --
   consider extern here with a single definition in one .c file. */
unsigned char *flipc_cb_base;
unsigned long flipc_cb_length; /* In bytes. */
1180
1181 /*
1182 * Following is a set of macros to convert back and forth between
1183 * real address pointers and flipc_cb_ptr's for each data type. They
1184 * rely on the flipc_cb_base being set correctly.
1185 *
1186 * A possible future improvement might be to have bounds checking occur
1187 * inside these macros, but I'm not sure what I'd do if it failed.
1188 */
1189
1190 /* Easy going one way. */
/* Convert a real address inside the mapped buffer into a
   buffer-relative flipc_cb_ptr (a byte offset from flipc_cb_base). */
#define FLIPC_CBPTR(ptr) \
    (((unsigned char *) (ptr)) - flipc_cb_base)
1193
1194 /* Need to get the right types going the other way. */
/* Convert a flipc_cb_ptr (byte offset) back to a typed real pointer.
   All of these rely on flipc_cb_base being set correctly and perform
   no bounds checking (see comment above). */
#define FLIPC_ENDPOINT_PTR(cb_ptr) \
    ((flipc_endpoint_t) ((cb_ptr) + flipc_cb_base))
#define FLIPC_EPGROUP_PTR(cb_ptr) \
    ((flipc_epgroup_t) ((cb_ptr) + flipc_cb_base))
#define FLIPC_DATA_BUFFER_PTR(cb_ptr) \
    ((flipc_data_buffer_t) ((cb_ptr) + flipc_cb_base))
#define FLIPC_BUFFERLIST_PTR(cb_ptr) \
    ((flipc_cb_ptr *) ((cb_ptr) + flipc_cb_base))
1203
1204
1205 /*
1206 * Flipc addresses.
1207 *
1208 * The addresses used by flipc for communication are defined in the
1209 * user visible header file as unsigned longs. These macros pull that
1210 * information apart for use of the FLIPC internal routines.
1211 *
1212 * I assume in the following that endpoints immediately follow the
1213 * comm buffer control structure, because that makes indexing into
1214 * them much easier.
1215 */
1216
/* Pack a node number (upper 16 bits) and an endpoint index (lower 16
   bits) into a single FLIPC address.  Both arguments are fully
   parenthesized: the original left "node" bare, so an argument
   containing any operator of lower precedence than "<<" (e.g.
   FLIPC_CREATE_ADDRESS(a | b, i)) mis-expanded. */
#define FLIPC_CREATE_ADDRESS(node, endpoint_idx) \
    (((node) << 16) | (endpoint_idx))
/* Extract the node number (upper 16 bits) from a FLIPC address. */
#define FLIPC_ADDRESS_NODE(addr) (((unsigned long) (addr)) >> 16)
/* Extract the endpoint index (lower 16 bits) from a FLIPC address. */
#define FLIPC_ADDRESS_ENDPOINT(addr) (((unsigned long) (addr)) & 0xffff)
1221
1222 #endif /* _MACH_FLIPC_CB_H_ */