#ifndef _RTDM_DRIVER_H
#define _RTDM_DRIVER_H

#ifndef __KERNEL__
#error This header is for kernel space usage only. \
       You are likely looking for rtdm/rtdm.h...
#endif

#include <asm/atomic.h>
#include <linux/list.h>

#include <nucleus/xenomai.h>
#include <nucleus/heap.h>
#include <nucleus/pod.h>
#include <nucleus/synch.h>
#include <nucleus/select.h>
#include <nucleus/vfile.h>
#include <rtdm/rtdm.h>

#include <nucleus/assert.h>
#ifdef CONFIG_PCI
#include <asm-generic/xenomai/pci_ids.h>
#endif

#ifndef CONFIG_XENO_OPT_DEBUG_RTDM
#define CONFIG_XENO_OPT_DEBUG_RTDM 0
#endif

struct rtdm_dev_context;
typedef struct xnselector rtdm_selector_t;
enum rtdm_selecttype;

/* Device flag: only a single instance of the device can be requested */
#define RTDM_EXCLUSIVE			0x0001

/* Device flag: the device is addressed via a clear-text name */
#define RTDM_NAMED_DEVICE		0x0010

/* Device flag: the device is addressed via protocol family and socket type */
#define RTDM_PROTOCOL_DEVICE		0x0020

/* Mask selecting the device type from the device flags */
#define RTDM_DEVICE_TYPE_MASK		0x00F0

/* Context flag bit: the instance was created in non-real-time context */
#define RTDM_CREATED_IN_NRT		0

/* Context flag bit: the device instance is being closed */
#define RTDM_CLOSING			1

/* Lowest context flag bit that drivers may use for their own purposes */
#define RTDM_USER_CONTEXT_FLAG		8

/* Current version of struct rtdm_device */
#define RTDM_DEVICE_STRUCT_VER		5

/* Current version of struct rtdm_dev_context */
#define RTDM_CONTEXT_STRUCT_VER		3

#define RTDM_SECURE_DEVICE		0x80000000

/* Compose a driver version from major, minor, and patch levels */
#define RTDM_DRIVER_VER(major, minor, patch) \
	(((major & 0xFF) << 16) | ((minor & 0xFF) << 8) | (patch & 0xFF))

#define RTDM_DRIVER_MAJOR_VER(ver)	(((ver) >> 16) & 0xFF)

#define RTDM_DRIVER_MINOR_VER(ver)	(((ver) >> 8) & 0xFF)

#define RTDM_DRIVER_PATCH_VER(ver)	((ver) & 0xFF)

/* Event types a file descriptor can be polled for via the select mechanism */
enum rtdm_selecttype {
	RTDM_SELECTTYPE_READ = XNSELECT_READ,

	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,

	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
};

typedef int (*rtdm_open_handler_t)(struct rtdm_dev_context *context,
				   rtdm_user_info_t *user_info, int oflag);

typedef int (*rtdm_socket_handler_t)(struct rtdm_dev_context *context,
				     rtdm_user_info_t *user_info, int protocol);

typedef int (*rtdm_close_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info);

typedef int (*rtdm_ioctl_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info,
				    unsigned int request, void __user *arg);

typedef int (*rtdm_select_bind_handler_t)(struct rtdm_dev_context *context,
					  rtdm_selector_t *selector,
					  enum rtdm_selecttype type,
					  unsigned fd_index);

typedef ssize_t (*rtdm_read_handler_t)(struct rtdm_dev_context *context,
				       rtdm_user_info_t *user_info,
				       void *buf, size_t nbyte);

typedef ssize_t (*rtdm_write_handler_t)(struct rtdm_dev_context *context,
					rtdm_user_info_t *user_info,
					const void *buf, size_t nbyte);

typedef ssize_t (*rtdm_recvmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  struct msghdr *msg, int flags);

typedef ssize_t (*rtdm_sendmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  const struct msghdr *msg, int flags);

typedef int (*rtdm_rt_handler_t)(struct rtdm_dev_context *context,
				 rtdm_user_info_t *user_info, void *arg);

/*
 * Per-instance operation handlers; the *_rt variants are invoked from
 * real-time context, the *_nrt variants from regular Linux context.
 */
struct rtdm_operations {
	rtdm_close_handler_t close_rt;
	rtdm_close_handler_t close_nrt;

	rtdm_ioctl_handler_t ioctl_rt;
	rtdm_ioctl_handler_t ioctl_nrt;

	rtdm_select_bind_handler_t select_bind;

	rtdm_read_handler_t read_rt;
	rtdm_read_handler_t read_nrt;

	rtdm_write_handler_t write_rt;
	rtdm_write_handler_t write_nrt;

	rtdm_recvmsg_handler_t recvmsg_rt;
	rtdm_recvmsg_handler_t recvmsg_nrt;

	rtdm_sendmsg_handler_t sendmsg_rt;
	rtdm_sendmsg_handler_t sendmsg_nrt;
};

struct rtdm_devctx_reserved {
	void *owner;
	struct list_head cleanup;
};

/* Per-instance device context, followed by the driver-private area */
struct rtdm_dev_context {
	unsigned long context_flags;

	int fd;

	atomic_t close_lock_count;

	struct rtdm_operations *ops;

	struct rtdm_device *device;

	struct rtdm_devctx_reserved reserved;

	char dev_private[0];
};

static inline void *
rtdm_context_to_private(struct rtdm_dev_context *context)
{
	return (void *)context->dev_private;
}

static inline struct rtdm_dev_context *
rtdm_private_to_context(void *dev_private)
{
	return container_of(dev_private, struct rtdm_dev_context, dev_private);
}
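
/*
 * Illustrative sketch (not part of the original header): navigating between
 * the RTDM context and the driver-private area. struct my_context and the
 * open handler name are placeholder assumptions.
 *
 *	struct my_context {
 *		int irq_count;
 *	};
 *
 *	static int my_open(struct rtdm_dev_context *context,
 *			   rtdm_user_info_t *user_info, int oflag)
 *	{
 *		struct my_context *ctx = rtdm_context_to_private(context);
 *
 *		ctx->irq_count = 0;	// dev_private is context_size bytes long
 *		return 0;
 *	}
 */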

struct rtdm_dev_reserved {
	struct list_head entry;
	atomic_t refcount;
	struct rtdm_dev_context *exclusive_context;
};

/*
 * RTDM device descriptor, filled in by the driver and registered with
 * rtdm_dev_register().
 */
struct rtdm_device {
	/* Must be set to RTDM_DEVICE_STRUCT_VER */
	int struct_version;

	/* RTDM_NAMED_DEVICE or RTDM_PROTOCOL_DEVICE, optionally RTDM_EXCLUSIVE */
	int device_flags;
	/* Size of the driver-private area appended to each device context */
	size_t context_size;

	/* Name of a named device (ignored for protocol devices) */
	char device_name[RTDM_MAX_DEVNAME_LEN + 1];

	/* Addressing information of a protocol device */
	int protocol_family;
	int socket_type;

	rtdm_open_handler_t open_rt;
	rtdm_open_handler_t open_nrt;

	rtdm_socket_handler_t socket_rt;
	rtdm_socket_handler_t socket_nrt;

	struct rtdm_operations ops;

	int device_class;
	int device_sub_class;
	int profile_version;
	const char *driver_name;
	int driver_version;
	const char *peripheral_name;
	const char *provider_name;

	const char *proc_name;
#ifdef CONFIG_XENO_OPT_VFILE
	struct xnvfile_directory vfroot;
	struct xnvfile_regular info_vfile;
#endif

	int device_id;
	void *device_data;

	struct rtdm_dev_reserved reserved;
};

int rtdm_dev_register(struct rtdm_device *device);
int rtdm_dev_unregister(struct rtdm_device *device, unsigned int poll_delay);
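
/*
 * Illustrative sketch (not part of the original header): registering a
 * minimal named RTDM device. struct my_context and the my_driver_* handlers
 * are placeholder assumptions a real driver would provide.
 *
 *	static struct rtdm_device my_device = {
 *		.struct_version = RTDM_DEVICE_STRUCT_VER,
 *		.device_flags = RTDM_NAMED_DEVICE,
 *		.context_size = sizeof(struct my_context),
 *		.device_name = "mydev0",
 *		.open_nrt = my_driver_open,
 *		.ops = {
 *			.close_nrt = my_driver_close,
 *			.ioctl_rt = my_driver_ioctl,
 *			.ioctl_nrt = my_driver_ioctl,
 *		},
 *		.device_class = RTDM_CLASS_EXPERIMENTAL,
 *		.driver_name = "my_driver",
 *		.driver_version = RTDM_DRIVER_VER(0, 1, 0),
 *		.peripheral_name = "Example device",
 *		.provider_name = "Example provider",
 *		.proc_name = "mydev0",
 *	};
 *
 *	ret = rtdm_dev_register(&my_device);
 */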

#define rtdm_open		rt_dev_open
#define rtdm_socket		rt_dev_socket
#define rtdm_close		rt_dev_close
#define rtdm_ioctl		rt_dev_ioctl
#define rtdm_read		rt_dev_read
#define rtdm_write		rt_dev_write
#define rtdm_recvmsg		rt_dev_recvmsg
#define rtdm_recv		rt_dev_recv
#define rtdm_recvfrom		rt_dev_recvfrom
#define rtdm_sendmsg		rt_dev_sendmsg
#define rtdm_send		rt_dev_send
#define rtdm_sendto		rt_dev_sendto
#define rtdm_bind		rt_dev_bind
#define rtdm_listen		rt_dev_listen
#define rtdm_accept		rt_dev_accept
#define rtdm_getsockopt		rt_dev_getsockopt
#define rtdm_setsockopt		rt_dev_setsockopt
#define rtdm_getsockname	rt_dev_getsockname
#define rtdm_getpeername	rt_dev_getpeername
#define rtdm_shutdown		rt_dev_shutdown

struct rtdm_dev_context *rtdm_context_get(int fd);

#ifndef DOXYGEN_CPP

#define CONTEXT_IS_LOCKED(context) \
	(atomic_read(&(context)->close_lock_count) > 1 || \
	 (test_bit(RTDM_CLOSING, &(context)->context_flags) && \
	  atomic_read(&(context)->close_lock_count) > 0))

static inline void rtdm_context_lock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context), );
	atomic_inc(&context->close_lock_count);
}

extern int rtdm_apc;

static inline void rtdm_context_unlock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context), );
	smp_mb__before_atomic_dec();
	if (unlikely(atomic_dec_and_test(&context->close_lock_count)))
		rthal_apc_schedule(rtdm_apc);
}

static inline void rtdm_context_put(struct rtdm_dev_context *context)
{
	rtdm_context_unlock(context);
}

struct xntbase;
extern struct xntbase *rtdm_tbase;

static inline nanosecs_abs_t rtdm_clock_read(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_time(rtdm_tbase));
}

static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_jiffies(rtdm_tbase));
}
#endif

int rtdm_select_bind(int fd, rtdm_selector_t *selector,
		     enum rtdm_selecttype type, unsigned fd_index);

#ifdef DOXYGEN_CPP
#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
{							\
	<ENTER_ATOMIC_SECTION>				\
	code_block;					\
	<LEAVE_ATOMIC_SECTION>				\
}
#else
#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
{							\
	spl_t __rtdm_s;					\
							\
	xnlock_get_irqsave(&nklock, __rtdm_s);		\
	code_block;					\
	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
}
#endif
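
/*
 * Illustrative sketch (assumption, not from the original header): running a
 * short code block atomically with respect to other real-time activities.
 * ctx->pending is a placeholder.
 *
 *	RTDM_EXECUTE_ATOMICALLY(
 *		if (ctx->pending > 0)
 *			ctx->pending--;
 *	);
 */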

#define RTDM_LOCK_UNLOCKED	RTHAL_SPIN_LOCK_UNLOCKED

typedef rthal_spinlock_t rtdm_lock_t;

typedef unsigned long rtdm_lockctx_t;

#define rtdm_lock_init(lock)	rthal_spin_lock_init(lock)

#ifdef DOXYGEN_CPP
#define rtdm_lock_get(lock)	rthal_spin_lock(lock)
#else
#define rtdm_lock_get(lock)					\
	do {							\
		XENO_BUGON(RTDM, !rthal_local_irq_disabled());	\
		rthal_spin_lock(lock);				\
	} while (0)
#endif

#define rtdm_lock_put(lock)	rthal_spin_unlock(lock)

#define rtdm_lock_get_irqsave(lock, context)	\
	rthal_spin_lock_irqsave(lock, context)

#define rtdm_lock_put_irqrestore(lock, context)	\
	rthal_spin_unlock_irqrestore(lock, context)

#define rtdm_lock_irqsave(context)	\
	rthal_local_irq_save(context)

#define rtdm_lock_irqrestore(context)	\
	rthal_local_irq_restore(context)

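/*
 * Illustrative sketch (assumption): protecting driver data with an RTDM
 * spinlock while interrupts are masked. my_lock and my_counter are
 * placeholders.
 *
 *	static rtdm_lock_t my_lock = RTDM_LOCK_UNLOCKED;
 *
 *	rtdm_lockctx_t lock_ctx;
 *
 *	rtdm_lock_get_irqsave(&my_lock, lock_ctx);
 *	my_counter++;
 *	rtdm_lock_put_irqrestore(&my_lock, lock_ctx);
 */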

typedef xnintr_t rtdm_irq_t;

#define RTDM_IRQTYPE_SHARED	XN_ISR_SHARED

#define RTDM_IRQTYPE_EDGE	XN_ISR_EDGE

typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);

#define RTDM_IRQ_NONE		XN_ISR_NONE

#define RTDM_IRQ_HANDLED	XN_ISR_HANDLED

#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)

int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
		     rtdm_irq_handler_t handler, unsigned long flags,
		     const char *device_name, void *arg);

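/*
 * Illustrative sketch (assumption): requesting an interrupt line and
 * retrieving the driver-private cookie inside the handler. my_device_t,
 * my_irq_handle and MY_IRQ_NO are placeholders.
 *
 *	static int my_isr(rtdm_irq_t *irq_handle)
 *	{
 *		my_device_t *dev = rtdm_irq_get_arg(irq_handle, my_device_t);
 *
 *		// acknowledge the device, wake up waiters, etc.
 *		return RTDM_IRQ_HANDLED;
 *	}
 *
 *	ret = rtdm_irq_request(&my_irq_handle, MY_IRQ_NO, my_isr, 0,
 *			       "my_driver", dev);
 */
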
#ifndef DOXYGEN_CPP
static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
{
	return xnintr_detach(irq_handle);
}

static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
{
	return xnintr_enable(irq_handle);
}

static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
{
	return xnintr_disable(irq_handle);
}
#endif

typedef unsigned rtdm_nrtsig_t;

typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t nrt_sig, void *arg);

#ifndef DOXYGEN_CPP
static inline int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
				   rtdm_nrtsig_handler_t handler, void *arg)
{
	*nrt_sig = rthal_alloc_virq();

	if (*nrt_sig == 0)
		return -EAGAIN;

	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, handler, arg, NULL,
			     IPIPE_HANDLE_MASK);
	return 0;
}

static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
{
	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, NULL, NULL, NULL, 0);

	rthal_free_virq(*nrt_sig);
}

static inline void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
{
	rthal_trigger_irq(*nrt_sig);
}
#endif

typedef xntimer_t rtdm_timer_t;

typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);

enum rtdm_timer_mode {
	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,

	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,

	RTDM_TIMERMODE_REALTIME = XN_REALTIME
};

#ifndef DOXYGEN_CPP
#define rtdm_timer_init(timer, handler, name)		\
({							\
	xntimer_init((timer), rtdm_tbase, handler);	\
	xntimer_set_name((timer), (name));		\
	0;						\
})
#endif

void rtdm_timer_destroy(rtdm_timer_t *timer);

int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);

void rtdm_timer_stop(rtdm_timer_t *timer);

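/*
 * Illustrative sketch (assumption): a periodic driver timer firing every
 * millisecond. my_timer and my_timer_handler are placeholders.
 *
 *	static rtdm_timer_t my_timer;
 *
 *	static void my_timer_handler(rtdm_timer_t *timer)
 *	{
 *		// runs in real-time timer context
 *	}
 *
 *	ret = rtdm_timer_init(&my_timer, my_timer_handler, "my_timer");
 *	if (!ret)
 *		ret = rtdm_timer_start(&my_timer, 1000000, 1000000,
 *				       RTDM_TIMERMODE_RELATIVE);
 */
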
#ifndef DOXYGEN_CPP
static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
					      nanosecs_abs_t expiry,
					      nanosecs_rel_t interval,
					      enum rtdm_timer_mode mode)
{
	return xntimer_start(timer, xntbase_ns2ticks_ceil(rtdm_tbase, expiry),
			     xntbase_ns2ticks_ceil(rtdm_tbase, interval),
			     (xntmode_t)mode);
}

static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
{
	xntimer_stop(timer);
}
#endif

typedef xnthread_t rtdm_task_t;

typedef void (*rtdm_task_proc_t)(void *arg);

#define RTDM_TASK_LOWEST_PRIORITY	XNSCHED_LOW_PRIO
#define RTDM_TASK_HIGHEST_PRIORITY	XNSCHED_HIGH_PRIO

#define RTDM_TASK_RAISE_PRIORITY	(+1)
#define RTDM_TASK_LOWER_PRIORITY	(-1)

int rtdm_task_init(rtdm_task_t *task, const char *name,
		   rtdm_task_proc_t task_proc, void *arg,
		   int priority, nanosecs_rel_t period);
int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
void rtdm_task_busy_sleep(nanosecs_rel_t delay);

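/*
 * Illustrative sketch (assumption): spawning a periodic real-time task.
 * my_task, my_task_func and dev are placeholders.
 *
 *	static rtdm_task_t my_task;
 *
 *	static void my_task_func(void *arg)
 *	{
 *		for (;;) {
 *			if (rtdm_task_wait_period())
 *				break;	// e.g. the task was unblocked
 *			// do periodic work
 *		}
 *	}
 *
 *	ret = rtdm_task_init(&my_task, "my_task", my_task_func, dev,
 *			     RTDM_TASK_HIGHEST_PRIORITY, 1000000);
 */
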
#ifndef DOXYGEN_CPP
static inline void rtdm_task_destroy(rtdm_task_t *task)
{
	xnpod_delete_thread(task);
}

void rtdm_task_join_nrt(rtdm_task_t *task, unsigned int poll_delay);

static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
{
	union xnsched_policy_param param = { .rt = { .prio = priority } };
	xnpod_set_thread_schedparam(task, &xnsched_class_rt, &param);
	xnpod_schedule();
}

static inline int rtdm_task_set_period(rtdm_task_t *task,
				       nanosecs_rel_t period)
{
	if (period < 0)
		period = 0;
	return xnpod_set_thread_periodic(task, XN_INFINITE,
					 xntbase_ns2ticks_ceil
					 (xnthread_time_base(task), period));
}

static inline int rtdm_task_unblock(rtdm_task_t *task)
{
	int res = xnpod_unblock_thread(task);

	xnpod_schedule();
	return res;
}

static inline rtdm_task_t *rtdm_task_current(void)
{
	return xnpod_current_thread();
}

static inline int rtdm_task_wait_period(void)
{
	XENO_ASSERT(RTDM, !xnpod_unblockable_p(), return -EPERM;);
	return xnpod_wait_thread_period(NULL);
}

static inline int rtdm_task_sleep(nanosecs_rel_t delay)
{
	return __rtdm_task_sleep(delay, XN_RELATIVE);
}

static inline int
rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
{
	if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
		return -EINVAL;
	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
}

static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
{
	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
}
#endif

typedef nanosecs_abs_t rtdm_toseq_t;

void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);

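/*
 * Illustrative sketch (assumption): using a timeout sequence so that several
 * consecutive waits share one overall deadline. The event, semaphore, and
 * timeout values are placeholders.
 *
 *	rtdm_toseq_t timeout_seq;
 *
 *	rtdm_toseq_init(&timeout_seq, 10000000);	// 10 ms overall budget
 *	ret = rtdm_event_timedwait(&ctx->rx_event, 10000000, &timeout_seq);
 *	if (!ret)
 *		ret = rtdm_sem_timeddown(&ctx->tx_sem, 10000000, &timeout_seq);
 */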

typedef struct {
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_event_t;

#define RTDM_EVENT_PENDING	XNSYNCH_SPARE1

void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_event_select_bind(rtdm_event_t *event, rtdm_selector_t *selector,
			   enum rtdm_selecttype type, unsigned fd_index);
#else
#define rtdm_event_select_bind(e, s, t, i)	({ (void)(e); -EBADF; })
#endif
int rtdm_event_wait(rtdm_event_t *event);
int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);
void rtdm_event_signal(rtdm_event_t *event);

void rtdm_event_clear(rtdm_event_t *event);

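/*
 * Illustrative sketch (assumption): signalling an event from an interrupt
 * handler and waiting for it in a read handler. ctx->rx_event is a
 * placeholder field in the driver-private context.
 *
 *	// in the open handler:
 *	rtdm_event_init(&ctx->rx_event, 0);
 *
 *	// in the ISR, when data has arrived:
 *	rtdm_event_signal(&ctx->rx_event);
 *
 *	// in read_rt, before copying data out:
 *	ret = rtdm_event_wait(&ctx->rx_event);
 */
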
#ifndef DOXYGEN_CPP
void __rtdm_synch_flush(xnsynch_t *synch, unsigned long reason);

static inline void rtdm_event_pulse(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_pulse, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, 0);
}

static inline void rtdm_event_destroy(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_destroy, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, XNRMID);
	xnselect_destroy(&event->select_block);
}
#endif

typedef struct {
	unsigned long value;
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_sem_t;

void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_sem_select_bind(rtdm_sem_t *sem, rtdm_selector_t *selector,
			 enum rtdm_selecttype type, unsigned fd_index);
#else
#define rtdm_sem_select_bind(s, se, t, i)	({ (void)(s); -EBADF; })
#endif
int rtdm_sem_down(rtdm_sem_t *sem);
int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
		       rtdm_toseq_t *timeout_seq);
void rtdm_sem_up(rtdm_sem_t *sem);

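/*
 * Illustrative sketch (assumption): a counting semaphore tracking free buffer
 * slots; ctx->free_slots and SLOT_COUNT are placeholders.
 *
 *	rtdm_sem_init(&ctx->free_slots, SLOT_COUNT);
 *
 *	ret = rtdm_sem_down(&ctx->free_slots);	// consume a slot, may block
 *	...
 *	rtdm_sem_up(&ctx->free_slots);		// release it again
 */
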
#ifndef DOXYGEN_CPP
static inline void rtdm_sem_destroy(rtdm_sem_t *sem)
{
	trace_mark(xn_rtdm, sem_destroy, "sem %p", sem);
	__rtdm_synch_flush(&sem->synch_base, XNRMID);
	xnselect_destroy(&sem->select_block);
}
#endif

typedef struct {
	xnsynch_t synch_base;
} rtdm_mutex_t;

void rtdm_mutex_init(rtdm_mutex_t *mutex);
int rtdm_mutex_lock(rtdm_mutex_t *mutex);
int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);

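/*
 * Illustrative sketch (assumption): serializing access to driver state with
 * an RTDM mutex; ctx->lock is a placeholder.
 *
 *	rtdm_mutex_init(&ctx->lock);
 *
 *	ret = rtdm_mutex_lock(&ctx->lock);
 *	if (!ret) {
 *		// update shared state
 *		rtdm_mutex_unlock(&ctx->lock);
 *	}
 */
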
#ifndef DOXYGEN_CPP
static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return;);

	trace_mark(xn_rtdm, mutex_unlock, "mutex %p", mutex);

	if (unlikely(xnsynch_release(&mutex->synch_base) != NULL))
		xnpod_schedule();
}

static inline void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
{
	trace_mark(xn_rtdm, mutex_destroy, "mutex %p", mutex);

	__rtdm_synch_flush(&mutex->synch_base, XNRMID);
}
#endif

#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)

struct rtdm_ratelimit_state {
	rtdm_lock_t lock;
	nanosecs_abs_t interval;
	int burst;
	int printed;
	int missed;
	nanosecs_abs_t begin;
};

int rtdm_ratelimit(struct rtdm_ratelimit_state *rs, const char *func);

#define DEFINE_RTDM_RATELIMIT_STATE(name, interval_init, burst_init)	\
	struct rtdm_ratelimit_state name = {				\
		.lock = RTDM_LOCK_UNLOCKED,				\
		.interval = interval_init,				\
		.burst = burst_init,					\
	}

#define DEF_RTDM_RATELIMIT_INTERVAL	5000000000LL
#define DEF_RTDM_RATELIMIT_BURST	10

#define rtdm_printk_ratelimited(fmt, ...)  ({				\
	static DEFINE_RTDM_RATELIMIT_STATE(_rs,				\
					   DEF_RTDM_RATELIMIT_INTERVAL,	\
					   DEF_RTDM_RATELIMIT_BURST);	\
									\
	if (rtdm_ratelimit(&_rs, __func__))				\
		printk(fmt, ##__VA_ARGS__);				\
})

#ifndef DOXYGEN_CPP
static inline void *rtdm_malloc(size_t size)
{
	return xnmalloc(size);
}

static inline void rtdm_free(void *ptr)
{
	xnfree(ptr);
}

#ifdef CONFIG_XENO_OPT_PERVASIVE
int rtdm_mmap_to_user(rtdm_user_info_t *user_info,
		      void *src_addr, size_t len,
		      int prot, void **pptr,
		      struct vm_operations_struct *vm_ops,
		      void *vm_private_data);
int rtdm_iomap_to_user(rtdm_user_info_t *user_info,
		       phys_addr_t src_addr, size_t len,
		       int prot, void **pptr,
		       struct vm_operations_struct *vm_ops,
		       void *vm_private_data);
int rtdm_munmap(rtdm_user_info_t *user_info, void *ptr, size_t len);

static inline int rtdm_read_user_ok(rtdm_user_info_t *user_info,
				    const void __user *ptr, size_t size)
{
	return access_rok(ptr, size);
}

static inline int rtdm_rw_user_ok(rtdm_user_info_t *user_info,
				  const void __user *ptr, size_t size)
{
	return access_wok(ptr, size);
}

static inline int rtdm_copy_from_user(rtdm_user_info_t *user_info,
				      void *dst, const void __user *src,
				      size_t size)
{
	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
}

static inline int rtdm_safe_copy_from_user(rtdm_user_info_t *user_info,
					   void *dst, const void __user *src,
					   size_t size)
{
	return (!access_rok(src, size) ||
		__xn_copy_from_user(dst, src, size)) ? -EFAULT : 0;
}

static inline int rtdm_copy_to_user(rtdm_user_info_t *user_info,
				    void __user *dst, const void *src,
				    size_t size)
{
	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
}

static inline int rtdm_safe_copy_to_user(rtdm_user_info_t *user_info,
					 void __user *dst, const void *src,
					 size_t size)
{
	return (!access_wok(dst, size) ||
		__xn_copy_to_user(dst, src, size)) ? -EFAULT : 0;
}

static inline int rtdm_strncpy_from_user(rtdm_user_info_t *user_info,
					 char *dst,
					 const char __user *src, size_t count)
{
	if (unlikely(!access_rok(src, 1)))
		return -EFAULT;
	return __xn_strncpy_from_user(dst, src, count);
}

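/*
 * Illustrative sketch (assumption, not from the original header): copying an
 * ioctl argument from user space inside an ioctl handler. struct my_config,
 * MY_IOC_SET_CONFIG and my_ioctl are placeholders.
 *
 *	static int my_ioctl(struct rtdm_dev_context *context,
 *			    rtdm_user_info_t *user_info,
 *			    unsigned int request, void __user *arg)
 *	{
 *		struct my_config cfg;
 *
 *		switch (request) {
 *		case MY_IOC_SET_CONFIG:
 *			if (user_info) {
 *				if (rtdm_safe_copy_from_user(user_info, &cfg,
 *							     arg, sizeof(cfg)))
 *					return -EFAULT;
 *			} else
 *				memcpy(&cfg, arg, sizeof(cfg));	// kernel caller
 *			// apply cfg ...
 *			return 0;
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 */
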
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return (user_info ? xnshadow_thread(user_info) != NULL
			  : !xnpod_root_p());
}

#else /* !CONFIG_XENO_OPT_PERVASIVE */

#define rtdm_mmap_to_user(...)		({ -ENOSYS; })
#define rtdm_munmap(...)		({ -ENOSYS; })
#define rtdm_read_user_ok(...)		({ 0; })
#define rtdm_rw_user_ok(...)		({ 0; })
#define rtdm_copy_from_user(...)	({ -ENOSYS; })
#define rtdm_safe_copy_from_user(...)	({ -ENOSYS; })
#define rtdm_copy_to_user(...)		({ -ENOSYS; })
#define rtdm_safe_copy_to_user(...)	({ -ENOSYS; })
#define rtdm_strncpy_from_user(...)	({ -ENOSYS; })

static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return !xnpod_root_p();
}

#endif /* CONFIG_XENO_OPT_PERVASIVE */

static inline int rtdm_in_rt_context(void)
{
	return (rthal_current_domain != rthal_root_domain);
}

#endif /* !DOXYGEN_CPP */

int rtdm_exec_in_rt(struct rtdm_dev_context *context,
		    rtdm_user_info_t *user_info, void *arg,
		    rtdm_rt_handler_t handler);

#endif /* _RTDM_DRIVER_H */