#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
+/* sched_attr is not defined in glibc */
+struct sched_attr {
+ uint32_t size;
+ uint32_t sched_policy;
+ uint64_t sched_flags;
+ int32_t sched_nice;
+ uint32_t sched_priority;
+ uint64_t sched_runtime;
+ uint64_t sched_deadline;
+ uint64_t sched_period;
+ uint32_t sched_util_min;
+ uint32_t sched_util_max;
+};
+#define __NR_sys_sched_getattr __NR_sched_getattr
+_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, size, unsigned int, flags);
+#define __NR_sys_sched_setattr __NR_sched_setattr
+_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, flags);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
return strerror(target_to_host_errno(err));
}
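+/*
+ * Check that the guest buffer at @addr holds only zero bytes from
+ * offset @ksize up to @usize.  Returns 1 if the tail is all zeroes (or
+ * there is no tail), 0 if a non-zero byte is found, and -TARGET_EFAULT
+ * if guest memory cannot be read.  This mirrors the kernel's
+ * check_zeroed_user() helper used for extensible-struct syscalls.
+ */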
+static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
+{
+ size_t i;
+ uint8_t b;
+ if (usize <= ksize) {
+ return 1;
+ }
+ for (i = ksize; i < usize; i++) {
+ if (get_user_u8(b, addr + i)) {
+ return -TARGET_EFAULT;
+ }
+ if (b != 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
}
case TARGET_NR_sched_getscheduler:
return get_errno(sched_getscheduler(arg1));
+ case TARGET_NR_sched_getattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
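+ /* Never ask the host kernel to fill more than our struct can hold. */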
+ if (arg3 > sizeof(scha)) {
+ arg3 = sizeof(scha);
+ }
+ ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
+ if (!is_error(ret)) {
+ target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ target_scha->size = tswap32(scha.size);
+ target_scha->sched_policy = tswap32(scha.sched_policy);
+ target_scha->sched_flags = tswap64(scha.sched_flags);
+ target_scha->sched_nice = tswap32(scha.sched_nice);
+ target_scha->sched_priority = tswap32(scha.sched_priority);
+ target_scha->sched_runtime = tswap64(scha.sched_runtime);
+ target_scha->sched_deadline = tswap64(scha.sched_deadline);
+ target_scha->sched_period = tswap64(scha.sched_period);
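+ /* The utilization clamp fields are only valid if the host returned them. */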
+ if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
+ target_scha->sched_util_min = tswap32(scha.sched_util_min);
+ target_scha->sched_util_max = tswap32(scha.sched_util_max);
+ }
+ unlock_user(target_scha, arg2, arg3);
+ }
+ return ret;
+ }
+ case TARGET_NR_sched_setattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ uint32_t size;
+ int zeroed;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
+ if (get_user_u32(size, arg2)) {
+ return -TARGET_EFAULT;
+ }
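+ /* A size of zero selects the original struct layout (SCHED_ATTR_SIZE_VER0). */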
+ if (!size) {
+ size = offsetof(struct target_sched_attr, sched_util_min);
+ }
+ if (size < offsetof(struct target_sched_attr, sched_util_min)) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+
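+ /*
+ * If the guest struct is larger than what we know about, any extra
+ * bytes must be zero; otherwise fail with E2BIG and report the size
+ * we do support back to the guest, as the kernel does.
+ */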
+ zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
+ if (zeroed < 0) {
+ return zeroed;
+ } else if (zeroed == 0) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+ if (size > sizeof(struct target_sched_attr)) {
+ size = sizeof(struct target_sched_attr);
+ }
+
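+ /* Read in only the part of the guest structure we understand. */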
+ target_scha = lock_user(VERIFY_READ, arg2, size, 1);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ scha.size = size;
+ scha.sched_policy = tswap32(target_scha->sched_policy);
+ scha.sched_flags = tswap64(target_scha->sched_flags);
+ scha.sched_nice = tswap32(target_scha->sched_nice);
+ scha.sched_priority = tswap32(target_scha->sched_priority);
+ scha.sched_runtime = tswap64(target_scha->sched_runtime);
+ scha.sched_deadline = tswap64(target_scha->sched_deadline);
+ scha.sched_period = tswap64(target_scha->sched_period);
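+ /* Copy the utilization clamp fields only if the guest supplied them. */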
+ if (size > offsetof(struct target_sched_attr, sched_util_min)) {
+ scha.sched_util_min = tswap32(target_scha->sched_util_min);
+ scha.sched_util_max = tswap32(target_scha->sched_util_max);
+ }
+ unlock_user(target_scha, arg2, 0);
+ return get_errno(sys_sched_setattr(arg1, &scha, arg3));
+ }
case TARGET_NR_sched_yield:
return get_errno(sched_yield());
case TARGET_NR_sched_get_priority_max: