Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (36 commits)
  [S390] Remove code duplication from monreader / dcssblk.
  [S390] kernel: show last breaking-event-address on oops
  [S390] lowcore: Change type of lowcores softirq_pending to __u32.
  [S390] zcrypt: Comments and kernel-doc cleanup
  [S390] uaccess: Always access the correct address space.
  [S390] Fix a lot of sparse warnings.
  [S390] Convert s390 to GENERIC_CLOCKEVENTS.
  [S390] genirq/clockevents: move irq affinity prototypes/inlines to interrupt.h
  [S390] Convert monitor calls to function calls.
  [S390] qdio (new feature): enhancing info-retrieval from QDIO-adapters
  [S390] replace remaining __FUNCTION__ occurrences
  [S390] remove redundant display of free swap space in show_mem()
  [S390] qdio: remove outdated developerworks link.
  [S390] Add debug_register_mode() function to debug feature API
  [S390] crypto: use more descriptive function names for init/exit routines.
  [S390] switch sched_clock to store-clock-extended.
  [S390] zcrypt: add support for large random numbers
  [S390] hw_random: allow rng_dev_read() to return hardware errors.
  [S390] Vertical cpu management.
  [S390] cpu topology support for s390.
  ...
Linus Torvalds 2008-04-18 08:19:15 -07:00
commit 4cba84b5d6
95 changed files with 2180 additions and 1116 deletions

Documentation/s390/s390dbf.txt

@@ -115,6 +115,27 @@ Return Value: Handle for generated debug area
 Description:  Allocates memory for a debug log
               Must not be called within an interrupt handler
+---------------------------------------------------------------------------
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+                                  int buf_size, mode_t mode, uid_t uid,
+                                  gid_t gid);
+Parameter:    name:     Name of debug log (e.g. used for debugfs entry)
+              pages:    Number of pages, which will be allocated per area
+              nr_areas: Number of debug areas
+              buf_size: Size of data area in each debug entry
+              mode:     File mode for debugfs files. E.g. S_IRWXUGO
+              uid:      User ID for debugfs files. Currently only 0 is
+                        supported.
+              gid:      Group ID for debugfs files. Currently only 0 is
+                        supported.
+Return Value: Handle for generated debug area
+              NULL if register failed
+Description:  Allocates memory for a debug log
+              Must not be called within an interrupt handler
 ---------------------------------------------------------------------------
 void debug_unregister (debug_info_t * id);
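
A minimal usage sketch of the new interface (illustrative only: the driver name "mydrv", the handle name and all parameter values are made up; as noted above, uid/gid must currently be 0):

	#include <linux/module.h>
	#include <linux/stat.h>
	#include <asm/debug.h>

	static debug_info_t *my_dbf;	/* hypothetical debug log handle */

	static int __init mydrv_init(void)
	{
		/* 4 pages per area, 2 areas, 16-byte data entries, rw for owner */
		my_dbf = debug_register_mode("mydrv", 4, 2, 16,
					     S_IRUSR | S_IWUSR, 0, 0);
		if (!my_dbf)
			return -ENOMEM;
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		debug_unregister(my_dbf);
	}

	module_init(mydrv_init);
	module_exit(mydrv_exit);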

arch/s390/Kconfig

@@ -3,6 +3,10 @@
 # see Documentation/kbuild/kconfig-language.txt.
 #
 
+config SCHED_MC
+	def_bool y
+	depends on SMP
+
 config MMU
 	def_bool y
 
@@ -39,6 +43,9 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
 	def_bool y
 
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
 config GENERIC_BUG
 	bool
 	depends on BUG
 
@@ -69,6 +76,8 @@ menu "Base setup"
 comment "Processor type and features"
 
+source "kernel/time/Kconfig"
+
 config 64BIT
 	bool "64 bit kernel"
 	help
 
@@ -301,10 +310,7 @@ config QDIO
 	tristate "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
-	  IBM mainframes.
-
-	  For details please refer to the documentation provided by IBM at
-	  <http://www10.software.ibm.com/developerworks/opensource/linux390>
+	  IBM System z.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called qdio.
 
@@ -486,25 +492,6 @@ config APPLDATA_NET_SUM
 
 source kernel/Kconfig.hz
 
-config NO_IDLE_HZ
-	bool "No HZ timer ticks in idle"
-	help
-	  Switches the regular HZ timer off when the system is going idle.
-	  This helps z/VM to detect that the Linux system is idle. VM can
-	  then "swap-out" this guest which reduces memory usage. It also
-	  reduces the overhead of idle systems.
-
-	  The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
-	  hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
-	  timer is active.
-
-config NO_IDLE_HZ_INIT
-	bool "HZ timer in idle off by default"
-	depends on NO_IDLE_HZ
-	help
-	  The HZ timer is switched off in idle by default. That means the
-	  HZ timer is already disabled at boot time.
-
 config S390_HYPFS_FS
 	bool "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
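
Taken together, these Kconfig changes swap the old private tickless implementation for the generic one: GENERIC_CLOCKEVENTS plus the newly sourced kernel/time/Kconfig expose the common NO_HZ options, which is why NO_IDLE_HZ and NO_IDLE_HZ_INIT can be deleted, and the always-on SCHED_MC enables multi-core scheduler domains fed by the new arch/s390/kernel/topology.c code further down.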

arch/s390/crypto/aes_s390.c

@@ -499,7 +499,7 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
 
-static int __init aes_init(void)
+static int __init aes_s390_init(void)
 {
 	int ret;
 
@@ -542,15 +542,15 @@ aes_err:
 	goto out;
 }
 
-static void __exit aes_fini(void)
+static void __exit aes_s390_fini(void)
 {
 	crypto_unregister_alg(&cbc_aes_alg);
 	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
 
-module_init(aes_init);
-module_exit(aes_fini);
+module_init(aes_s390_init);
+module_exit(aes_s390_fini);
 
 MODULE_ALIAS("aes");

arch/s390/crypto/des_s390.c

@@ -550,7 +550,7 @@ static struct crypto_alg cbc_des3_192_alg = {
 	}
 };
 
-static int init(void)
+static int des_s390_init(void)
 {
 	int ret = 0;
 
@@ -612,7 +612,7 @@ des_err:
 	goto out;
 }
 
-static void __exit fini(void)
+static void __exit des_s390_fini(void)
 {
 	crypto_unregister_alg(&cbc_des3_192_alg);
 	crypto_unregister_alg(&ecb_des3_192_alg);
 
@@ -625,8 +625,8 @@ static void __exit fini(void)
 	crypto_unregister_alg(&des_alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(des_s390_init);
+module_exit(des_s390_fini);
 
 MODULE_ALIAS("des");
 MODULE_ALIAS("des3_ede");

arch/s390/crypto/sha1_s390.c

@@ -137,7 +137,7 @@ static struct crypto_alg alg = {
 			 .dia_final = sha1_final } }
 };
 
-static int __init init(void)
+static int __init sha1_s390_init(void)
 {
 	if (!crypt_s390_func_available(KIMD_SHA_1))
 		return -EOPNOTSUPP;
 
@@ -145,13 +145,13 @@ static int __init init(void)
 	return crypto_register_alg(&alg);
 }
 
-static void __exit fini(void)
+static void __exit sha1_s390_fini(void)
 {
 	crypto_unregister_alg(&alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(sha1_s390_init);
+module_exit(sha1_s390_fini);
 
 MODULE_ALIAS("sha1");

arch/s390/crypto/sha256_s390.c

@@ -133,7 +133,7 @@ static struct crypto_alg alg = {
 			 .dia_final = sha256_final } }
 };
 
-static int init(void)
+static int sha256_s390_init(void)
 {
 	if (!crypt_s390_func_available(KIMD_SHA_256))
 		return -EOPNOTSUPP;
 
@@ -141,13 +141,13 @@ static int init(void)
 	return crypto_register_alg(&alg);
 }
 
-static void __exit fini(void)
+static void __exit sha256_s390_fini(void)
 {
 	crypto_unregister_alg(&alg);
 }
 
-module_init(init);
-module_exit(fini);
+module_init(sha256_s390_init);
+module_exit(sha256_s390_fini);
 
 MODULE_ALIAS("sha256");

arch/s390/defconfig

@@ -3,6 +3,7 @@
 # Linux kernel version: 2.6.25-rc4
 # Wed Mar 5 11:22:59 2008
 #
+CONFIG_SCHED_MC=y
 CONFIG_MMU=y
 CONFIG_ZONE_DMA=y
 CONFIG_LOCKDEP_SUPPORT=y

arch/s390/kernel/Makefile

@@ -19,7 +19,7 @@ obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
 extra-y				+= head.o init_task.o vmlinux.lds
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
-obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SMP)		+= smp.o topology.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 compat-obj-$(CONFIG_AUDIT)	+= compat_audit.o

arch/s390/kernel/compat_linux.h

@@ -162,4 +162,77 @@ struct ucontext32 {
 	compat_sigset_t		uc_sigmask;	/* mask last for extensibility */
 };
 
+struct __sysctl_args32;
+struct stat64_emu31;
+struct mmap_arg_struct_emu31;
+struct fadvise64_64_args;
+struct old_sigaction32;
+
+long sys32_chown16(const char __user * filename, u16 user, u16 group);
+long sys32_lchown16(const char __user * filename, u16 user, u16 group);
+long sys32_fchown16(unsigned int fd, u16 user, u16 group);
+long sys32_setregid16(u16 rgid, u16 egid);
+long sys32_setgid16(u16 gid);
+long sys32_setreuid16(u16 ruid, u16 euid);
+long sys32_setuid16(u16 uid);
+long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
+long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
+long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
+long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
+long sys32_setfsuid16(u16 uid);
+long sys32_setfsgid16(u16 gid);
+long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
+long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
+long sys32_getuid16(void);
+long sys32_geteuid16(void);
+long sys32_getgid16(void);
+long sys32_getegid16(void);
+long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
+long sys32_truncate64(const char __user * path, unsigned long high,
+		      unsigned long low);
+long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
+long sys32_sched_rr_get_interval(compat_pid_t pid,
+				 struct compat_timespec __user *interval);
+long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+			  compat_sigset_t __user *oset, size_t sigsetsize);
+long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
+long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
+long sys32_execve(void);
+long sys32_init_module(void __user *umod, unsigned long len,
+		       const char __user *uargs);
+long sys32_delete_module(const char __user *name_user, unsigned int flags);
+long sys32_gettimeofday(struct compat_timeval __user *tv,
+			struct timezone __user *tz);
+long sys32_settimeofday(struct compat_timeval __user *tv,
+			struct timezone __user *tz);
+long sys32_pause(void);
+long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
+		   u32 poshi, u32 poslo);
+long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
+		    size_t count, u32 poshi, u32 poslo);
+compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
+long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
+		    size_t count);
+long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset,
+		      s32 count);
+long sys32_sysctl(struct __sysctl_args32 __user *args);
+long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf);
+long sys32_lstat64(char __user * filename,
+		   struct stat64_emu31 __user * statbuf);
+long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
+long sys32_fstatat64(unsigned int dfd, char __user *filename,
+		     struct stat64_emu31 __user* statbuf, int flag);
+unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
+long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
+long sys32_read(unsigned int fd, char __user * buf, size_t count);
+long sys32_write(unsigned int fd, char __user * buf, size_t count);
+long sys32_clone(void);
+long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
+long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
+long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
+		     struct old_sigaction32 __user *oact);
+long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
+			struct sigaction32 __user *oact, size_t sigsetsize);
+long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss);
+
 #endif /* _ASM_S390X_S390_H */

arch/s390/kernel/compat_signal.c

@@ -29,6 +29,7 @@
 #include <asm/lowcore.h>
 #include "compat_linux.h"
 #include "compat_ptrace.h"
+#include "entry.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -428,6 +429,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 	/* Default to using normal stack */
 	sp = (unsigned long) A(regs->gprs[15]);
 
+	/* Overflow on alternate signal stack gives SIGSEGV. */
+	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
+		return (void __user *) -1UL;
+
 	/* This is the X/Open sanctioned signal stack switching.  */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
 		if (! sas_ss_flags(sp))
 
@@ -461,6 +466,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
 		goto give_sigsegv;
 
@@ -514,6 +522,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (copy_siginfo_to_user32(&frame->info, info))
 		goto give_sigsegv;
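
The overflow check rounds the prospective frame address down to an 8-byte boundary before testing whether it still lies on the alternate stack; a tiny standalone illustration of the & -8UL rounding (all values arbitrary, not from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long sp = 0x8000f003UL, frame_size = 0x200;

		/* -8UL is ...fffffff8, so the AND rounds down to 8-byte alignment */
		printf("%#lx\n", (sp - frame_size) & -8UL);	/* prints 0x8000ee00 */
		return 0;
	}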

arch/s390/kernel/debug.c

@@ -73,7 +73,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
 static int debug_open(struct inode *inode, struct file *file);
 static int debug_close(struct inode *inode, struct file *file);
 static debug_info_t *debug_info_create(char *name, int pages_per_area,
-			int nr_areas, int buf_size);
+			int nr_areas, int buf_size, mode_t mode);
 static void debug_info_get(debug_info_t *);
 static void debug_info_put(debug_info_t *);
 static int debug_prolog_level_fn(debug_info_t * id,
 
@@ -157,7 +157,7 @@ struct debug_view debug_sprintf_view = {
 };
 
 /* used by dump analysis tools to determine version of debug feature */
-unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
+static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
 
 /* static globals */
 
@@ -327,7 +327,8 @@ debug_info_free(debug_info_t* db_info){
 */
 
 static debug_info_t*
-debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
+debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size,
+		  mode_t mode)
 {
 	debug_info_t* rc;
 
@@ -336,6 +337,8 @@ debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
 	if(!rc)
 		goto out;
 
+	rc->mode = mode & ~S_IFMT;
+
 	/* create root directory */
 	rc->debugfs_root_entry = debugfs_create_dir(rc->name,
 					debug_debugfs_root_entry);
 
@@ -676,23 +679,30 @@ debug_close(struct inode *inode, struct file *file)
 }
 
 /*
- * debug_register:
- * - creates and initializes debug area for the caller
- * - returns handle for debug area
+ * debug_register_mode:
+ * - Creates and initializes debug area for the caller
+ *   The mode parameter allows to specify access rights for the s390dbf files
+ * - Returns handle for debug area
  */
 
-debug_info_t*
-debug_register (char *name, int pages_per_area, int nr_areas, int buf_size)
+debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas,
+				  int buf_size, mode_t mode, uid_t uid,
+				  gid_t gid)
 {
 	debug_info_t *rc = NULL;
 
+	/* Since debugfs currently does not support uid/gid other than root, */
+	/* we do not allow gid/uid != 0 until we get support for that. */
+	if ((uid != 0) || (gid != 0))
+		printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
+		       "= 0 are supported. Using root as owner now!");
 	if (!initialized)
 		BUG();
 	mutex_lock(&debug_mutex);
 
 	/* create new debug_info */
-	rc = debug_info_create(name, pages_per_area, nr_areas, buf_size);
+	rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
 	if(!rc)
 		goto out;
 	debug_register_view(rc, &debug_level_view);
 
@@ -705,6 +715,20 @@ out:
 	mutex_unlock(&debug_mutex);
 	return rc;
 }
+EXPORT_SYMBOL(debug_register_mode);
+
+/*
+ * debug_register:
+ * - creates and initializes debug area for the caller
+ * - returns handle for debug area
+ */
+debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas,
+			     int buf_size)
+{
+	return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
+				   S_IRUSR | S_IWUSR, 0, 0);
+}
 
 /*
  * debug_unregister:
 
@@ -1073,15 +1097,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
 	int rc = 0;
 	int i;
 	unsigned long flags;
-	mode_t mode = S_IFREG;
+	mode_t mode;
 	struct dentry *pde;
 
 	if (!id)
 		goto out;
-	if (view->prolog_proc || view->format_proc || view->header_proc)
-		mode |= S_IRUSR;
-	if (view->input_proc)
-		mode |= S_IWUSR;
+	mode = (id->mode | S_IFREG) & ~S_IXUGO;
+	if (!(view->prolog_proc || view->format_proc || view->header_proc))
+		mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
+	if (!view->input_proc)
+		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
 	pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
 				id , &debug_file_ops);
 	if (!pde){
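
debug_register() is now a thin wrapper passing S_IRUSR | S_IWUSR, so existing callers keep their old file permissions. A standalone illustration (not part of the patch) of the resulting per-view mode computation, using octal stand-ins for the S_* constants:

	#include <stdio.h>

	int main(void)
	{
		unsigned int id_mode = 0640;	/* as passed to debug_register_mode() */
		unsigned int mode = (id_mode | 0100000) & ~0111;	/* |S_IFREG, &~S_IXUGO */
		int readable = 1;	/* view has a prolog/format/header callback */
		int writable = 0;	/* view has no input callback */

		if (!readable)
			mode &= ~0444;	/* strip all read bits */
		if (!writable)
			mode &= ~0222;	/* strip all write bits */
		printf("%o\n", mode & 07777);	/* prints 440 */
		return 0;
	}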

arch/s390/kernel/early.c

@@ -21,6 +21,7 @@
 #include <asm/setup.h>
 #include <asm/cpcmd.h>
 #include <asm/sclp.h>
+#include "entry.h"
 
 /*
  * Create a Kernel NSS if the SAVESYS= parameter is defined

arch/s390/kernel/entry.h (new file, 60 lines)

@@ -0,0 +1,60 @@
+#ifndef _ENTRY_H
+#define _ENTRY_H
+
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <asm/ptrace.h>
+
+typedef void pgm_check_handler_t(struct pt_regs *, long);
+extern pgm_check_handler_t *pgm_check_table[128];
+pgm_check_handler_t do_protection_exception;
+pgm_check_handler_t do_dat_exception;
+
+extern int sysctl_userprocess_debug;
+
+void do_single_step(struct pt_regs *regs);
+void syscall_trace(struct pt_regs *regs, int entryexit);
+void kernel_stack_overflow(struct pt_regs * regs);
+void do_signal(struct pt_regs *regs);
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
+
+void do_extint(struct pt_regs *regs, unsigned short code);
+int __cpuinit start_secondary(void *cpuvoid);
+void __init startup_init(void);
+void die(const char * str, struct pt_regs * regs, long err);
+
+struct new_utsname;
+struct mmap_arg_struct;
+struct fadvise64_64_args;
+struct old_sigaction;
+struct sel_arg_struct;
+
+long sys_pipe(unsigned long __user *fildes);
+long sys_mmap2(struct mmap_arg_struct __user *arg);
+long old_mmap(struct mmap_arg_struct __user *arg);
+long sys_ipc(uint call, int first, unsigned long second,
+	     unsigned long third, void __user *ptr);
+long s390x_newuname(struct new_utsname __user *name);
+long s390x_personality(unsigned long personality);
+long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
+		    size_t len, int advice);
+long s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
+long sys_fork(void);
+long sys_clone(void);
+long sys_vfork(void);
+void execve_tail(void);
+long sys_execve(void);
+int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
+long sys_sigaction(int sig, const struct old_sigaction __user *act,
+		   struct old_sigaction __user *oact);
+long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
+long sys_sigreturn(void);
+long sys_rt_sigreturn(void);
+long sys32_sigreturn(void);
+long sys32_rt_sigreturn(void);
+long old_select(struct sel_arg_struct __user *arg);
+long sys_ptrace(long request, long pid, long addr, long data);
+
+#endif /* _ENTRY_H */
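
A note on intent: this header mostly exists to quiet sparse ("Fix a lot of sparse warnings" in the shortlog above). These functions are entered from assembly or through the system-call table, so they previously had definitions without visible prototypes; collecting the declarations here (and in compat_linux.h for the 31-bit variants) gives every definition a matching prototype, as the removal of the local extern for handle_signal32() in signal.c below shows.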

arch/s390/kernel/entry64.S

@@ -475,6 +475,7 @@ pgm_check_handler:
 pgm_no_vtime:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
 	TRACE_IRQS_OFF
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
 
@@ -847,6 +848,7 @@ stack_overflow:
 	je	0f
 	la	%r1,__LC_SAVE_AREA+32
 0:	mvc	SP_R12(32,%r15),0(%r1)	# move %r12-%r15 to stack
+	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	jg	kernel_stack_overflow
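
For context: __LC_LAST_BREAK is the lowcore field where the machine records the breaking-event address (the source of the last taken branch). Copying it into the SP_ARGS slot of the freshly built pt_regs frame is what allows the oops path to print it later; see the show_last_breaking_event(regs) call added to show_regs() in process.c below.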

arch/s390/kernel/ipl.c

@@ -655,7 +655,7 @@ static struct kobj_attribute reipl_type_attr =
 
 static struct kset *reipl_kset;
 
-void reipl_run(struct shutdown_trigger *trigger)
+static void reipl_run(struct shutdown_trigger *trigger)
 {
 	struct ccw_dev_id devid;
 	static char buf[100];

arch/s390/kernel/kprobes.c

@@ -360,7 +360,7 @@ no_kprobe:
  *	- When the probed function returns, this probe
  *		causes the handlers to fire
  */
-void kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
 {
 	asm volatile(".global kretprobe_trampoline\n"
 		     "kretprobe_trampoline: bcr 0,0\n");

arch/s390/kernel/process.c

@@ -36,6 +36,8 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/utsname.h>
+#include <linux/tick.h>
+#include <linux/elfcore.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
@@ -44,6 +46,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
+#include "entry.h"
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
@@ -76,6 +79,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
 
@@ -89,9 +93,33 @@
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
+{
+	struct s390_idle_data *idle;
+	int nr_calls = 0;
+	void *hcpu;
+	int rc;
+
+	hcpu = (void *)(long)smp_processor_id();
+	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+					  &nr_calls);
+	if (rc == NOTIFY_BAD) {
+		nr_calls--;
+		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+					     hcpu, nr_calls, NULL);
+		return rc;
+	}
+	idle = &__get_cpu_var(s390_idle);
+	spin_lock(&idle->lock);
+	idle->idle_count++;
+	idle->in_idle = 1;
+	idle->idle_enter = get_clock();
+	spin_unlock(&idle->lock);
+	return NOTIFY_OK;
+}
+
+void s390_idle_leave(void)
 {
-#ifdef CONFIG_SMP
 	struct s390_idle_data *idle;
 
 	idle = &__get_cpu_var(s390_idle);
 
@@ -99,10 +127,6 @@
 	idle->idle_time += get_clock() - idle->idle_enter;
 	idle->in_idle = 0;
 	spin_unlock(&idle->lock);
-#endif
-
-	/* disable monitor call class 0 */
-	__ctl_clear_bit(8, 15);
 	atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
 				   (void *)(long) smp_processor_id());
 }
 
@@ -113,61 +137,30 @@ extern void s390_handle_mcck(void);
  */
 static void default_idle(void)
 {
-	int cpu, rc;
-	int nr_calls = 0;
-	void *hcpu;
-#ifdef CONFIG_SMP
-	struct s390_idle_data *idle;
-#endif
-
 	/* CPU is going idle. */
-	cpu = smp_processor_id();
-	hcpu = (void *)(long)cpu;
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
 		return;
 	}
-
-	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-					  &nr_calls);
-	if (rc == NOTIFY_BAD) {
-		nr_calls--;
-		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					     hcpu, nr_calls, NULL);
+	if (s390_idle_enter() == NOTIFY_BAD) {
 		local_irq_enable();
 		return;
 	}
-
-	/* enable monitor call class 0 */
-	__ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_is_offline(cpu)) {
+	if (cpu_is_offline(smp_processor_id())) {
 		preempt_enable_no_resched();
 		cpu_die();
 	}
 #endif
-
 	local_mcck_disable();
 	if (test_thread_flag(TIF_MCCK_PENDING)) {
 		local_mcck_enable();
-		/* disable monitor call class 0 */
-		__ctl_clear_bit(8, 15);
-		atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					   hcpu);
+		s390_idle_leave();
 		local_irq_enable();
 		s390_handle_mcck();
 		return;
 	}
-#ifdef CONFIG_SMP
-	idle = &__get_cpu_var(s390_idle);
-	spin_lock(&idle->lock);
-	idle->idle_count++;
-	idle->in_idle = 1;
-	idle->idle_enter = get_clock();
-	spin_unlock(&idle->lock);
-#endif
 	trace_hardirqs_on();
 	/* Wait for external, I/O or machine check interrupt. */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
 
@@ -177,9 +170,10 @@ static void default_idle(void)
 void cpu_idle(void)
 {
 	for (;;) {
+		tick_nohz_stop_sched_tick();
 		while (!need_resched())
 			default_idle();
-
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
 
@@ -201,6 +195,7 @@ void show_regs(struct pt_regs *regs)
 	/* Show stack backtrace if pt_regs is from kernel mode */
 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
 		show_trace(NULL, (unsigned long *) regs->gprs[15]);
+	show_last_breaking_event(regs);
 }
 
 extern void kernel_thread_starter(void);
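
Net effect in process.c: the monitor-call based idle wakeup (control register bit 8.15 plus do_monitor_call()) is gone, the idle-time bookkeeping moves into s390_idle_enter()/s390_idle_leave(), and cpu_idle() now brackets the idle loop with the generic tick_nohz_stop_sched_tick()/tick_nohz_restart_sched_tick() calls instead of the removed NO_IDLE_HZ machinery.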

arch/s390/kernel/ptrace.c

@@ -41,6 +41,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include "entry.h"
 
 #ifdef CONFIG_COMPAT
 #include "compat_ptrace.h"

arch/s390/kernel/s390_ext.c

@@ -13,11 +13,12 @@
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
+#include <asm/cpu.h>
 #include <asm/lowcore.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
 #include <asm/irq.h>
+#include "entry.h"
 
 /*
  * ext_int_hash[index] is the start of the list for all external interrupts
 
@@ -119,13 +120,10 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-	asm volatile ("mc 0,0");
-	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
-		/**
-		 * Make sure that the i/o interrupt did not "overtake"
-		 * the last HZ timer interrupt.
-		 */
-		account_ticks(S390_lowcore.int_clock);
+	s390_idle_check();
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {

arch/s390/kernel/setup.c

@@ -39,6 +39,7 @@
 #include <linux/pfn.h>
 #include <linux/ctype.h>
 #include <linux/reboot.h>
+#include <linux/topology.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
 
@@ -427,7 +428,7 @@ setup_lowcore(void)
 	lc->io_new_psw.mask = psw_kernel_bits;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->ipl_device = S390_lowcore.ipl_device;
-	lc->jiffy_timer = -1LL;
+	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
 	lc->async_stack = (unsigned long)
 		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
 
@@ -687,7 +688,7 @@ static __init unsigned int stfl(void)
 	return S390_lowcore.stfl_fac_list;
 }
 
-static __init int stfle(unsigned long long *list, int doublewords)
+static int __init __stfle(unsigned long long *list, int doublewords)
 {
 	typedef struct { unsigned long long _[doublewords]; } addrtype;
 	register unsigned long __nr asm("0") = doublewords - 1;
 
@@ -697,6 +698,13 @@
 	return __nr + 1;
 }
 
+int __init stfle(unsigned long long *list, int doublewords)
+{
+	if (!(stfl() & (1UL << 24)))
+		return -EOPNOTSUPP;
+	return __stfle(list, doublewords);
+}
+
 /*
  * Setup hardware capabilities.
  */
 
@@ -741,7 +749,7 @@ static void __init setup_hwcaps(void)
 	 * HWCAP_S390_DFP bit 6.
 	 */
 	if ((elf_hwcap & (1UL << 2)) &&
-	    stfle(&facility_list_extended, 1) > 0) {
+	    __stfle(&facility_list_extended, 1) > 0) {
 		if (facility_list_extended & (1ULL << (64 - 43)))
 			elf_hwcap |= 1UL << 6;
 	}
 
@@ -823,6 +831,7 @@ setup_arch(char **cmdline_p)
 	cpu_init();
 	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+	s390_init_cpu_topology();
 
 	/*
	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).

arch/s390/kernel/signal.c

@@ -27,6 +27,7 @@
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
+#include "entry.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
@@ -235,6 +236,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 	/* Default to using normal stack */
 	sp = regs->gprs[15];
 
+	/* Overflow on alternate signal stack gives SIGSEGV. */
+	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
+		return (void __user *) -1UL;
+
 	/* This is the X/Open sanctioned signal stack switching.  */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
 		if (! sas_ss_flags(sp))
 
@@ -270,6 +275,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
 		goto give_sigsegv;
 
@@ -327,6 +335,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
 		goto give_sigsegv;
 
+	if (frame == (void __user *) -1UL)
+		goto give_sigsegv;
+
 	if (copy_siginfo_to_user(&frame->info, info))
 		goto give_sigsegv;
 
@@ -474,11 +485,6 @@ void do_signal(struct pt_regs *regs)
 		int ret;
 #ifdef CONFIG_COMPAT
 		if (test_thread_flag(TIF_31BIT)) {
-			extern int handle_signal32(unsigned long sig,
-						   struct k_sigaction *ka,
-						   siginfo_t *info,
-						   sigset_t *oldset,
-						   struct pt_regs *regs);
 			ret = handle_signal32(signr, &ka, &info, oldset, regs);
 		}
 		else

arch/s390/kernel/smp.c

@@ -44,6 +44,7 @@
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/cpu.h>
+#include "entry.h"
 
 /*
  * An array with a pointer the lowcore of every CPU.
 
@@ -67,13 +68,12 @@ enum s390_cpu_state {
 	CPU_STATE_CONFIGURED,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-static DEFINE_MUTEX(smp_cpu_state_mutex);
-#endif
+DEFINE_MUTEX(smp_cpu_state_mutex);
+int smp_cpu_polarization[NR_CPUS];
 static int smp_cpu_state[NR_CPUS];
+static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
@@ -298,7 +298,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 /*
  * this function sends a 'purge tlb' signal to another CPU.
  */
-void smp_ptlb_callback(void *info)
+static void smp_ptlb_callback(void *info)
 {
 	__tlb_flush_local();
 }
 
@@ -456,6 +456,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
 		if (cpu_known(cpu_id))
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
+		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 		if (!cpu_stopped(logical_cpu))
 			continue;
 		cpu_set(logical_cpu, cpu_present_map);
 
@@ -489,6 +490,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
 		if (cpu_known(cpu_id))
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
+		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 		cpu_set(logical_cpu, cpu_present_map);
 		if (cpu >= info->configured)
 			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
 
@@ -846,6 +848,7 @@ void __init smp_prepare_boot_cpu(void)
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
 	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
+	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
@@ -897,15 +900,19 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
 	case 0:
 		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
 			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
-			if (!rc)
+			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+			}
 		}
 		break;
 	case 1:
 		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
 			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
-			if (!rc)
+			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+			}
 		}
 		break;
 	default:
 
@@ -919,6 +926,34 @@ out:
 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
+{
+	int cpu = dev->id;
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	switch (smp_cpu_polarization[cpu]) {
+	case POLARIZATION_HRZ:
+		count = sprintf(buf, "horizontal\n");
+		break;
+	case POLARIZATION_VL:
+		count = sprintf(buf, "vertical:low\n");
+		break;
+	case POLARIZATION_VM:
+		count = sprintf(buf, "vertical:medium\n");
+		break;
+	case POLARIZATION_VH:
+		count = sprintf(buf, "vertical:high\n");
+		break;
+	default:
+		count = sprintf(buf, "unknown\n");
+		break;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
 static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
 {
 	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
 
@@ -931,6 +966,7 @@ static struct attribute *cpu_common_attrs[] = {
 	&attr_configure.attr,
 #endif
 	&attr_address.attr,
+	&attr_polarization.attr,
 	NULL,
 };
 
@@ -1075,11 +1111,48 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
 out:
 	put_online_cpus();
 	mutex_unlock(&smp_cpu_state_mutex);
+	if (!cpus_empty(newcpus))
+		topology_schedule_update();
 	return rc ? rc : count;
 }
 static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static ssize_t dispatching_show(struct sys_device *dev, char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", cpu_management);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
+				 size_t count)
+{
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	rc = 0;
+	mutex_lock(&smp_cpu_state_mutex);
+	get_online_cpus();
+	if (cpu_management == val)
+		goto out;
+	rc = topology_set_cpu_management(val);
+	if (!rc)
+		cpu_management = val;
+out:
+	put_online_cpus();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
+
 static int __init topology_init(void)
 {
 	int cpu;
 
@@ -1093,6 +1166,10 @@ static int __init topology_init(void)
 		if (rc)
 			return rc;
 #endif
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_dispatching.attr);
+	if (rc)
+		return rc;
 	for_each_present_cpu(cpu) {
 		rc = smp_add_present_cpu(cpu);
 		if (rc)
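
A hypothetical userspace probe of the two new attributes (the paths follow the standard sysdev layout for CPUs; "dispatching" is the system-wide horizontal (0) / vertical (1) switch backed by topology_set_cpu_management(), "polarization" is per CPU):

	#include <stdio.h>

	static void show(const char *path)
	{
		char buf[64];
		FILE *f = fopen(path, "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("%s: %s", path, buf);
		if (f)
			fclose(f);
	}

	int main(void)
	{
		show("/sys/devices/system/cpu/dispatching");
		show("/sys/devices/system/cpu/cpu0/polarization");
		return 0;
	}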

arch/s390/kernel/sys_s390.c

@@ -29,8 +29,8 @@
 #include <linux/personality.h>
 #include <linux/unistd.h>
 #include <linux/ipc.h>
-
 #include <asm/uaccess.h>
+#include "entry.h"
 
 /*
  * sys_pipe() is the normal C calling standard for creating

arch/s390/kernel/time.c

@@ -30,7 +30,7 @@
 #include <linux/timex.h>
 #include <linux/notifier.h>
 #include <linux/clocksource.h>
-
+#include <linux/clockchips.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
 
@@ -39,6 +39,7 @@
 #include <asm/irq_regs.h>
 #include <asm/timer.h>
 #include <asm/etr.h>
+#include <asm/cio.h>
 
 /* change this if you have some constant time drift */
 #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
 
@@ -57,16 +58,16 @@
 static ext_int_info_t ext_int_info_cc;
 static ext_int_info_t ext_int_etr_cc;
-static u64 init_timer_cc;
 static u64 jiffies_timer_cc;
-static u64 xtime_cc;
+
+static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
 /*
  * Scheduler clock - returns current time in nanosec units.
 */
 unsigned long long sched_clock(void)
 {
-	return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
+	return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9;
 }
 
 /*
 
@@ -95,162 +96,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
 #define s390_do_profile()	do { ; } while(0)
 #endif /* CONFIG_PROFILING */
 
-/*
- * Advance the per cpu tick counter up to the time given with the
- * "time" argument. The per cpu update consists of accounting
- * the virtual cpu time, calling update_process_times and calling
- * the profiling hook. If xtime is before time it is advanced as well.
- */
-void account_ticks(u64 time)
+void clock_comparator_work(void)
 {
-	__u32 ticks;
-	__u64 tmp;
-
-	/* Calculate how many ticks have passed. */
-	if (time < S390_lowcore.jiffy_timer)
-		return;
-	tmp = time - S390_lowcore.jiffy_timer;
-	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
-		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
-		S390_lowcore.jiffy_timer +=
-			CLK_TICKS_PER_JIFFY * (__u64) ticks;
-	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
-		ticks = 2;
-		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
-	} else {
-		ticks = 1;
-		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
-	}
-
-#ifdef CONFIG_SMP
-	/*
-	 * Do not rely on the boot cpu to do the calls to do_timer.
-	 * Spread it over all cpus instead.
-	 */
-	write_seqlock(&xtime_lock);
-	if (S390_lowcore.jiffy_timer > xtime_cc) {
-		__u32 xticks;
-		tmp = S390_lowcore.jiffy_timer - xtime_cc;
-		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
-			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
-			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
-		} else {
-			xticks = 1;
-			xtime_cc += CLK_TICKS_PER_JIFFY;
-		}
-		do_timer(xticks);
-	}
-	write_sequnlock(&xtime_lock);
-#else
-	do_timer(ticks);
-#endif
-
-	while (ticks--)
-		update_process_times(user_mode(get_irq_regs()));
+	struct clock_event_device *cd;
 
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	cd = &__get_cpu_var(comparators);
+	cd->event_handler(cd);
 	s390_do_profile();
 }
 
-#ifdef CONFIG_NO_IDLE_HZ
-
-#ifdef CONFIG_NO_IDLE_HZ_INIT
-int sysctl_hz_timer = 0;
-#else
-int sysctl_hz_timer = 1;
-#endif
-
 /*
- * Stop the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
+ * Fixup the clock comparator.
  */
-static void stop_hz_timer(void)
+static void fixup_clock_comparator(unsigned long long delta)
 {
-	unsigned long flags;
-	unsigned long seq, next;
-	__u64 timer, todval;
-	int cpu = smp_processor_id();
-
-	if (sysctl_hz_timer != 0)
+	/* If nobody is waiting there's nothing to fix. */
+	if (S390_lowcore.clock_comparator == -1ULL)
 		return;
-
-	cpu_set(cpu, nohz_cpu_mask);
-
-	/*
-	 * Leave the clock comparator set up for the next timer
-	 * tick if either rcu or a softirq is pending.
-	 */
-	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
-		cpu_clear(cpu, nohz_cpu_mask);
-		return;
-	}
-
-	/*
-	 * This cpu is going really idle. Set up the clock comparator
-	 * for the next event.
-	 */
-	next = next_timer_interrupt();
-	do {
-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-	todval = -1ULL;
-	/* Be careful about overflows. */
-	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
-		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
-		if (timer >= jiffies_timer_cc)
-			todval = timer;
-	}
-	set_clock_comparator(todval);
+	S390_lowcore.clock_comparator += delta;
+	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-/*
- * Start the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
- */
-static void start_hz_timer(void)
+static int s390_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
 {
-	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
-		return;
-	account_ticks(get_clock());
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
-	cpu_clear(smp_processor_id(), nohz_cpu_mask);
+	S390_lowcore.clock_comparator = get_clock() + delta;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	return 0;
 }
 
-static int nohz_idle_notify(struct notifier_block *self,
-			    unsigned long action, void *hcpu)
+static void s390_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt)
 {
-	switch (action) {
-	case S390_CPU_IDLE:
-		stop_hz_timer();
-		break;
-	case S390_CPU_NOT_IDLE:
-		start_hz_timer();
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block nohz_idle_nb = {
-	.notifier_call = nohz_idle_notify,
-};
-
-static void __init nohz_init(void)
-{
-	if (register_idle_notifier(&nohz_idle_nb))
-		panic("Couldn't register idle notifier");
-}
-
-#endif
-
-/*
- * Set up per cpu jiffy timer and set the clock comparator.
- */
-static void setup_jiffy_timer(void)
-{
-	/* Set up clock comparator to next jiffy. */
-	S390_lowcore.jiffy_timer =
-		jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 /*
 
@@ -259,7 +138,26 @@
 */
 void init_cpu_timer(void)
 {
-	setup_jiffy_timer();
+	struct clock_event_device *cd;
+	int cpu;
+
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+
+	cpu = smp_processor_id();
+	cd = &per_cpu(comparators, cpu);
+	cd->name		= "comparator";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->mult		= 16777;
+	cd->shift		= 12;
+	cd->min_delta_ns	= 1;
+	cd->max_delta_ns	= LONG_MAX;
+	cd->rating		= 400;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= s390_next_event;
+	cd->set_mode		= s390_set_mode;
+
+	clockevents_register_device(cd);
 
 	/* Enable clock comparator timer interrupt. */
 	__ctl_set_bit(0,11);
 
@@ -270,8 +168,6 @@ void init_cpu_timer(void)
 
 static void clock_comparator_interrupt(__u16 code)
 {
-	/* set clock comparator for next tick */
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 static void etr_reset(void);
 
@@ -316,8 +212,9 @@ static struct clocksource clocksource_tod = {
 */
 void __init time_init(void)
 {
+	u64 init_timer_cc;
+
 	init_timer_cc = reset_tod_clock();
-	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
 	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
 
 	/* set xtime */
 
@@ -342,10 +239,6 @@ void __init time_init(void)
 	/* Enable TOD clock interrupts on the boot cpu. */
 	init_cpu_timer();
 
-#ifdef CONFIG_NO_IDLE_HZ
-	nohz_init();
-#endif
-
 #ifdef CONFIG_VIRT_TIMER
 	vtime_init();
 #endif
 
@@ -699,53 +592,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
 }
 
 /*
- * The time is "clock". xtime is what we think the time is.
+ * The time is "clock". old is what we think the time is.
 * Adjust the value by a multiple of jiffies and add the delta to ntp.
 * "delay" is an approximation how long the synchronization took. If
 * the time correction is positive, then "delay" is subtracted from
 * the time difference and only the remaining part is passed to ntp.
 */
-static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
+static unsigned long long etr_adjust_time(unsigned long long old,
+					  unsigned long long clock,
+					  unsigned long long delay)
 {
 	unsigned long long delta, ticks;
 	struct timex adjust;
 
-	/*
-	 * We don't have to take the xtime lock because the cpu
-	 * executing etr_adjust_time is running disabled in
-	 * tasklet context and all other cpus are looping in
-	 * etr_sync_cpu_start.
-	 */
-	if (clock > xtime_cc) {
+	if (clock > old) {
 		/* It is later than we thought. */
-		delta = ticks = clock - xtime_cc;
+		delta = ticks = clock - old;
 		delta = ticks = (delta < delay) ? 0 : delta - delay;
 		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
-		init_timer_cc = init_timer_cc + delta;
-		jiffies_timer_cc = jiffies_timer_cc + delta;
-		xtime_cc = xtime_cc + delta;
 		adjust.offset = ticks * (1000000 / HZ);
 	} else {
 		/* It is earlier than we thought. */
-		delta = ticks = xtime_cc - clock;
+		delta = ticks = old - clock;
 		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
-		init_timer_cc = init_timer_cc - delta;
-		jiffies_timer_cc = jiffies_timer_cc - delta;
-		xtime_cc = xtime_cc - delta;
+		delta = -delta;
 		adjust.offset = -ticks * (1000000 / HZ);
 	}
+	jiffies_timer_cc += delta;
 	if (adjust.offset != 0) {
 		printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
		       adjust.offset);
 		adjust.modes = ADJ_OFFSET_SINGLESHOT;
 		do_adjtimex(&adjust);
 	}
+	return delta;
 }
 
+static struct {
+	int in_sync;
+	unsigned long long fixup_cc;
+} etr_sync;
+
 static void etr_sync_cpu_start(void *dummy)
 {
-	int *in_sync = dummy;
-
 	etr_enable_sync_clock();
 	/*
	 * This looks like a busy wait loop but it isn't. etr_sync_cpus
 
@@ -753,7 +642,7 @@ static void etr_sync_cpu_start(void *dummy)
	 * __udelay will stop the cpu on an enabled wait psw until the
	 * TOD is running again.
	 */
-	while (*in_sync == 0) {
+	while (etr_sync.in_sync == 0) {
 		__udelay(1);
 		/*
		 * A different cpu changes *in_sync. Therefore use
 
@@ -761,14 +650,14 @@ static void etr_sync_cpu_start(void *dummy)
		 */
 		barrier();
 	}
-	if (*in_sync != 1)
+	if (etr_sync.in_sync != 1)
 		/* Didn't work. Clear per-cpu in sync bit again. */
 		etr_disable_sync_clock(NULL);
 	/*
	 * This round of TOD syncing is done. Set the clock comparator
	 * to the next tick and let the processor continue.
	 */
-	setup_jiffy_timer();
+	fixup_clock_comparator(etr_sync.fixup_cc);
 }
 
 static void etr_sync_cpu_end(void *dummy)
 
@@ -783,8 +672,8 @@ static void etr_sync_cpu_end(void *dummy)
 static int etr_sync_clock(struct etr_aib *aib, int port)
 {
 	struct etr_aib *sync_port;
-	unsigned long long clock, delay;
-	int in_sync, follows;
+	unsigned long long clock, old_clock, delay, delta;
+	int follows;
 	int rc;
 
 	/* Check if the current aib is adjacent to the sync port aib. */
 
@@ -799,9 +688,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
	 * successfully synced the clock. smp_call_function will
	 * return after all other cpus are in etr_sync_cpu_start.
	 */
-	in_sync = 0;
+	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
+	smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
 	local_irq_disable();
 	etr_enable_sync_clock();
 
@@ -809,6 +698,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
+	old_clock = get_clock();
 	if (set_clock(clock) == 0) {
 		__udelay(1);	/* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
 
@@ -817,16 +707,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		/* Adjust Linux timing variables. */
 		delay = (unsigned long long)
 			(aib->edf2.etv - sync_port->edf2.etv) << 32;
-		etr_adjust_time(clock, delay);
-		setup_jiffy_timer();
+		delta = etr_adjust_time(old_clock, clock, delay);
+		etr_sync.fixup_cc = delta;
+		fixup_clock_comparator(delta);
 		/* Verify that the clock is properly set. */
 		if (!etr_aib_follows(sync_port, aib, port)) {
 			/* Didn't work. */
 			etr_disable_sync_clock(NULL);
-			in_sync = -EAGAIN;
+			etr_sync.in_sync = -EAGAIN;
 			rc = -EAGAIN;
 		} else {
-			in_sync = 1;
+			etr_sync.in_sync = 1;
 			rc = 0;
 		}
 	} else {
 
@@ -834,7 +725,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
 		etr_disable_sync_clock(NULL);
-		in_sync = -EAGAIN;
+		etr_sync.in_sync = -EAGAIN;
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
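
The conversion factors deserve a gloss: one TOD clock unit is 1/4096 microsecond, so a nanosecond delta is converted to TOD units by multiplying with roughly 4.096, which the clock_event_device encodes as mult=16777, shift=12 (16777/2^12 is about 4.0962); sched_clock() applies the inverse scaling with (tod * 125) >> 9, i.e. 0.244140625 ns per unit. A quick self-contained round-trip check (illustrative only, plain C):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ns = 1000000;			/* 1 ms */
		uint64_t tod = (ns * 16777) >> 12;	/* ns -> TOD units (mult/shift) */
		uint64_t back = (tod * 125) >> 9;	/* TOD units -> ns (sched_clock) */

		assert(back > 999900 && back < 1000100);	/* round trip within ~0.01% */
		return 0;
	}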

arch/s390/kernel/topology.c (new file, 314 lines)

@@ -0,0 +1,314 @@
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/bootmem.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <asm/delay.h>
+#include <asm/s390_ext.h>
+#include <asm/sysinfo.h>
+
+#define CPU_BITS 64
+#define NR_MAG 6
+
+#define PTF_HORIZONTAL	(0UL)
+#define PTF_VERTICAL	(1UL)
+#define PTF_CHECK	(2UL)
+
+struct tl_cpu {
+	unsigned char reserved0[4];
+	unsigned char :6;
+	unsigned char pp:2;
+	unsigned char reserved1;
+	unsigned short origin;
+	unsigned long mask[CPU_BITS / BITS_PER_LONG];
+};
+
+struct tl_container {
+	unsigned char reserved[8];
+};
+
+union tl_entry {
+	unsigned char nl;
+	struct tl_cpu cpu;
+	struct tl_container container;
+};
+
+struct tl_info {
+	unsigned char reserved0[2];
+	unsigned short length;
+	unsigned char mag[NR_MAG];
+	unsigned char reserved1;
+	unsigned char mnest;
+	unsigned char reserved2[4];
+	union tl_entry tle[0];
+};
+
+struct core_info {
+	struct core_info *next;
+	cpumask_t mask;
+};
+
+static void topology_work_fn(struct work_struct *work);
+static struct tl_info *tl_info;
+static struct core_info core_info;
+static int machine_has_topology;
+static int machine_has_topology_irq;
+static struct timer_list topology_timer;
+static void set_topology_timer(void);
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+	struct core_info *core = &core_info;
+	cpumask_t mask;
+
+	cpus_clear(mask);
+	if (!machine_has_topology)
+		return cpu_present_map;
+	mutex_lock(&smp_cpu_state_mutex);
+	while (core) {
+		if (cpu_isset(cpu, core->mask)) {
+			mask = core->mask;
+			break;
+		}
+		core = core->next;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	if (cpus_empty(mask))
+		mask = cpumask_of_cpu(cpu);
+	return mask;
+}
+
+static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+{
+	unsigned int cpu;
+
+	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
+	     cpu < CPU_BITS;
+	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
+	{
+		unsigned int rcpu, lcpu;
+
+		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
+		for_each_present_cpu(lcpu) {
+			if (__cpu_logical_map[lcpu] == rcpu) {
+				cpu_set(lcpu, core->mask);
+				smp_cpu_polarization[lcpu] = tl_cpu->pp;
+			}
+		}
+	}
+}
+
+static void clear_cores(void)
+{
+	struct core_info *core = &core_info;
+
+	while (core) {
+		cpus_clear(core->mask);
+		core = core->next;
+	}
+}
+
+static union tl_entry *next_tle(union tl_entry *tle)
+{
+	if (tle->nl)
+		return (union tl_entry *)((struct tl_container *)tle + 1);
+	else
+		return (union tl_entry *)((struct tl_cpu *)tle + 1);
+}
+
+static void tl_to_cores(struct tl_info *info)
+{
+	union tl_entry *tle, *end;
+	struct core_info *core = &core_info;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	clear_cores();
+	tle = info->tle;
+	end = (union tl_entry *)((unsigned long)info + info->length);
+	while (tle < end) {
+		switch (tle->nl) {
+		case 5:
+		case 4:
+		case 3:
+		case 2:
+			break;
+		case 1:
+			core = core->next;
+			break;
+		case 0:
+			add_cpus_to_core(&tle->cpu, core);
+			break;
+		default:
+			clear_cores();
+			machine_has_topology = 0;
+			return;
+		}
+		tle = next_tle(tle);
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+}
+
+static void topology_update_polarization_simple(void)
+{
+	int cpu;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	for_each_present_cpu(cpu)
+		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+	mutex_unlock(&smp_cpu_state_mutex);
+}
+
+static int ptf(unsigned long fc)
+{
+	int rc;
+
+	asm volatile(
+		"	.insn	rre,0xb9a20000,%1,%1\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (rc)
+		: "d" (fc)  : "cc");
+	return rc;
+}
+
+int topology_set_cpu_management(int fc)
+{
+	int cpu;
+	int rc;
+
+	if (!machine_has_topology)
+		return -EOPNOTSUPP;
+	if (fc)
+		rc = ptf(PTF_VERTICAL);
+	else
+		rc = ptf(PTF_HORIZONTAL);
+	if (rc)
+		return -EBUSY;
+	for_each_present_cpu(cpu)
+		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+	return rc;
+}
+
+void arch_update_cpu_topology(void)
{
struct tl_info *info = tl_info;
struct sys_device *sysdev;
int cpu;
if (!machine_has_topology) {
topology_update_polarization_simple();
return;
}
stsi(info, 15, 1, 2);
tl_to_cores(info);
for_each_online_cpu(cpu) {
sysdev = get_cpu_sysdev(cpu);
kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
}
}
static void topology_work_fn(struct work_struct *work)
{
arch_reinit_sched_domains();
}
void topology_schedule_update(void)
{
schedule_work(&topology_work);
}
static void topology_timer_fn(unsigned long ignored)
{
if (ptf(PTF_CHECK))
topology_schedule_update();
set_topology_timer();
}
static void set_topology_timer(void)
{
topology_timer.function = topology_timer_fn;
topology_timer.data = 0;
topology_timer.expires = jiffies + 60 * HZ;
add_timer(&topology_timer);
}
static void topology_interrupt(__u16 code)
{
schedule_work(&topology_work);
}
static int __init init_topology_update(void)
{
int rc;
if (!machine_has_topology) {
topology_update_polarization_simple();
return 0;
}
init_timer_deferrable(&topology_timer);
if (machine_has_topology_irq) {
rc = register_external_interrupt(0x2005, topology_interrupt);
if (rc)
return rc;
ctl_set_bit(0, 8);
}
else
set_topology_timer();
return 0;
}
__initcall(init_topology_update);
void __init s390_init_cpu_topology(void)
{
unsigned long long facility_bits;
struct tl_info *info;
struct core_info *core;
int nr_cores;
int i;
if (stfle(&facility_bits, 1) <= 0)
return;
if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
return;
machine_has_topology = 1;
if (facility_bits & (1ULL << 51))
machine_has_topology_irq = 1;
tl_info = alloc_bootmem_pages(PAGE_SIZE);
if (!tl_info)
goto error;
info = tl_info;
stsi(info, 15, 1, 2);
nr_cores = info->mag[NR_MAG - 2];
for (i = 0; i < info->mnest - 2; i++)
nr_cores *= info->mag[NR_MAG - 3 - i];
printk(KERN_INFO "CPU topology:");
for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
core = &core_info;
for (i = 0; i < nr_cores; i++) {
core->next = alloc_bootmem(sizeof(struct core_info));
core = core->next;
if (!core)
goto error;
}
return;
error:
machine_has_topology = 0;
machine_has_topology_irq = 0;
}
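s390_init_cpu_topology() derives the maximum number of cores from the magnitude vector returned by stsi 15.1.2: mag[NR_MAG - 2] cores per lowest-level container, multiplied through the remaining mnest - 2 nesting levels. A standalone sketch of the same computation, with assumed sample values:

#include <stdio.h>

#define NR_MAG 6

int main(void)
{
	/* Example SYSIB 15.1.2 data: 4 containers of 6 cores each. */
	unsigned char mag[NR_MAG] = { 0, 0, 0, 4, 6, 0 };
	unsigned char mnest = 3;
	int nr_cores, i;

	/* Cores per lowest-level container ... */
	nr_cores = mag[NR_MAG - 2];
	/* ... times the container counts of the higher levels. */
	for (i = 0; i < mnest - 2; i++)
		nr_cores *= mag[NR_MAG - 3 - i];

	printf("topology describes up to %d cores\n", nr_cores);
	return 0;
}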
View file
@ -42,11 +42,8 @@
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/debug.h> #include <asm/debug.h>
#include "entry.h"
/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);
typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128]; pgm_check_handler_t *pgm_check_table[128];
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
@ -59,7 +56,6 @@ int sysctl_userprocess_debug = 0;
extern pgm_check_handler_t do_protection_exception; extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception; extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;
extern pgm_check_handler_t do_asce_exception; extern pgm_check_handler_t do_asce_exception;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@ -138,7 +134,6 @@ void show_trace(struct task_struct *task, unsigned long *stack)
else else
__show_trace(sp, S390_lowcore.thread_info, __show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE); S390_lowcore.thread_info + THREAD_SIZE);
printk("\n");
if (!task) if (!task)
task = current; task = current;
debug_show_held_locks(task); debug_show_held_locks(task);
@ -166,6 +161,15 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_trace(task, sp); show_trace(task, sp);
} }
#ifdef CONFIG_64BIT
void show_last_breaking_event(struct pt_regs *regs)
{
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
}
#endif
/* /*
* The architecture-independent dump_stack generator * The architecture-independent dump_stack generator
*/ */
@ -739,6 +743,5 @@ void __init trap_init(void)
pgm_check_table[0x15] = &operand_exception; pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &space_switch_exception; pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception; pgm_check_table[0x1D] = &hfp_sqrt_exception;
pgm_check_table[0x40] = &do_monitor_call;
pfault_irq_init(); pfault_irq_init();
} }
View file
@ -34,7 +34,7 @@ void __delay(unsigned long loops)
*/ */
void __udelay(unsigned long usecs) void __udelay(unsigned long usecs)
{ {
u64 end, time, jiffy_timer = 0; u64 end, time, old_cc = 0;
unsigned long flags, cr0, mask, dummy; unsigned long flags, cr0, mask, dummy;
int irq_context; int irq_context;
@ -43,8 +43,8 @@ void __udelay(unsigned long usecs)
local_bh_disable(); local_bh_disable();
local_irq_save(flags); local_irq_save(flags);
if (raw_irqs_disabled_flags(flags)) { if (raw_irqs_disabled_flags(flags)) {
jiffy_timer = S390_lowcore.jiffy_timer; old_cc = S390_lowcore.clock_comparator;
S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); S390_lowcore.clock_comparator = -1ULL;
__ctl_store(cr0, 0, 0); __ctl_store(cr0, 0, 0);
dummy = (cr0 & 0xffff00e0) | 0x00000800; dummy = (cr0 & 0xffff00e0) | 0x00000800;
__ctl_load(dummy , 0, 0); __ctl_load(dummy , 0, 0);
@ -55,8 +55,8 @@ void __udelay(unsigned long usecs)
end = get_clock() + ((u64) usecs << 12); end = get_clock() + ((u64) usecs << 12);
do { do {
time = end < S390_lowcore.jiffy_timer ? time = end < S390_lowcore.clock_comparator ?
end : S390_lowcore.jiffy_timer; end : S390_lowcore.clock_comparator;
set_clock_comparator(time); set_clock_comparator(time);
trace_hardirqs_on(); trace_hardirqs_on();
__load_psw_mask(mask); __load_psw_mask(mask);
@ -65,10 +65,10 @@ void __udelay(unsigned long usecs)
if (raw_irqs_disabled_flags(flags)) { if (raw_irqs_disabled_flags(flags)) {
__ctl_load(cr0, 0, 0); __ctl_load(cr0, 0, 0);
S390_lowcore.jiffy_timer = jiffy_timer; S390_lowcore.clock_comparator = old_cc;
} }
if (!irq_context) if (!irq_context)
_local_bh_enable(); _local_bh_enable();
set_clock_comparator(S390_lowcore.jiffy_timer); set_clock_comparator(S390_lowcore.clock_comparator);
local_irq_restore(flags); local_irq_restore(flags);
} }
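The (u64) usecs << 12 above converts microseconds into s390 TOD clock units: bit 51 of the TOD clock ticks once per microsecond, so one microsecond corresponds to 4096 units. A standalone illustration of the conversion:

#include <stdio.h>
#include <stdint.h>

/* One microsecond equals 1 << 12 TOD clock units. */
static uint64_t usecs_to_tod(uint64_t usecs)
{
	return usecs << 12;
}

int main(void)
{
	uint64_t now = 0;	/* stand-in for get_clock() */
	uint64_t end = now + usecs_to_tod(250);

	printf("250us = %llu TOD units, end = %llu\n",
	       (unsigned long long)usecs_to_tod(250),
	       (unsigned long long)end);
	return 0;
}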
View file
@ -302,6 +302,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
pte_t *pte_from, *pte_to; pte_t *pte_from, *pte_to;
int write_user; int write_user;
if (segment_eq(get_fs(), KERNEL_DS)) {
memcpy((void __force *) to, (void __force *) from, n);
return 0;
}
done = 0; done = 0;
retry: retry:
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
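The added KERNEL_DS check skips the page-table walk when the "user" address space is really the kernel's own. A userspace sketch of the same guard pattern (kernel_ds is a stand-in flag, not a kernel interface):

#include <stdio.h>
#include <string.h>

static int kernel_ds = 1;	/* pretend set_fs(KERNEL_DS) happened */

/* Returns the number of bytes left uncopied, like copy_in_user(). */
static size_t copy_in(void *to, const void *from, size_t n)
{
	if (kernel_ds) {
		memcpy(to, from, n);	/* no address translation needed */
		return 0;
	}
	/* ... otherwise walk page tables as copy_in_user_pt() does ... */
	return n;
}

int main(void)
{
	char src[] = "hello", dst[8];
	size_t left = copy_in(dst, src, sizeof(src));

	printf("left=%zu dst=%s\n", left, dst);
	return 0;
}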
@ -361,18 +365,10 @@ fault:
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc" ); "m" (*uaddr) : "cc" );
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{ {
int oldval = 0, newval, ret; int oldval = 0, newval, ret;
spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
}
get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock);
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n", __futex_atomic_op("lr %2,%5\n",
@ -397,17 +393,17 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
default: default:
ret = -ENOSYS; ret = -ENOSYS;
} }
put_page(virt_to_page(uaddr)); if (ret == 0)
*old = oldval; *old = oldval;
return ret; return ret;
} }
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{ {
int ret; int ret;
if (!current->mm) if (segment_eq(get_fs(), KERNEL_DS))
return -EFAULT; return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock); spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) { if (!uaddr) {
@ -416,13 +412,40 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
} }
get_page(virt_to_page(uaddr)); get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock); spin_unlock(&current->mm->page_table_lock);
asm volatile(" cs %1,%4,0(%5)\n" ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
"0: lr %0,%1\n" put_page(virt_to_page(uaddr));
"1:\n" return ret;
EX_TABLE(0b,1b) }
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
int ret;
asm volatile("0: cs %1,%4,0(%5)\n"
"1: lr %0,%1\n"
"2:\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory" ); : "cc", "memory" );
return ret;
}
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
int ret;
if (segment_eq(get_fs(), KERNEL_DS))
return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
}
get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock);
ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
put_page(virt_to_page(uaddr)); put_page(virt_to_page(uaddr));
return ret; return ret;
} }
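The refactoring separates page pinning from the atomic operation itself, so the KERNEL_DS case can go straight to the compare-and-swap. A portable userspace analogue of the cmpxchg semantics using compiler builtins; the kernel version uses the s390 cs instruction with an exception-table entry instead:

#include <stdio.h>

/* Compare *uaddr with oldval; if equal, store newval.  Either way,
 * return the value that was found at *uaddr. */
static int futex_cmpxchg(int *uaddr, int oldval, int newval)
{
	__atomic_compare_exchange_n(uaddr, &oldval, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return oldval;	/* updated with the observed value on failure */
}

int main(void)
{
	int futex = 1;

	printf("seen=%d futex=%d\n", futex_cmpxchg(&futex, 1, 2), futex);
	printf("seen=%d futex=%d\n", futex_cmpxchg(&futex, 1, 3), futex);
	return 0;
}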
View file
@ -289,22 +289,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
switch (rc) { if (rc)
case 0:
break;
case -ENOSPC:
PRINT_WARN("segment_load: not loading segment %s - overlaps "
"storage/segment\n", name);
goto out_free; goto out_free;
case -ERANGE:
PRINT_WARN("segment_load: not loading segment %s - exceeds "
"kernel mapping range\n", name);
goto out_free;
default:
PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
name, rc);
goto out_free;
}
seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (seg->res == NULL) { if (seg->res == NULL) {
@ -582,8 +568,59 @@ out:
mutex_unlock(&dcss_lock); mutex_unlock(&dcss_lock);
} }
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
void segment_warning(int rc, char *seg_name)
{
switch (rc) {
case -ENOENT:
PRINT_WARN("cannot load/query segment %s, "
"does not exist\n", seg_name);
break;
case -ENOSYS:
PRINT_WARN("cannot load/query segment %s, "
"not running on VM\n", seg_name);
break;
case -EIO:
PRINT_WARN("cannot load/query segment %s, "
"hardware error\n", seg_name);
break;
case -ENOTSUPP:
PRINT_WARN("cannot load/query segment %s, "
"is a multi-part segment\n", seg_name);
break;
case -ENOSPC:
PRINT_WARN("cannot load/query segment %s, "
"overlaps with storage\n", seg_name);
break;
case -EBUSY:
PRINT_WARN("cannot load/query segment %s, "
"overlaps with already loaded dcss\n", seg_name);
break;
case -EPERM:
PRINT_WARN("cannot load/query segment %s, "
"already loaded in incompatible mode\n", seg_name);
break;
case -ENOMEM:
PRINT_WARN("cannot load/query segment %s, "
"out of memory\n", seg_name);
break;
case -ERANGE:
PRINT_WARN("cannot load/query segment %s, "
"exceeds kernel mapping range\n", seg_name);
break;
default:
PRINT_WARN("cannot load/query segment %s, "
"return value %i\n", seg_name, rc);
break;
}
}
EXPORT_SYMBOL(segment_load); EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload); EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save); EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type); EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared); EXPORT_SYMBOL(segment_modify_shared);
EXPORT_SYMBOL(segment_warning);
View file
@ -28,11 +28,11 @@
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000 #define __FAIL_ADDR_MASK 0x7ffff000
@ -50,8 +50,6 @@
extern int sysctl_userprocess_debug; extern int sysctl_userprocess_debug;
#endif #endif
extern void die(const char *,struct pt_regs *,long);
#ifdef CONFIG_KPROBES #ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, long err) static inline int notify_page_fault(struct pt_regs *regs, long err)
{ {
@ -245,11 +243,6 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
} }
#ifdef CONFIG_S390_EXEC_PROTECT #ifdef CONFIG_S390_EXEC_PROTECT
extern long sys_sigreturn(struct pt_regs *regs);
extern long sys_rt_sigreturn(struct pt_regs *regs);
extern long sys32_sigreturn(struct pt_regs *regs);
extern long sys32_rt_sigreturn(struct pt_regs *regs);
static int signal_return(struct mm_struct *mm, struct pt_regs *regs, static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
unsigned long address, unsigned long error_code) unsigned long address, unsigned long error_code)
{ {
@ -270,15 +263,15 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
compat = test_tsk_thread_flag(current, TIF_31BIT); compat = test_tsk_thread_flag(current, TIF_31BIT);
if (compat && instruction == 0x0a77) if (compat && instruction == 0x0a77)
sys32_sigreturn(regs); sys32_sigreturn();
else if (compat && instruction == 0x0aad) else if (compat && instruction == 0x0aad)
sys32_rt_sigreturn(regs); sys32_rt_sigreturn();
else else
#endif #endif
if (instruction == 0x0a77) if (instruction == 0x0a77)
sys_sigreturn(regs); sys_sigreturn();
else if (instruction == 0x0aad) else if (instruction == 0x0aad)
sys_rt_sigreturn(regs); sys_rt_sigreturn();
else { else {
current->thread.prot_addr = address; current->thread.prot_addr = address;
current->thread.trap_no = error_code; current->thread.trap_no = error_code;
@ -424,7 +417,7 @@ no_context:
} }
void __kprobes do_protection_exception(struct pt_regs *regs, void __kprobes do_protection_exception(struct pt_regs *regs,
unsigned long error_code) long error_code)
{ {
/* Protection exception is suppressing, decrement psw address. */ /* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (error_code >> 16); regs->psw.addr -= (error_code >> 16);
@ -440,7 +433,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs,
do_exception(regs, 4, 1); do_exception(regs, 4, 1);
} }
void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
{ {
do_exception(regs, error_code & 0xff, 0); do_exception(regs, error_code & 0xff, 0);
} }
View file
@ -50,7 +50,6 @@ void show_mem(void)
printk("Mem-info:\n"); printk("Mem-info:\n");
show_free_areas(); show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
i = max_mapnr; i = max_mapnr;
while (i-- > 0) { while (i-- > 0) {
if (!pfn_valid(i)) if (!pfn_valid(i))
View file
@ -116,6 +116,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
err = -EAGAIN; err = -EAGAIN;
if (!bytes_read && (filp->f_flags & O_NONBLOCK)) if (!bytes_read && (filp->f_flags & O_NONBLOCK))
goto out; goto out;
if (bytes_read < 0) {
err = bytes_read;
goto out;
}
err = -EFAULT; err = -EFAULT;
while (bytes_read && size) { while (bytes_read && size) {
View file
@ -64,6 +64,7 @@ config ZCRYPT
tristate "Support for PCI-attached cryptographic adapters" tristate "Support for PCI-attached cryptographic adapters"
depends on S390 depends on S390
select ZCRYPT_MONOLITHIC if ZCRYPT="y" select ZCRYPT_MONOLITHIC if ZCRYPT="y"
select HW_RANDOM
help help
Select this option if you want to use a PCI-attached cryptographic Select this option if you want to use a PCI-attached cryptographic
adapter like: adapter like:
View file
@ -20,6 +20,7 @@ config DCSSBLK
config DASD config DASD
tristate "Support for DASD devices" tristate "Support for DASD devices"
depends on CCW && BLOCK depends on CCW && BLOCK
select IOSCHED_DEADLINE
help help
Enable this option if you want to access DASDs directly utilizing Enable this option if you want to access DASDs directly utilizing
S/390s channel subsystem commands. This is necessary for running S/390s channel subsystem commands. This is necessary for running
View file
@ -980,12 +980,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
break; break;
case -ETIMEDOUT: case -ETIMEDOUT:
printk(KERN_WARNING"%s(%s): request timed out\n", printk(KERN_WARNING"%s(%s): request timed out\n",
__FUNCTION__, cdev->dev.bus_id); __func__, cdev->dev.bus_id);
//FIXME - dasd uses own timeout interface... //FIXME - dasd uses own timeout interface...
break; break;
default: default:
printk(KERN_WARNING"%s(%s): unknown error %ld\n", printk(KERN_WARNING"%s(%s): unknown error %ld\n",
__FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb)); __func__, cdev->dev.bus_id, PTR_ERR(irb));
} }
return; return;
} }
@ -1956,6 +1956,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->request_queue->queuedata = block; block->request_queue->queuedata = block;
elevator_exit(block->request_queue->elevator); elevator_exit(block->request_queue->elevator);
block->request_queue->elevator = NULL;
rc = elevator_init(block->request_queue, "deadline"); rc = elevator_init(block->request_queue, "deadline");
if (rc) { if (rc) {
blk_cleanup_queue(block->request_queue); blk_cleanup_queue(block->request_queue);
@ -2298,9 +2299,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers. * in the other openers.
*/ */
if (device->block) { if (device->block) {
struct dasd_block *block = device->block; max_count = device->block->bdev ? 0 : -1;
max_count = block->bdev ? 0 : -1; open_count = atomic_read(&device->block->open_count);
open_count = (int) atomic_read(&block->open_count);
if (open_count > max_count) { if (open_count > max_count) {
if (open_count > 0) if (open_count > 0)
printk(KERN_WARNING "Can't offline dasd " printk(KERN_WARNING "Can't offline dasd "
View file
@ -1995,6 +1995,36 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound */ } /* end dasd_3990_erp_compound */
/*
* DASD_3990_ERP_HANDLE_SIM
*
* DESCRIPTION
* inspects the SIM SENSE data and starts an appropriate action
*
* PARAMETER
* sense sense data of the actual error
*
* RETURN VALUES
* none
*/
void
dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
{
/* print message according to log or message to operator mode */
if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
/* print SIM SRC from RefCode */
DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: "
"%02x%02x%02x%02x", sense[22],
sense[23], sense[11], sense[12]);
} else if (sense[24] & DASD_SIM_LOG) {
/* print SIM SRC Refcode */
DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: "
"%02x%02x%02x%02x", sense[22],
sense[23], sense[11], sense[12]);
}
}
/* /*
* DASD_3990_ERP_INSPECT_32 * DASD_3990_ERP_INSPECT_32
* *
@ -2018,6 +2048,10 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_inspect_32; erp->function = dasd_3990_erp_inspect_32;
/* check for SIM sense data */
if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
dasd_3990_erp_handle_sim(device, sense);
if (sense[25] & DASD_SENSE_BIT_0) { if (sense[25] & DASD_SENSE_BIT_0) {
/* compound program action codes (byte25 bit 0 == '1') */ /* compound program action codes (byte25 bit 0 == '1') */
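A standalone sketch of the SIM sense decoding introduced above, reusing the bit definitions added to dasd_int.h further below; the sense buffer contents are invented for illustration:

#include <stdio.h>

#define DASD_SIM_SENSE		0x0F
#define DASD_SIM_MSG_TO_OP	0x03
#define DASD_SIM_LOG		0x0C

/* Print the SIM SRC from sense bytes 22, 23, 11, 12, choosing the
 * severity the same way dasd_3990_erp_handle_sim() does. */
static void handle_sim(const unsigned char *sense)
{
	if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10))
		printf("SIM - SRC: %02x%02x%02x%02x (message to operator)\n",
		       sense[22], sense[23], sense[11], sense[12]);
	else if (sense[24] & DASD_SIM_LOG)
		printf("SIM - SRC: %02x%02x%02x%02x (log only)\n",
		       sense[22], sense[23], sense[11], sense[12]);
}

int main(void)
{
	unsigned char sense[32] = { 0 };

	sense[6]  = 0x0F;		/* SIM sense indicator */
	sense[24] = DASD_SIM_MSG_TO_OP;
	sense[11] = 0xab; sense[12] = 0xcd;
	sense[22] = 0x12; sense[23] = 0x34;

	if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
		handle_sim(sense);
	return 0;
}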
View file
@ -745,6 +745,19 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
spin_unlock_irqrestore(&lcu->lock, flags); spin_unlock_irqrestore(&lcu->lock, flags);
} }
static void __stop_device_on_lcu(struct dasd_device *device,
struct dasd_device *pos)
{
/* If pos == device then device is already locked! */
if (pos == device) {
pos->stopped |= DASD_STOPPED_SU;
return;
}
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
spin_unlock(get_ccwdev_lock(pos->cdev));
}
/* /*
* This function is called in interrupt context, so the * This function is called in interrupt context, so the
* cdev lock for device is already locked! * cdev lock for device is already locked!
@ -755,35 +768,15 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
struct alias_pav_group *pavgroup; struct alias_pav_group *pavgroup;
struct dasd_device *pos; struct dasd_device *pos;
list_for_each_entry(pos, &lcu->active_devices, alias_list) { list_for_each_entry(pos, &lcu->active_devices, alias_list)
if (pos != device) __stop_device_on_lcu(device, pos);
spin_lock(get_ccwdev_lock(pos->cdev)); list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
pos->stopped |= DASD_STOPPED_SU; __stop_device_on_lcu(device, pos);
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
if (pos != device)
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) { list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(pos, &pavgroup->baselist, alias_list) { list_for_each_entry(pos, &pavgroup->baselist, alias_list)
if (pos != device) __stop_device_on_lcu(device, pos);
spin_lock(get_ccwdev_lock(pos->cdev)); list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
pos->stopped |= DASD_STOPPED_SU; __stop_device_on_lcu(device, pos);
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
if (pos != device)
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
} }
} }
View file
@ -1415,6 +1415,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
return; return;
} }
/* service information message SIM */
if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) {
dasd_3990_erp_handle_sim(device, irb->ecw);
return;
}
/* just report other unsolicited interrupts */ /* just report other unsolicited interrupts */
DEV_MESSAGE(KERN_DEBUG, device, "%s", DEV_MESSAGE(KERN_DEBUG, device, "%s",
"unsolicited interrupt received"); "unsolicited interrupt received");
View file
@ -125,7 +125,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
private = (struct dasd_fba_private *) device->private; private = (struct dasd_fba_private *) device->private;
if (private == NULL) { if (private == NULL) {
private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL); private = kzalloc(sizeof(struct dasd_fba_private),
GFP_KERNEL | GFP_DMA);
if (private == NULL) { if (private == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s", DEV_MESSAGE(KERN_WARNING, device, "%s",
"memory allocation failed for private " "memory allocation failed for private "
View file
@ -72,6 +72,11 @@ struct dasd_block;
#define DASD_SENSE_BIT_2 0x20 #define DASD_SENSE_BIT_2 0x20
#define DASD_SENSE_BIT_3 0x10 #define DASD_SENSE_BIT_3 0x10
/* BIT DEFINITIONS FOR SIM SENSE */
#define DASD_SIM_SENSE 0x0F
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
/* /*
* SECTION: MACROs for klogd and s390 debug feature (dbf) * SECTION: MACROs for klogd and s390 debug feature (dbf)
*/ */
@ -621,6 +626,7 @@ void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
/* externals in dasd_3990_erp.c */ /* externals in dasd_3990_erp.c */
struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
/* externals in dasd_eer.c */ /* externals in dasd_eer.c */
#ifdef CONFIG_DASD_EER #ifdef CONFIG_DASD_EER
View file
@ -142,57 +142,6 @@ dcssblk_get_device_by_name(char *name)
return NULL; return NULL;
} }
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
static void
dcssblk_segment_warn(int rc, char* seg_name)
{
switch (rc) {
case -ENOENT:
PRINT_WARN("cannot load/query segment %s, does not exist\n",
seg_name);
break;
case -ENOSYS:
PRINT_WARN("cannot load/query segment %s, not running on VM\n",
seg_name);
break;
case -EIO:
PRINT_WARN("cannot load/query segment %s, hardware error\n",
seg_name);
break;
case -ENOTSUPP:
PRINT_WARN("cannot load/query segment %s, is a multi-part "
"segment\n", seg_name);
break;
case -ENOSPC:
PRINT_WARN("cannot load/query segment %s, overlaps with "
"storage\n", seg_name);
break;
case -EBUSY:
PRINT_WARN("cannot load/query segment %s, overlaps with "
"already loaded dcss\n", seg_name);
break;
case -EPERM:
PRINT_WARN("cannot load/query segment %s, already loaded in "
"incompatible mode\n", seg_name);
break;
case -ENOMEM:
PRINT_WARN("cannot load/query segment %s, out of memory\n",
seg_name);
break;
case -ERANGE:
PRINT_WARN("cannot load/query segment %s, exceeds kernel "
"mapping range\n", seg_name);
break;
default:
PRINT_WARN("cannot load/query segment %s, return value %i\n",
seg_name, rc);
break;
}
}
static void dcssblk_unregister_callback(struct device *dev) static void dcssblk_unregister_callback(struct device *dev)
{ {
device_unregister(dev); device_unregister(dev);
@ -423,7 +372,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = segment_load(local_buf, SEGMENT_SHARED, rc = segment_load(local_buf, SEGMENT_SHARED,
&dev_info->start, &dev_info->end); &dev_info->start, &dev_info->end);
if (rc < 0) { if (rc < 0) {
dcssblk_segment_warn(rc, dev_info->segment_name); segment_warning(rc, dev_info->segment_name);
goto dealloc_gendisk; goto dealloc_gendisk;
} }
seg_byte_size = (dev_info->end - dev_info->start + 1); seg_byte_size = (dev_info->end - dev_info->start + 1);
View file
@ -111,56 +111,6 @@ static void dcss_mkname(char *ascii_name, char *ebcdic_name)
ASCEBC(ebcdic_name, 8); ASCEBC(ebcdic_name, 8);
} }
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
static void mon_segment_warn(int rc, char* seg_name)
{
switch (rc) {
case -ENOENT:
P_WARNING("cannot load/query segment %s, does not exist\n",
seg_name);
break;
case -ENOSYS:
P_WARNING("cannot load/query segment %s, not running on VM\n",
seg_name);
break;
case -EIO:
P_WARNING("cannot load/query segment %s, hardware error\n",
seg_name);
break;
case -ENOTSUPP:
P_WARNING("cannot load/query segment %s, is a multi-part "
"segment\n", seg_name);
break;
case -ENOSPC:
P_WARNING("cannot load/query segment %s, overlaps with "
"storage\n", seg_name);
break;
case -EBUSY:
P_WARNING("cannot load/query segment %s, overlaps with "
"already loaded dcss\n", seg_name);
break;
case -EPERM:
P_WARNING("cannot load/query segment %s, already loaded in "
"incompatible mode\n", seg_name);
break;
case -ENOMEM:
P_WARNING("cannot load/query segment %s, out of memory\n",
seg_name);
break;
case -ERANGE:
P_WARNING("cannot load/query segment %s, exceeds kernel "
"mapping range\n", seg_name);
break;
default:
P_WARNING("cannot load/query segment %s, return value %i\n",
seg_name, rc);
break;
}
}
static inline unsigned long mon_mca_start(struct mon_msg *monmsg) static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{ {
return *(u32 *) &monmsg->msg.rmmsg; return *(u32 *) &monmsg->msg.rmmsg;
@ -585,7 +535,7 @@ static int __init mon_init(void)
rc = segment_type(mon_dcss_name); rc = segment_type(mon_dcss_name);
if (rc < 0) { if (rc < 0) {
mon_segment_warn(rc, mon_dcss_name); segment_warning(rc, mon_dcss_name);
goto out_iucv; goto out_iucv;
} }
if (rc != SEG_TYPE_SC) { if (rc != SEG_TYPE_SC) {
@ -598,7 +548,7 @@ static int __init mon_init(void)
rc = segment_load(mon_dcss_name, SEGMENT_SHARED, rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end); &mon_dcss_start, &mon_dcss_end);
if (rc < 0) { if (rc < 0) {
mon_segment_warn(rc, mon_dcss_name); segment_warning(rc, mon_dcss_name);
rc = -EINVAL; rc = -EINVAL;
goto out_iucv; goto out_iucv;
View file

@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
if (sclp_ttybuf == NULL) { if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) { while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags); spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (in_atomic()) if (in_interrupt())
sclp_sync_wait(); sclp_sync_wait();
else else
wait_event(sclp_tty_waitq, wait_event(sclp_tty_waitq,
View file
@ -383,7 +383,7 @@ sclp_vt220_timeout(unsigned long data)
*/ */
static int static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
int convertlf) int convertlf, int may_schedule)
{ {
unsigned long flags; unsigned long flags;
void *page; void *page;
@ -398,9 +398,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
/* Create a sclp output buffer if none exists yet */ /* Create a sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) { if (sclp_vt220_current_request == NULL) {
while (list_empty(&sclp_vt220_empty)) { while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock, spin_unlock_irqrestore(&sclp_vt220_lock, flags);
flags); if (in_interrupt() || !may_schedule)
if (in_atomic())
sclp_sync_wait(); sclp_sync_wait();
else else
wait_event(sclp_vt220_waitq, wait_event(sclp_vt220_waitq,
@ -450,7 +449,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
static int static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count) sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{ {
return __sclp_vt220_write(buf, count, 1, 0); return __sclp_vt220_write(buf, count, 1, 0, 1);
} }
#define SCLP_VT220_SESSION_ENDED 0x01 #define SCLP_VT220_SESSION_ENDED 0x01
@ -529,7 +528,7 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
static void static void
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{ {
__sclp_vt220_write(&ch, 1, 0, 0); __sclp_vt220_write(&ch, 1, 0, 0, 1);
} }
/* /*
@ -746,7 +745,7 @@ __initcall(sclp_vt220_tty_init);
static void static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{ {
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1); __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
} }
View file

@ -394,7 +394,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
return tape_34xx_erp_failed(request, -ENOSPC); return tape_34xx_erp_failed(request, -ENOSPC);
default: default:
PRINT_ERR("Invalid op in %s:%i\n", PRINT_ERR("Invalid op in %s:%i\n",
__FUNCTION__, __LINE__); __func__, __LINE__);
return tape_34xx_erp_failed(request, 0); return tape_34xx_erp_failed(request, 0);
} }
View file

@ -83,9 +83,9 @@ tapechar_setup_device(struct tape_device * device)
void void
tapechar_cleanup_device(struct tape_device *device) tapechar_cleanup_device(struct tape_device *device)
{ {
unregister_tape_dev(device->rt); unregister_tape_dev(&device->cdev->dev, device->rt);
device->rt = NULL; device->rt = NULL;
unregister_tape_dev(device->nt); unregister_tape_dev(&device->cdev->dev, device->nt);
device->nt = NULL; device->nt = NULL;
} }
View file
@ -99,11 +99,10 @@ fail_with_tcd:
} }
EXPORT_SYMBOL(register_tape_dev); EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct tape_class_device *tcd) void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{ {
if (tcd != NULL && !IS_ERR(tcd)) { if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&tcd->class_device->kobj, sysfs_remove_link(&device->kobj, tcd->mode_name);
tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev); device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device); cdev_del(tcd->char_device);
kfree(tcd); kfree(tcd);
View file
@ -56,6 +56,6 @@ struct tape_class_device *register_tape_dev(
char * device_name, char * device_name,
char * node_name char * node_name
); );
void unregister_tape_dev(struct tape_class_device *tcd); void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
#endif /* __TAPE_CLASS_H__ */ #endif /* __TAPE_CLASS_H__ */
View file
@ -100,7 +100,8 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
urd->reclen = cdev->id.driver_info; urd->reclen = cdev->id.driver_info;
ccw_device_get_id(cdev, &urd->dev_id); ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex); mutex_init(&urd->io_mutex);
mutex_init(&urd->open_mutex); init_waitqueue_head(&urd->wait);
spin_lock_init(&urd->open_lock);
atomic_set(&urd->ref_count, 1); atomic_set(&urd->ref_count, 1);
urd->cdev = cdev; urd->cdev = cdev;
get_device(&cdev->dev); get_device(&cdev->dev);
@ -678,17 +679,21 @@ static int ur_open(struct inode *inode, struct file *file)
if (!urd) if (!urd)
return -ENXIO; return -ENXIO;
spin_lock(&urd->open_lock);
while (urd->open_flag) {
spin_unlock(&urd->open_lock);
if (file->f_flags & O_NONBLOCK) { if (file->f_flags & O_NONBLOCK) {
if (!mutex_trylock(&urd->open_mutex)) {
rc = -EBUSY; rc = -EBUSY;
goto fail_put; goto fail_put;
} }
} else { if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
if (mutex_lock_interruptible(&urd->open_mutex)) {
rc = -ERESTARTSYS; rc = -ERESTARTSYS;
goto fail_put; goto fail_put;
} }
spin_lock(&urd->open_lock);
} }
urd->open_flag++;
spin_unlock(&urd->open_lock);
TRACE("ur_open\n"); TRACE("ur_open\n");
@ -720,7 +725,9 @@ static int ur_open(struct inode *inode, struct file *file)
fail_urfile_free: fail_urfile_free:
urfile_free(urf); urfile_free(urf);
fail_unlock: fail_unlock:
mutex_unlock(&urd->open_mutex); spin_lock(&urd->open_lock);
urd->open_flag--;
spin_unlock(&urd->open_lock);
fail_put: fail_put:
urdev_put(urd); urdev_put(urd);
return rc; return rc;
@ -731,7 +738,10 @@ static int ur_release(struct inode *inode, struct file *file)
struct urfile *urf = file->private_data; struct urfile *urf = file->private_data;
TRACE("ur_release\n"); TRACE("ur_release\n");
mutex_unlock(&urf->urd->open_mutex); spin_lock(&urf->urd->open_lock);
urf->urd->open_flag--;
spin_unlock(&urf->urd->open_lock);
wake_up_interruptible(&urf->urd->wait);
urdev_put(urf->urd); urdev_put(urf->urd);
urfile_free(urf); urfile_free(urf);
return 0; return 0;
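The open_mutex is replaced because a mutex must be released by the task that acquired it, while a vmur device may be opened and released by different processes. A pthread analogue of the new flag-plus-wait-queue serialization:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t open_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_queue = PTHREAD_COND_INITIALIZER;
static int open_flag;

static void ur_open_sketch(void)
{
	pthread_mutex_lock(&open_lock);
	while (open_flag)	/* like wait_event_interruptible() */
		pthread_cond_wait(&wait_queue, &open_lock);
	open_flag++;
	pthread_mutex_unlock(&open_lock);
}

static void ur_release_sketch(void)
{
	pthread_mutex_lock(&open_lock);
	open_flag--;		/* may run in a different task than open */
	pthread_mutex_unlock(&open_lock);
	pthread_cond_signal(&wait_queue);	/* wake_up_interruptible() */
}

int main(void)
{
	ur_open_sketch();
	printf("device open, flag=%d\n", open_flag);
	ur_release_sketch();
	printf("device released, flag=%d\n", open_flag);
	return 0;
}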
View file
@ -62,7 +62,6 @@ struct file_control_block {
struct urdev { struct urdev {
struct ccw_device *cdev; /* Backpointer to ccw device */ struct ccw_device *cdev; /* Backpointer to ccw device */
struct mutex io_mutex; /* Serialises device IO */ struct mutex io_mutex; /* Serialises device IO */
struct mutex open_mutex; /* Serialises access to device */
struct completion *io_done; /* do_ur_io waits; irq completes */ struct completion *io_done; /* do_ur_io waits; irq completes */
struct device *device; struct device *device;
struct cdev *char_device; struct cdev *char_device;
@ -71,6 +70,9 @@ struct urdev {
int class; /* VM device class */ int class; /* VM device class */
int io_request_rc; /* return code from I/O request */ int io_request_rc; /* return code from I/O request */
atomic_t ref_count; /* reference counter */ atomic_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
}; };
/* /*
View file
@ -96,7 +96,7 @@ static int vmwdt_keepalive(void)
if (ret) { if (ret) {
printk(KERN_WARNING "%s: problem setting interval %d, " printk(KERN_WARNING "%s: problem setting interval %d, "
"cmd %s\n", __FUNCTION__, vmwdt_interval, "cmd %s\n", __func__, vmwdt_interval,
vmwdt_cmd); vmwdt_cmd);
} }
return ret; return ret;
@ -107,7 +107,7 @@ static int vmwdt_disable(void)
int ret = __diag288(wdt_cancel, 0, "", 0); int ret = __diag288(wdt_cancel, 0, "", 0);
if (ret) { if (ret) {
printk(KERN_WARNING "%s: problem disabling watchdog\n", printk(KERN_WARNING "%s: problem disabling watchdog\n",
__FUNCTION__); __func__);
} }
return ret; return ret;
View file

@ -224,7 +224,7 @@ static int __init init_cpu_info(enum arch_id arch)
sa = kmalloc(sizeof(*sa), GFP_KERNEL); sa = kmalloc(sizeof(*sa), GFP_KERNEL);
if (!sa) { if (!sa) {
ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
return -ENOMEM; return -ENOMEM;
} }
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
View file
@ -217,6 +217,8 @@ void chsc_chp_offline(struct chp_id chpid)
if (chp_get_status(chpid) <= 0) if (chp_get_status(chpid) <= 0)
return; return;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
} }
@ -303,7 +305,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
sprintf(dbf_txt, "fla%x", res_data->fla); sprintf(dbf_txt, "fla%x", res_data->fla);
CIO_TRACE_EVENT( 2, dbf_txt); CIO_TRACE_EVENT( 2, dbf_txt);
} }
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/* /*
* I/O resources may have become accessible. * I/O resources may have become accessible.
* Scan through all subchannels that may be concerned and * Scan through all subchannels that may be concerned and
@ -561,9 +564,12 @@ void chsc_chp_online(struct chp_id chpid)
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt); CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) != 0) if (chp_get_status(chpid) != 0) {
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(__chp_add, __chp_add_new_sch, for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
&chpid); &chpid);
}
} }
static void __s390_subchannel_vary_chpid(struct subchannel *sch, static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@ -650,6 +656,8 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/ */
int chsc_chp_vary(struct chp_id chpid, int on) int chsc_chp_vary(struct chp_id chpid, int on)
{ {
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/* /*
* Redo PathVerification on the devices the chpid connects to * Redo PathVerification on the devices the chpid connects to
*/ */
@ -758,7 +766,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
if (!secm_area) if (!secm_area)
return -ENOMEM; return -ENOMEM;
mutex_lock(&css->mutex);
if (enable && !css->cm_enabled) { if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
@ -766,7 +773,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1); free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2); free_page((unsigned long)css->cub_addr2);
free_page((unsigned long)secm_area); free_page((unsigned long)secm_area);
mutex_unlock(&css->mutex);
return -ENOMEM; return -ENOMEM;
} }
} }
@ -787,7 +793,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1); free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2); free_page((unsigned long)css->cub_addr2);
} }
mutex_unlock(&css->mutex);
free_page((unsigned long)secm_area); free_page((unsigned long)secm_area);
return ret; return ret;
} }
View file
@ -24,6 +24,7 @@
#include <asm/ipl.h> #include <asm/ipl.h>
#include <asm/chpid.h> #include <asm/chpid.h>
#include <asm/airq.h> #include <asm/airq.h>
#include <asm/cpu.h>
#include "cio.h" #include "cio.h"
#include "css.h" #include "css.h"
#include "chsc.h" #include "chsc.h"
@ -649,13 +650,10 @@ do_IRQ (struct pt_regs *regs)
old_regs = set_irq_regs(regs); old_regs = set_irq_regs(regs);
irq_enter(); irq_enter();
asm volatile ("mc 0,0"); s390_idle_check();
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/** /* Serve timer interrupts first. */
* Make sure that the i/o interrupt did not "overtake" clock_comparator_work();
* the last HZ timer interrupt.
*/
account_ticks(S390_lowcore.int_clock);
/* /*
* Get interrupt information from lowcore * Get interrupt information from lowcore
*/ */
@ -672,10 +670,14 @@ do_IRQ (struct pt_regs *regs)
continue; continue;
} }
sch = (struct subchannel *)(unsigned long)tpi_info->intparm; sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (sch) if (!sch) {
/* Clear pending interrupt condition. */
tsch(tpi_info->schid, irb);
continue;
}
spin_lock(sch->lock); spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */ /* Store interrupt response block to lowcore. */
if (tsch (tpi_info->schid, irb) == 0 && sch) { if (tsch(tpi_info->schid, irb) == 0) {
/* Keep subchannel information word up to date. */ /* Keep subchannel information word up to date. */
memcpy (&sch->schib.scsw, &irb->scsw, memcpy (&sch->schib.scsw, &irb->scsw,
sizeof (irb->scsw)); sizeof (irb->scsw));
@ -683,7 +685,6 @@ do_IRQ (struct pt_regs *regs)
if (sch->driver && sch->driver->irq) if (sch->driver && sch->driver->irq)
sch->driver->irq(sch); sch->driver->irq(sch);
} }
if (sch)
spin_unlock(sch->lock); spin_unlock(sch->lock);
/* /*
* Are more interrupts pending? * Are more interrupts pending?
@ -710,8 +711,9 @@ void *cio_get_console_priv(void)
/* /*
* busy wait for the next interrupt on the console * busy wait for the next interrupt on the console
*/ */
void void wait_cons_dev(void)
wait_cons_dev (void) __releases(console_subchannel.lock)
__acquires(console_subchannel.lock)
{ {
unsigned long cr6 __attribute__ ((aligned (8))); unsigned long cr6 __attribute__ ((aligned (8)));
unsigned long save_cr6 __attribute__ ((aligned (8))); unsigned long save_cr6 __attribute__ ((aligned (8)));
View file
@ -100,6 +100,7 @@ extern int cio_modify (struct subchannel *);
int cio_create_sch_lock(struct subchannel *); int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(void); void do_adapter_IO(void);
void do_IRQ(struct pt_regs *);
/* Use with care. */ /* Use with care. */
#ifdef CONFIG_CCW_CONSOLE #ifdef CONFIG_CCW_CONSOLE
View file
@ -533,6 +533,12 @@ void css_schedule_eval_all(void)
spin_unlock_irqrestore(&slow_subchannel_lock, flags); spin_unlock_irqrestore(&slow_subchannel_lock, flags);
} }
void css_wait_for_slow_path(void)
{
flush_workqueue(ccw_device_notify_work);
flush_workqueue(slow_path_wq);
}
/* Reprobe subchannel if unregistered. */ /* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data) static int reprobe_subchannel(struct subchannel_id schid, void *data)
{ {
@ -683,10 +689,14 @@ css_cm_enable_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct channel_subsystem *css = to_css(dev); struct channel_subsystem *css = to_css(dev);
int ret;
if (!css) if (!css)
return 0; return 0;
return sprintf(buf, "%x\n", css->cm_enabled); mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
} }
static ssize_t static ssize_t
@ -696,6 +706,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
struct channel_subsystem *css = to_css(dev); struct channel_subsystem *css = to_css(dev);
int ret; int ret;
mutex_lock(&css->mutex);
switch (buf[0]) { switch (buf[0]) {
case '0': case '0':
ret = css->cm_enabled ? chsc_secm(css, 0) : 0; ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
@ -706,6 +717,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
default: default:
ret = -EINVAL; ret = -EINVAL;
} }
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count; return ret < 0 ? ret : count;
} }
@ -752,9 +764,11 @@ static int css_reboot_event(struct notifier_block *this,
struct channel_subsystem *css; struct channel_subsystem *css;
css = channel_subsystems[i]; css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (css->cm_enabled) if (css->cm_enabled)
if (chsc_secm(css, 0)) if (chsc_secm(css, 0))
ret = NOTIFY_BAD; ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
} }
return ret; return ret;
View file
@ -144,6 +144,7 @@ struct schib;
int css_sch_is_valid(struct schib *); int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq; extern struct workqueue_struct *slow_path_wq;
void css_wait_for_slow_path(void);
extern struct attribute_group *subch_attr_groups[]; extern struct attribute_group *subch_attr_groups[];
#endif #endif
View file
@ -577,7 +577,6 @@ static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store); static DEVICE_ATTR(online, 0644, online_show, online_store);
extern struct device_attribute dev_attr_cmb_enable;
static DEVICE_ATTR(availability, 0444, available_show, NULL); static DEVICE_ATTR(availability, 0444, available_show, NULL);
static struct attribute * subch_attrs[] = { static struct attribute * subch_attrs[] = {
View file
@ -127,4 +127,5 @@ extern struct bus_type ccw_bus_type;
void retry_set_schib(struct ccw_device *cdev); void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *); void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *); int cmf_reenable(struct ccw_device *);
extern struct device_attribute dev_attr_cmb_enable;
#endif #endif
View file
@ -193,8 +193,15 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EACCES; return -EACCES;
} }
ret = cio_start_key (sch, cpa, lpm, key); ret = cio_start_key (sch, cpa, lpm, key);
if (ret == 0) switch (ret) {
case 0:
cdev->private->intparm = intparm; cdev->private->intparm = intparm;
break;
case -EACCES:
case -ENODEV:
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
break;
}
return ret; return ret;
} }
View file
@ -62,7 +62,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
stsch (sch->schid, &sch->schib); stsch (sch->schid, &sch->schib);
CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
"not operational \n", __FUNCTION__, "not operational \n", __func__,
sch->schid.ssid, sch->schid.sch_no, sch->schid.ssid, sch->schid.sch_no,
sch->schib.pmcw.pnom); sch->schib.pmcw.pnom);
@ -312,6 +312,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{ {
struct subchannel *sch; struct subchannel *sch;
struct ccw1 *sense_ccw; struct ccw1 *sense_ccw;
int rc;
sch = to_subchannel(cdev->dev.parent); sch = to_subchannel(cdev->dev.parent);
@ -337,7 +338,10 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
/* Reset internal retry indication. */ /* Reset internal retry indication. */
cdev->private->flags.intretry = 0; cdev->private->flags.intretry = 0;
return cio_start(sch, sense_ccw, 0xff); rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return rc;
} }
/* /*
View file
@ -1399,7 +1399,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
* q->dev_st_chg_ind is the indicator, be it shared or not. * q->dev_st_chg_ind is the indicator, be it shared or not.
* only clear it, if indicator is non-shared * only clear it, if indicator is non-shared
*/ */
if (!spare_ind_was_set) if (q->dev_st_chg_ind != &spare_indicator)
tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
if (q->hydra_gives_outbound_pcis) { if (q->hydra_gives_outbound_pcis) {
@ -2217,9 +2217,78 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
return cc; return cc;
} }
static int
qdio_get_ssqd_information(struct subchannel_id *schid,
struct qdio_chsc_ssqd **ssqd_area)
{
int result;
QDIO_DBF_TEXT0(0, setup, "getssqd");
*ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
if (!ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
schid->sch_no);
return -ENOMEM;
}
(*ssqd_area)->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
(*ssqd_area)->first_sch = schid->sch_no;
(*ssqd_area)->last_sch = schid->sch_no;
(*ssqd_area)->ssid = schid->ssid;
result = chsc(*ssqd_area);
if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
result, schid->ssid, schid->sch_no);
goto out;
}
if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
(*ssqd_area)->response.code,
schid->ssid, schid->sch_no);
goto out;
}
if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
((*ssqd_area)->sch != schid->sch_no)) {
QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
"using all SIGAs.\n",
schid->ssid, schid->sch_no);
goto out;
}
return 0;
out:
return -EINVAL;
}
int
qdio_get_ssqd_pct(struct ccw_device *cdev)
{
struct qdio_chsc_ssqd *ssqd_area;
struct subchannel_id schid;
char dbf_text[15];
int rc;
int pct = 0;
QDIO_DBF_TEXT0(0, setup, "getpct");
schid = ccw_device_get_subchannel_id(cdev);
rc = qdio_get_ssqd_information(&schid, &ssqd_area);
if (!rc)
pct = (int)ssqd_area->pct;
if (rc != -ENOMEM)
mempool_free(ssqd_area, qdio_mempool_scssc);
sprintf(dbf_text, "pct: %d", pct);
QDIO_DBF_TEXT2(0, setup, dbf_text);
return pct;
}
EXPORT_SYMBOL(qdio_get_ssqd_pct);
static void static void
qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
unsigned long token)
{ {
struct qdio_q *q; struct qdio_q *q;
int i; int i;
@ -2227,7 +2296,7 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
char dbf_text[15]; char dbf_text[15];
/*check if QEBSM is disabled */ /*check if QEBSM is disabled */
if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) { if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
irq_ptr->is_qebsm = 0; irq_ptr->is_qebsm = 0;
irq_ptr->sch_token = 0; irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
@ -2256,102 +2325,27 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
} }
static void static void
qdio_get_ssqd_information(struct qdio_irq *irq_ptr) qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
{ {
int result; int rc;
unsigned char qdioac; struct qdio_chsc_ssqd *ssqd_area;
struct {
struct chsc_header request;
u16 reserved1:10;
u16 ssid:2;
u16 fmt:4;
u16 first_sch;
u16 reserved2;
u16 last_sch;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 reserved5;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 reserved7;
u8 icnt;
u8 reserved8;
u8 ocnt;
u8 reserved9;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
} *ssqd_area;
QDIO_DBF_TEXT0(0,setup,"getssqd"); QDIO_DBF_TEXT0(0,setup,"getssqd");
qdioac = 0; irq_ptr->qdioac = 0;
ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
if (!ssqd_area) { if (rc) {
QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ QDIO_PRINT_WARN("using all SIGAs for sch x%x.n",
"SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); irq_ptr->schid.sch_no);
irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0; irq_ptr->is_qebsm = 0;
irq_ptr->sch_token = 0; } else
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; irq_ptr->qdioac = ssqd_area->qdioac1;
return;
}
ssqd_area->request = (struct chsc_header) { qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
.length = 0x0010, if (rc != -ENOMEM)
.code = 0x0024,
};
ssqd_area->first_sch = irq_ptr->schid.sch_no;
ssqd_area->last_sch = irq_ptr->schid.sch_no;
ssqd_area->ssid = irq_ptr->schid.ssid;
result = chsc(ssqd_area);
if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
"SIGAs for sch 0.%x.%x.\n", result,
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
goto out;
}
if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon checking SIGA needs " \
"is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
ssqd_area->response.code,
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
goto out;
}
if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
(ssqd_area->sch != irq_ptr->schid.sch_no)) {
QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
"using all SIGAs.\n",
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
irq_ptr->is_qebsm = 0;
goto out;
}
qdioac = ssqd_area->qdioac1;
out:
qdio_check_subchannel_qebsm(irq_ptr, qdioac,
ssqd_area->sch_token);
mempool_free(ssqd_area, qdio_mempool_scssc); mempool_free(ssqd_area, qdio_mempool_scssc);
irq_ptr->qdioac = qdioac;
} }
static unsigned int
@@ -3227,7 +3221,7 @@ qdio_establish(struct qdio_initialize *init_data)
		return -EIO;
	}

-	qdio_get_ssqd_information(irq_ptr);
+	qdio_get_ssqd_siga(irq_ptr);
	/* if this gets set once, we're running under VM and can omit SVSes */
	if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
		omit_svs=1;
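
For reference, the qdioac bits cached by qdio_get_ssqd_siga() gate whether the driver has to issue SIGA instructions at all. A hedged sketch of such a flag test (the helper name is illustrative; the flag itself is the one checked right above):

/* Illustrative only: a driver asking, after qdio_establish(), whether
 * the adapter still requires explicit SIGA synchronization. */
static int example_needs_siga_sync(struct qdio_irq *irq_ptr)
{
	return (irq_ptr->qdioac & CHSC_FLAG_SIGA_SYNC_NECESSARY) != 0;
}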


@@ -406,6 +406,34 @@ do_clear_global_summary(void)
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS	0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS	0x04
struct qdio_chsc_ssqd {
struct chsc_header request;
u16 reserved1:10;
u16 ssid:2;
u16 fmt:4;
u16 first_sch;
u16 reserved2;
u16 last_sch;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 reserved5;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pct;
u8 icnt;
u8 reserved7;
u8 ocnt;
u8 reserved8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
};
struct qdio_perf_stats {
#ifdef CONFIG_64BIT
	atomic64_t tl_runs;


@@ -45,7 +45,7 @@ static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);

-/**
+/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
@@ -53,7 +53,7 @@ MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
		   "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");

-/**
+/*
 * Module parameter
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
@@ -69,7 +69,7 @@ static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_lock);
static LIST_HEAD(ap_device_list);

-/**
+/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
@@ -77,7 +77,7 @@ static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

-/**
+/*
 * Tasklet & timer for AP request polling.
 */
static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
@@ -88,9 +88,9 @@ static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);

/**
- * Test if ap instructions are available.
+ * ap_instructions_available() - Test if AP instructions are available.
 *
- * Returns 0 if the ap instructions are installed.
+ * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
@@ -108,12 +108,12 @@ static inline int ap_instructions_available(void)
}

/**
- * Test adjunct processor queue.
- * @qid: the ap queue number
- * @queue_depth: pointer to queue depth value
- * @device_type: pointer to device type value
+ * ap_test_queue(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
 *
- * Returns ap queue status structure.
+ * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
@@ -130,10 +130,10 @@ ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
}

/**
- * Reset adjunct processor queue.
- * @qid: the ap queue number
+ * ap_reset_queue(): Reset adjunct processor queue.
+ * @qid: The AP queue number
 *
- * Returns ap queue status structure.
+ * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
@@ -148,16 +148,14 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
}

/**
- * Send message to adjunct processor queue.
- * @qid: the ap queue number
- * @psmid: the program supplied message identifier
- * @msg: the message text
- * @length: the message length
- *
- * Returns ap queue status structure.
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
 *
+ * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
- *
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
@@ -198,23 +196,20 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
}
EXPORT_SYMBOL(ap_send);
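
ap_send()/ap_recv() form the raw synchronous interface beneath the AP message queue. A minimal sketch of the probe-style exchange they allow, modeled loosely on the detection functions later in this series (the psmid constant and helper name are arbitrary assumptions):

/* Hypothetical probe exchange; the psmid value is just a marker used
 * to match the reply to the request. */
static int example_probe_exchange(ap_qid_t qid, void *msg, size_t len)
{
	unsigned long long psmid;
	int rc;

	rc = ap_send(qid, 0x0102030405060708ULL, msg, len);
	if (rc)
		return rc;
	/* Real callers retry this with a timeout until it returns 0 and
	 * the psmid matches (see zcrypt_pcixcc_rng_supported() below). */
	rc = ap_recv(qid, &psmid, msg, len);
	if (rc == 0 && psmid != 0x0102030405060708ULL)
		rc = -EIO;
	return rc;
}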
-/*
- * Receive message from adjunct processor queue.
- * @qid: the ap queue number
- * @psmid: pointer to program supplied message identifier
- * @msg: the message text
- * @length: the message length
- *
- * Returns ap queue status structure.
+/**
+ * __ap_recv(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
 *
+ * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
- *
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
- *
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
@@ -263,11 +258,12 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
EXPORT_SYMBOL(ap_recv);

/**
- * Check if an AP queue is available. The test is repeated for
- * AP_MAX_RESET times.
- * @qid: the ap queue number
- * @queue_depth: pointer to queue depth value
- * @device_type: pointer to device type value
+ * ap_query_queue(): Check if an AP queue is available.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
+ *
+ * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
@@ -308,8 +304,10 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
}

/**
+ * ap_init_queue(): Reset an AP queue.
+ * @qid: The AP queue number
+ *
 * Reset an AP queue and wait for it to become available again.
- * @qid: the ap queue number
 */
static int ap_init_queue(ap_qid_t qid)
{
@@ -346,7 +344,10 @@ static int ap_init_queue(ap_qid_t qid)
}

/**
- * Arm request timeout if a AP device was idle and a new request is submitted.
+ * ap_increase_queue_count(): Arm request timeout.
+ * @ap_dev: Pointer to an AP device.
+ *
+ * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
@@ -360,7 +361,10 @@ static void ap_increase_queue_count(struct ap_device *ap_dev)
}

/**
- * AP device is still alive, re-schedule request timeout if there are still
+ * ap_decrease_queue_count(): Decrease queue count.
+ * @ap_dev: Pointer to an AP device.
+ *
+ * If AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
@@ -371,7 +375,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
-		/**
+		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
@@ -379,7 +383,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
		ap_dev->reset = AP_RESET_IGNORE;
}

-/**
+/*
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
@@ -433,6 +437,10 @@ static struct attribute_group ap_dev_attr_group = {
};

/**
+ * ap_bus_match()
+ * @dev: Pointer to device
+ * @drv: Pointer to device_driver
+ *
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
@@ -441,7 +449,7 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

-	/**
+	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
@@ -455,8 +463,12 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
}

/**
- * uevent function for AP devices. It sets up a single environment
- * variable DEV_TYPE which contains the hardware device type.
+ * ap_uevent(): Uevent function for AP devices.
+ * @dev: Pointer to device
+ * @env: Pointer to kobj_uevent_env
+ *
+ * It sets up a single environment variable DEV_TYPE which contains the
+ * hardware device type.
 */
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
@@ -500,8 +512,10 @@ static int ap_device_probe(struct device *dev)
}

/**
+ * __ap_flush_queue(): Flush requests.
+ * @ap_dev: Pointer to the AP device
+ *
 * Flush all requests from the request/pending queue of an AP device.
- * @ap_dev: pointer to the AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
@@ -565,7 +579,7 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);

-/**
+/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@@ -630,14 +644,16 @@ static struct bus_attribute *const ap_bus_attrs[] = {
};

/**
- * Pick one of the 16 ap domains.
+ * ap_select_domain(): Select an AP domain.
+ *
+ * Pick one of the 16 AP domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	int rc, i, j;

-	/**
+	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
@@ -669,8 +685,10 @@ static int ap_select_domain(void)
}

/**
- * Find the device type if query queue returned a device type of 0.
+ * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
+ *
+ * Find the device type if query queue returned a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
@@ -764,7 +782,11 @@ out:
}

/**
- * Scan the ap bus for new devices.
+ * __ap_scan_bus(): Scan the AP bus.
+ * @dev: Pointer to device
+ * @data: Pointer to data
+ *
+ * Scan the AP bus for new devices.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
@@ -867,6 +889,8 @@ ap_config_timeout(unsigned long ptr)
}

/**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
} }
/** /**
* Receive pending reply messages from an AP device. * ap_poll_read(): Receive pending reply messages from an AP device.
* @ap_dev: pointer to the AP device * @ap_dev: pointer to the AP device
* @flags: pointer to control flags, bit 2^0 is set if another poll is * @flags: pointer to control flags, bit 2^0 is set if another poll is
* required, bit 2^1 is set if the poll timer needs to get armed * required, bit 2^1 is set if the poll timer needs to get armed
*
* Returns 0 if the device is still present, -ENODEV if not. * Returns 0 if the device is still present, -ENODEV if not.
*/ */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
@ -925,10 +950,11 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
} }
/** /**
* Send messages from the request queue to an AP device. * ap_poll_write(): Send messages from the request queue to an AP device.
* @ap_dev: pointer to the AP device * @ap_dev: pointer to the AP device
* @flags: pointer to control flags, bit 2^0 is set if another poll is * @flags: pointer to control flags, bit 2^0 is set if another poll is
* required, bit 2^1 is set if the poll timer needs to get armed * required, bit 2^1 is set if the poll timer needs to get armed
*
* Returns 0 if the device is still present, -ENODEV if not. * Returns 0 if the device is still present, -ENODEV if not.
*/ */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
@ -968,11 +994,13 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
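
Both poll helpers report back to their caller through the same two control bits in *flags. A small sketch of a caller acting on them (illustrative only; the real consumers are __ap_poll_all() and the poll tasklet shown further down):

/* Illustrative caller, acting on the bit convention from the kernel-doc
 * above: bit 2^0 means "poll again", bit 2^1 means "arm the timer". */
static void example_poll_until_idle(struct ap_device *ap_dev)
{
	unsigned long flags;

	do {
		flags = 0;
		ap_poll_queue(ap_dev, &flags);
	} while (flags & 1);		/* bit 2^0: another poll required */
	if (flags & 2)			/* bit 2^1: arm the poll timer */
		ap_schedule_poll_timer();
}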
} }
/** /**
* Poll AP device for pending replies and send new messages. If either * ap_poll_queue(): Poll AP device for pending replies and send new messages.
* ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
* @ap_dev: pointer to the bus device * @ap_dev: pointer to the bus device
* @flags: pointer to control flags, bit 2^0 is set if another poll is * @flags: pointer to control flags, bit 2^0 is set if another poll is
* required, bit 2^1 is set if the poll timer needs to get armed * required, bit 2^1 is set if the poll timer needs to get armed
*
* Poll AP device for pending replies and send new messages. If either
* ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
* Returns 0. * Returns 0.
*/ */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
@@ -986,9 +1014,11 @@ static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
}

/**
- * Queue a message to a device.
+ * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
+ *
+ * Queue a message to a device. Returns 0 if successful.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
@@ -1055,12 +1085,14 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
EXPORT_SYMBOL(ap_queue_message);

/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @ap_dev: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
 * Cancel a crypto request. This is done by removing the request
- * from the devive pendingq or requestq queue. Note that the
+ * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
- * @ap_dev: AP device that has the message queued
- * @ap_msg: the message that is to be removed
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
@@ -1082,7 +1114,10 @@ void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
EXPORT_SYMBOL(ap_cancel_message);

/**
- * AP receive polling for finished AP requests
+ * ap_poll_timeout(): AP receive polling for finished AP requests.
+ * @unused: Unused variable.
+ *
+ * Schedules the AP tasklet.
 */
static void ap_poll_timeout(unsigned long unused)
{
@@ -1090,6 +1125,9 @@ static void ap_poll_timeout(unsigned long unused)
}

/**
+ * ap_reset(): Reset a not responding AP device.
+ * @ap_dev: Pointer to the AP device
+ *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
@@ -1108,11 +1146,6 @@ static void ap_reset(struct ap_device *ap_dev)
		ap_dev->unregistered = 1;
}

-/**
- * Poll all AP devices on the bus in a round robin fashion. Continue
- * polling until bit 2^0 of the control flags is not set. If bit 2^1
- * of the control flags has been set arm the poll timer.
- */
static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
{
	spin_lock(&ap_dev->lock);
@@ -1126,6 +1159,14 @@ static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
	return 0;
}
/**
* ap_poll_all(): Poll all AP devices.
* @dummy: Unused variable
*
* Poll all AP devices on the bus in a round robin fashion. Continue
* polling until bit 2^0 of the control flags is not set. If bit 2^1
* of the control flags has been set arm the poll timer.
*/
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
@@ -1144,6 +1185,9 @@ static void ap_poll_all(unsigned long dummy)
}

/**
+ * ap_poll_thread(): Thread that polls for finished requests.
+ * @data: Unused pointer
+ *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
@@ -1213,7 +1257,10 @@ static void ap_poll_thread_stop(void)
}

/**
- * Handling of request timeouts
+ * ap_request_timeout(): Handling of request timeouts
+ * @data: Holds the AP device.
+ *
+ * Handles request timeouts.
 */
static void ap_request_timeout(unsigned long data)
{
@@ -1246,7 +1293,9 @@ static struct reset_call ap_reset_call = {
};

/**
- * The module initialization code.
+ * ap_module_init(): The module initialization code.
+ *
+ * Initializes the module.
 */
int __init ap_module_init(void)
{
@@ -1288,7 +1337,7 @@ int __init ap_module_init(void)
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

-	/* Setup the ap bus rescan timer. */
+	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
@@ -1325,7 +1374,9 @@ static int __ap_match_all(struct device *dev, void *data)
}

/**
- * The module termination code
+ * ap_module_exit(): The module termination code.
+ *
+ * Terminates the module.
 */
void ap_module_exit(void)
{


@@ -50,6 +50,15 @@ typedef unsigned int ap_qid_t;
#define AP_QID_QUEUE(_qid) ((_qid) & 15)

/**
 * struct ap_queue_status - Holds the AP queue status.
 * @queue_empty: Shows if queue is empty
 * @replies_waiting: Waiting replies
 * @queue_full: Is 1 if the queue is full
 * @pad: A 4 bit pad
 * @int_enabled: Shows if interrupts are enabled for the AP
 * @response_code: Holds the 8 bit response code
 * @pad2: A 16 bit pad
*
 * The ap queue status word is returned by all three AP functions
 * (PQAP, NQAP and DQAP). There's a set of flags in the first
 * byte, followed by a 1 byte response code.
@@ -75,7 +84,7 @@ struct ap_queue_status {
#define AP_RESPONSE_NO_FIRST_PART	0x13
#define AP_RESPONSE_MESSAGE_TOO_BIG	0x15

-/**
+/*
 * Known device types
 */
#define AP_DEVICE_TYPE_PCICC	3
@@ -84,7 +93,7 @@ struct ap_queue_status {
#define AP_DEVICE_TYPE_CEX2A	6
#define AP_DEVICE_TYPE_CEX2C	7

-/**
+/*
 * AP reset flag states
 */
#define AP_RESET_IGNORE	0	/* request timeout will be ignored */
@@ -152,7 +161,7 @@ struct ap_message {
	.dev_type=(dt),					\
	.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,

-/**
+/*
 * Note: don't use ap_send/ap_recv after using ap_queue_message
 * for the first time. Otherwise the ap message queue will get
 * confused.


@@ -36,10 +36,11 @@
#include <linux/compat.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
+#include <linux/hw_random.h>

#include "zcrypt_api.h"

-/**
+/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
@@ -52,7 +53,10 @@ static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

-/**
+static int zcrypt_rng_device_add(void);
+static void zcrypt_rng_device_remove(void);
+
+/*
 * Device attributes common for all crypto devices.
 */
static ssize_t zcrypt_type_show(struct device *dev,
@@ -99,6 +103,9 @@ static struct attribute_group zcrypt_device_attr_group = {
};

/**
+ * __zcrypt_increase_preference(): Increase preference of a crypto device.
+ * @zdev: Pointer to the crypto device
+ *
 * Move the device towards the head of the device list.
 * Need to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
@@ -125,6 +132,9 @@ static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
}

/**
+ * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
+ * @zdev: Pointer to a crypto device.
+ *
 * Move the device towards the tail of the device list.
 * Need to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
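
Taken together, the two helpers keep zcrypt_device_list ordered so the request distributor prefers fast, lightly loaded cards. A simplified sketch of the bookkeeping around one request (the real distributor code, e.g. zcrypt_rng() below, also takes module and device references):

/* Simplified view of the preference bookkeeping; illustrative only. */
static void example_submit(struct zcrypt_device *zdev)
{
	spin_lock_bh(&zcrypt_device_lock);
	zdev->request_count++;
	__zcrypt_decrease_preference(zdev);	/* busier -> later in list */
	spin_unlock_bh(&zcrypt_device_lock);
	/* ... perform the request ... */
	spin_lock_bh(&zcrypt_device_lock);
	zdev->request_count--;
	__zcrypt_increase_preference(zdev);	/* idle again -> earlier */
	spin_unlock_bh(&zcrypt_device_lock);
}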
@@ -198,7 +208,10 @@ void zcrypt_device_free(struct zcrypt_device *zdev)
EXPORT_SYMBOL(zcrypt_device_free);

/**
- * Register a crypto device.
+ * zcrypt_device_register() - Register a crypto device.
+ * @zdev: Pointer to a crypto device
+ *
+ * Register a crypto device. Returns 0 if successful.
 */
int zcrypt_device_register(struct zcrypt_device *zdev)
{
@@ -216,16 +229,37 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
	__zcrypt_increase_preference(zdev);
	zcrypt_device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
if (zdev->ops->rng) {
rc = zcrypt_rng_device_add();
if (rc)
goto out_unregister;
}
return 0;
out_unregister:
spin_lock_bh(&zcrypt_device_lock);
zcrypt_device_count--;
list_del_init(&zdev->list);
spin_unlock_bh(&zcrypt_device_lock);
sysfs_remove_group(&zdev->ap_dev->device.kobj,
&zcrypt_device_attr_group);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
out:
	return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);
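
With the new .rng hook, registration transparently brings up the shared hwrng device for the first capable card and rolls everything back if that fails. A hedged sketch of a card driver opting in (all example_* names are hypothetical; compare the PCIXCC probe changes later in this diff):

/* All example_* symbols are hypothetical placeholders. */
static long example_rng(struct zcrypt_device *zdev, char *buffer);

static struct zcrypt_ops example_ops = {
	/* .rsa_modexpo, .rsa_modexpo_crt, .send_cprb omitted for brevity */
	.rng = example_rng,
};

static int example_probe(struct zcrypt_device *zdev)
{
	zdev->ops = &example_ops;
	/* Because zdev->ops->rng is non-NULL, registration also calls
	 * zcrypt_rng_device_add() and unwinds on failure. */
	return zcrypt_device_register(zdev);
}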
/**
+ * zcrypt_device_unregister(): Unregister a crypto device.
+ * @zdev: Pointer to crypto device
+ *
 * Unregister a crypto device.
 */
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
+	if (zdev->ops->rng)
+		zcrypt_rng_device_remove();
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
@@ -238,7 +272,9 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
EXPORT_SYMBOL(zcrypt_device_unregister);

/**
- * zcrypt_read is not be supported beyond zcrypt 1.3.1
+ * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
+ *
+ * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
@@ -247,6 +283,8 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
}

/**
+ * zcrypt_write(): Not allowed.
+ *
 * Write is not allowed
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
} }
/** /**
* Device open/close functions to count number of users. * zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
*/ */
static int zcrypt_open(struct inode *inode, struct file *filp) static int zcrypt_open(struct inode *inode, struct file *filp)
{ {
@@ -264,13 +304,18 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
	return 0;
}
/**
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

-/**
+/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
@@ -280,7 +325,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;

-	/**
+	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
@@ -326,7 +371,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
-	/**
+	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
@@ -343,7 +388,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
-			/**
+			/*
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
@@ -359,7 +404,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
			    copy_from_user(&z3, crt->u_mult_inv, len))
				return -EFAULT;
			copied = 1;
-			/**
+			/*
			 * We have to restart device lookup -
			 * the device list may have changed by now.
			 */
@@ -427,6 +472,37 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
	return -ENODEV;
}
static long zcrypt_rng(char *buffer)
{
struct zcrypt_device *zdev;
int rc;
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) {
if (!zdev->online || !zdev->ops->rng)
continue;
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
zdev->request_count++;
__zcrypt_decrease_preference(zdev);
if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
spin_unlock_bh(&zcrypt_device_lock);
rc = zdev->ops->rng(zdev, buffer);
spin_lock_bh(&zcrypt_device_lock);
module_put(zdev->ap_dev->drv->driver.owner);
} else
rc = -EAGAIN;
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
return rc;
}
spin_unlock_bh(&zcrypt_device_lock);
return -ENODEV;
}
static void zcrypt_status_mask(char status[AP_DEVICES])
{
	struct zcrypt_device *zdev;
@@ -514,6 +590,8 @@ static int zcrypt_count_type(int type)
}

/**
+ * zcrypt_ica_status(): Old, deprecated combi status call.
+ *
 * Old, deprecated combi status call.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
@@ -615,7 +693,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   (int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
-	/**
+	/*
	 * Deprecated ioctls. Don't add another device count ioctl,
	 * you can count them yourself in the user space with the
	 * output of the Z90STAT_STATUS_MASK ioctl.
@@ -653,7 +731,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}

#ifdef CONFIG_COMPAT
-/**
+/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
@@ -804,7 +882,7 @@ static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
}
#endif

-/**
+/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
@@ -819,7 +897,7 @@ static const struct file_operations zcrypt_fops = {
	.release	= zcrypt_release
};

-/**
+/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
@@ -828,7 +906,7 @@ static struct miscdevice zcrypt_misc_device = {
	.fops	= &zcrypt_fops,
};

-/**
+/*
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;
@@ -1022,7 +1100,7 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
	}

	for (j = 0; j < 64 && *ptr; ptr++) {
-		/**
+		/*
		 * '0' for no device, '1' for PCICA, '2' for PCICC,
		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
		 * '5' for CEX2C and '6' for CEX2A'
@@ -1041,7 +1119,76 @@ out:
	return count;
}
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
int rc;
/*
* We don't need locking here because the RNG API guarantees serialized
* read method calls.
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *) zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
zcrypt_rng_buffer_index = rc / sizeof *data;
}
*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
return sizeof *data;
}
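
For scale: one backend call refills the whole page, i.e. 4096 / sizeof(u32) = 1024 words, handed out one per read in LIFO order until the index reaches zero again. A hypothetical illustration of the refill/drain cycle:

/* Hypothetical illustration: two consecutive reads. The first may
 * trigger a card request that refills the page; the second is served
 * from the same page. */
static void example_drain_two_words(void)
{
	u32 a, b;

	zcrypt_rng_data_read(&zcrypt_rng_dev, &a);
	zcrypt_rng_data_read(&zcrypt_rng_dev, &b);
}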
static struct hwrng zcrypt_rng_dev = {
.name = "zcrypt",
.data_read = zcrypt_rng_data_read,
};
static int zcrypt_rng_device_add(void)
{
int rc = 0;
mutex_lock(&zcrypt_rng_mutex);
if (zcrypt_rng_device_count == 0) {
zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
if (!zcrypt_rng_buffer) {
rc = -ENOMEM;
goto out;
}
zcrypt_rng_buffer_index = 0;
rc = hwrng_register(&zcrypt_rng_dev);
if (rc)
goto out_free;
zcrypt_rng_device_count = 1;
} else
zcrypt_rng_device_count++;
mutex_unlock(&zcrypt_rng_mutex);
return 0;
out_free:
free_page((unsigned long) zcrypt_rng_buffer);
out:
mutex_unlock(&zcrypt_rng_mutex);
return rc;
}
static void zcrypt_rng_device_remove(void)
{
mutex_lock(&zcrypt_rng_mutex);
zcrypt_rng_device_count--;
if (zcrypt_rng_device_count == 0) {
hwrng_unregister(&zcrypt_rng_dev);
free_page((unsigned long) zcrypt_rng_buffer);
}
mutex_unlock(&zcrypt_rng_mutex);
}
/**
+ * zcrypt_api_init(): Module initialization.
+ *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
@@ -1076,6 +1223,8 @@ out:
}

/**
+ * zcrypt_api_exit(): Module termination.
+ *
 * The module termination code.
 */
void zcrypt_api_exit(void)


@@ -43,17 +43,17 @@
#define DEV_NAME	"zcrypt"

#define PRINTK(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
#define PRINTKN(fmt, args...) \
	printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
#define PRINTKW(fmt, args...) \
-	printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
#define PRINTKC(fmt, args...) \
-	printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)

#ifdef ZCRYPT_DEBUG
#define PDEBUG(fmt, args...) \
-	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+	printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
#else
#define PDEBUG(fmt, args...) do {} while (0)
#endif
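
The macro change above is the same conversion applied throughout this series: the gcc-specific __FUNCTION__ gives way to C99's __func__, which behaves like an implicitly declared constant string inside every function. A standalone illustration (plain user-space C, not kernel code):

#include <stdio.h>

/* C99 defines __func__ as if each function contained:
 *     static const char __func__[] = "function-name";
 * so it is a portable replacement for gcc's __FUNCTION__. */
static void example(void)
{
	printf("%s\n", __func__);	/* prints "example" */
}

int main(void)
{
	example();
	return 0;
}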
@@ -100,6 +100,13 @@ struct ica_z90_status {
#define ZCRYPT_CEX2C		5
#define ZCRYPT_CEX2A		6
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
 * and stored in a page. Be careful when increasing this buffer due to size
* limitations for AP requests.
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
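
In other words, one card round trip refills the entire page: ZCRYPT_RNG_BUFFER_SIZE / sizeof(u32) = 4096 / 4 = 1024 random words. zcrypt_pcixcc_rng() accordingly passes ZCRYPT_RNG_BUFFER_SIZE as the random_number_length, which rng_type6CPRB_msgX() stores in the CPRB's FromCardLen2 and rpl_datal fields.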
struct zcrypt_device;

struct zcrypt_ops {
@@ -107,6 +114,7 @@ struct zcrypt_ops {
	long (*rsa_modexpo_crt)(struct zcrypt_device *,
				struct ica_rsa_modexpo_crt *);
	long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
+	long (*rng)(struct zcrypt_device *, char *);
};

struct zcrypt_device {


@@ -174,7 +174,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
	key->pvtMeHdr = static_pvt_me_hdr;
	key->pvtMeSec = static_pvt_me_sec;
	key->pubMeSec = static_pub_me_sec;
-	/**
+	/*
	 * In a private key, the modulus doesn't appear in the public
	 * section. So, an arbitrary public exponent of 0x010001 will be
	 * used.
@@ -338,7 +338,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
	pub = (struct cca_public_sec *)(key->key_parts + key_len);
	*pub = static_cca_pub_sec;
	pub->modulus_bit_len = 8 * crt->inputdatalength;
-	/**
+	/*
	 * In a private key, the modulus doesn't appear in the public
	 * section. So, an arbitrary public exponent of 0x010001 will be
	 * used.


@@ -108,7 +108,7 @@ static inline int convert_error(struct zcrypt_device *zdev,
		return -EINVAL;
	case REP82_ERROR_MESSAGE_TYPE:
	//   REP88_ERROR_MESSAGE_TYPE	// '20' CEX2A
-		/**
+		/*
		 * To send a message of the wrong type is a bug in the
		 * device driver. Warn about it, disable the device
		 * and then repeat the request.


@@ -42,7 +42,7 @@
#define PCICC_MAX_MOD_SIZE_OLD	 128	/* 1024 bits */
#define PCICC_MAX_MOD_SIZE	 256	/* 2048 bits */

-/**
+/*
 * PCICC cards need a speed rating of 0. This keeps them at the end of
 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
 * used if no other cards are present because they are slow and can only
@@ -388,7 +388,7 @@ static int convert_type86(struct zcrypt_device *zdev,
	reply_len = le16_to_cpu(msg->length) - 2;
	if (reply_len > outputdatalength)
		return -EINVAL;
-	/**
+	/*
	 * For all encipher requests, the length of the ciphertext (reply_len)
	 * will always equal the modulus length. For MEX decipher requests
	 * the output needs to get padded. Minimum pad size is 10.


@@ -355,6 +355,55 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
	return 0;
}
/**
* Prepare a type6 CPRB message for random number generation
*
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
*/
static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
struct ap_message *ap_msg,
unsigned random_number_length)
{
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
char function_code[2];
short int rule_length;
char rule[8];
short int verb_length;
short int key_length;
} __attribute__((packed)) *msg = ap_msg->message;
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C', 'A'},
.function_code = {'R', 'L'},
.ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
.FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
};
static struct CPRBX static_cprbx = {
.cprb_len = 0x00dc,
.cprb_ver_id = 0x02,
.func_id = {0x54, 0x32},
.req_parml = sizeof *msg - sizeof(msg->hdr) -
sizeof(msg->cprbx),
.rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
};
msg->hdr = static_type6_hdrX;
	msg->hdr.FromCardLen2 = random_number_length;
msg->cprbx = static_cprbx;
	msg->cprbx.rpl_datal = random_number_length;
msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof *msg;
}
/**
 * Copy results from a type 86 ICA reply message back to user space.
 *
@@ -452,7 +501,7 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
	reply_len = msg->length - 2;
	if (reply_len > outputdatalength)
		return -EINVAL;
-	/**
+	/*
	 * For all encipher requests, the length of the ciphertext (reply_len)
	 * will always equal the modulus length. For MEX decipher requests
	 * the output needs to get padded. Minimum pad size is 10.
@@ -509,6 +558,26 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
	return 0;
}
static int convert_type86_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *buffer)
{
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *msg = reply->message;
char *data = reply->message;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
		PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
		       msg->cprbx.ccp_rtcode, msg->cprbx.ccp_rscode);
return -EINVAL;
}
memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
return msg->fmt2.count2;
}
static int convert_response_ica(struct zcrypt_device *zdev,
				struct ap_message *reply,
				char __user *outputdata,
@@ -567,6 +636,31 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
	}
}
static int convert_response_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *data)
{
struct type86x_reply *msg = reply->message;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return -EINVAL;
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zdev, reply, data);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
/**
 * This function is called from the AP bus code after a crypto request
 * "msg" has finished with the reply message "reply".
@@ -735,6 +829,42 @@ out_free:
	return rc;
}
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
char *buffer)
{
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
int rc;
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0)
rc = convert_response_rng(zdev, &ap_msg, buffer);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
kfree(ap_msg.message);
return rc;
}
/**
 * The crypto operations for a PCIXCC/CEX2C card.
 */
@@ -744,6 +874,13 @@ static struct zcrypt_ops zcrypt_pcixcc_ops = {
	.send_cprb = zcrypt_pcixcc_send_cprb,
};
static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
.rsa_modexpo = zcrypt_pcixcc_modexpo,
.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
.send_cprb = zcrypt_pcixcc_send_cprb,
.rng = zcrypt_pcixcc_rng,
};
/**
 * Micro-code detection function. It sends a message to a pcixcc card
 * to find out the microcode level.
@@ -858,6 +995,58 @@ out_free:
	return rc;
}
/**
 * Large random number detection function. It sends a message to a pcixcc
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
*
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/
static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
{
struct ap_message ap_msg;
unsigned long long psmid;
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *reply;
int rc, i;
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length);
if (rc)
goto out_free;
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
if (i >= 2 * HZ) {
/* Got no answer. */
rc = -ENODEV;
goto out_free;
}
reply = ap_msg.message;
if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
rc = 1;
else
rc = 0;
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
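
Note the timing of the loop above: each iteration sleeps msleep(1000 / HZ), roughly one jiffy, and the loop runs at most 2 * HZ times, so the probe waits about two seconds of wall-clock time for the card's answer before giving up with -ENODEV.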
/**
 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
 * since the bus_match already checked the hardware type. The PCIXCC
@@ -874,7 +1063,6 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
	if (!zdev)
		return -ENOMEM;
	zdev->ap_dev = ap_dev;
-	zdev->ops = &zcrypt_pcixcc_ops;
	zdev->online = 1;
	if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
		rc = zcrypt_pcixcc_mcl(ap_dev);
@@ -901,6 +1089,15 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
	}
rc = zcrypt_pcixcc_rng_supported(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
return rc;
}
if (rc)
zdev->ops = &zcrypt_pcixcc_with_rng_ops;
else
zdev->ops = &zcrypt_pcixcc_ops;
	ap_dev->reply = &zdev->reply;
	ap_dev->private = zdev;
	rc = zcrypt_device_register(zdev);


@@ -670,7 +670,7 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
@@ -718,7 +718,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
	struct ll_header header;
	int rc;

-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);

	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
@@ -799,7 +799,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
@@ -821,7 +821,7 @@ static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	iucv_path_sever(path, NULL);
}
@@ -831,7 +831,7 @@ static void conn_action_connack(fsm_instance *fi, int event, void *arg)
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
@@ -842,7 +842,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -854,7 +854,7 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
@@ -870,7 +870,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
	struct iucv_connection *conn = arg;
	int rc;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	PRINT_DEBUG("%s('%s'): connecting ...\n",
@@ -948,7 +948,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
@@ -1024,7 +1024,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
@@ -1044,7 +1044,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event ev;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;
@@ -1066,7 +1066,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
		case DEV_STATE_STARTWAIT:
@@ -1097,7 +1097,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
		case DEV_STATE_RUNNING:
@@ -1288,7 +1288,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

-	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 4, __func__);
	/**
	 * Some sanity checks ...
	 */
@@ -1344,7 +1344,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
	return &priv->stats;
}
@@ -1360,7 +1360,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
 */
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
		return -EINVAL;
@@ -1378,7 +1378,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
{
	struct netiucv_priv *priv = dev->driver_data;

-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
@@ -1393,7 +1393,7 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
	int i;
	struct iucv_connection *cp;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
		IUCV_DBF_TEXT_(setup, 2,
@@ -1449,7 +1449,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{	struct netiucv_priv *priv = dev->driver_data;

-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
@@ -1461,7 +1461,7 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
	char *e;
	int bs1;

-	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count >= 39)
		return -EINVAL;
@@ -1513,7 +1513,7 @@ static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
{
	struct netiucv_priv *priv = dev->driver_data;

-	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
@@ -1524,7 +1524,7 @@ static ssize_t conn_fsm_show (struct device *dev,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
} }
@ -1535,7 +1535,7 @@ static ssize_t maxmulti_show (struct device *dev,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
} }
@ -1545,7 +1545,7 @@ static ssize_t maxmulti_write (struct device *dev,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxmulti = 0; priv->conn->prof.maxmulti = 0;
return count; return count;
} }
@ -1557,7 +1557,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
} }
@ -1566,7 +1566,7 @@ static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxcqueue = 0; priv->conn->prof.maxcqueue = 0;
return count; return count;
} }
@ -1578,7 +1578,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
} }
@ -1587,7 +1587,7 @@ static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.doios_single = 0; priv->conn->prof.doios_single = 0;
return count; return count;
} }
@ -1599,7 +1599,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
} }
@ -1608,7 +1608,7 @@ static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
priv->conn->prof.doios_multi = 0; priv->conn->prof.doios_multi = 0;
return count; return count;
} }
@ -1620,7 +1620,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.txlen); return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
} }
@ -1629,7 +1629,7 @@ static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.txlen = 0; priv->conn->prof.txlen = 0;
return count; return count;
} }
@ -1641,7 +1641,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
} }
@ -1650,7 +1650,7 @@ static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_time = 0; priv->conn->prof.tx_time = 0;
return count; return count;
} }
@ -1662,7 +1662,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
} }
@ -1671,7 +1671,7 @@ static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_pending = 0; priv->conn->prof.tx_pending = 0;
return count; return count;
} }
@ -1683,7 +1683,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__); IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
} }
@ -1692,7 +1692,7 @@ static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__); IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_max_pending = 0; priv->conn->prof.tx_max_pending = 0;
return count; return count;
} }
@ -1732,7 +1732,7 @@ static int netiucv_add_files(struct device *dev)
{ {
int ret; int ret;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group); ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
if (ret) if (ret)
return ret; return ret;
@ -1744,7 +1744,7 @@ static int netiucv_add_files(struct device *dev)
static void netiucv_remove_files(struct device *dev) static void netiucv_remove_files(struct device *dev)
{ {
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
sysfs_remove_group(&dev->kobj, &netiucv_attr_group); sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
} }
@ -1756,7 +1756,7 @@ static int netiucv_register_device(struct net_device *ndev)
int ret; int ret;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
if (dev) { if (dev) {
snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
@ -1792,7 +1792,7 @@ out_unreg:
static void netiucv_unregister_device(struct device *dev) static void netiucv_unregister_device(struct device *dev)
{ {
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
netiucv_remove_files(dev); netiucv_remove_files(dev);
device_unregister(dev); device_unregister(dev);
} }
@ -1857,7 +1857,7 @@ out:
*/ */
static void netiucv_remove_connection(struct iucv_connection *conn) static void netiucv_remove_connection(struct iucv_connection *conn)
{ {
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock); write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list); list_del_init(&conn->list);
write_unlock_bh(&iucv_connection_rwlock); write_unlock_bh(&iucv_connection_rwlock);
@ -1881,7 +1881,7 @@ static void netiucv_free_netdevice(struct net_device *dev)
{ {
struct netiucv_priv *privptr = netdev_priv(dev); struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
if (!dev) if (!dev)
return; return;
@ -1963,7 +1963,7 @@ static ssize_t conn_write(struct device_driver *drv,
struct netiucv_priv *priv; struct netiucv_priv *priv;
struct iucv_connection *cp; struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
if (count>9) { if (count>9) {
PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
@ -2048,7 +2048,7 @@ static ssize_t remove_write (struct device_driver *drv,
const char *p; const char *p;
int i; int i;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= IFNAMSIZ) if (count >= IFNAMSIZ)
count = IFNAMSIZ - 1;; count = IFNAMSIZ - 1;;
@ -2116,7 +2116,7 @@ static void __exit netiucv_exit(void)
struct netiucv_priv *priv; struct netiucv_priv *priv;
struct device *dev; struct device *dev;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
while (!list_empty(&iucv_connection_list)) { while (!list_empty(&iucv_connection_list)) {
cp = list_entry(iucv_connection_list.next, cp = list_entry(iucv_connection_list.next,
struct iucv_connection, list); struct iucv_connection, list);
@ -2146,8 +2146,7 @@ static int __init netiucv_init(void)
rc = iucv_register(&netiucv_handler, 1); rc = iucv_register(&netiucv_handler, 1);
if (rc) if (rc)
goto out_dbf; goto out_dbf;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__); IUCV_DBF_TEXT(trace, 3, __func__);
netiucv_driver.groups = netiucv_drv_attr_groups;
rc = driver_register(&netiucv_driver); rc = driver_register(&netiucv_driver);
if (rc) { if (rc) {
PRINT_ERR("NETIUCV: failed to register driver.\n"); PRINT_ERR("NETIUCV: failed to register driver.\n");
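The substantive change in this file is purely mechanical: every __FUNCTION__ becomes __func__. __FUNCTION__ is a GCC-specific spelling, while __func__ is the predefined identifier that C99 guarantees inside every function body, so the replacement costs nothing and removes a compiler dependency. A minimal user-space sketch of the portable form (not taken from the patch):

	#include <stdio.h>

	/* __func__ expands to the enclosing function's name on any C99
	 * compiler; __FUNCTION__ only works where the GCC extension exists. */
	static void demo(void)
	{
		printf("entered %s\n", __func__);
	}

	int main(void)
	{
		demo();
		return 0;
	}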
@@ -59,15 +59,15 @@ repeat:
 			printk(KERN_WARNING"%s: Code does not support more "
 			       "than two chained crws; please report to "
-			       "linux390@de.ibm.com!\n", __FUNCTION__);
+			       "linux390@de.ibm.com!\n", __func__);
 			ccode = stcrw(&tmp_crw);
 			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
 			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
-			       __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
+			       __func__, tmp_crw.slct, tmp_crw.oflw,
 			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
 			       tmp_crw.erc, tmp_crw.rsid);
 			printk(KERN_WARNING"%s: This was crw number %x in the "
-			       "chain\n", __FUNCTION__, chain);
+			       "chain\n", __func__, chain);
 			if (ccode != 0)
 				break;
 			chain = tmp_crw.chn ? chain + 1 : 0;
@@ -83,7 +83,7 @@ repeat:
 			crw[chain].rsid);
 		/* Check for overflows. */
 		if (crw[chain].oflw) {
-			pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
+			pr_debug("%s: crw overflow detected!\n", __func__);
 			css_schedule_eval_all();
 			chain = 0;
 			continue;
@@ -105,4 +105,8 @@ static inline int stcrw(struct crw *pcrw )
 #define ED_ETR_SYNC	12	/* External damage ETR sync check */
 #define ED_ETR_SWITCH	13	/* External damage ETR switch to local */
 
+struct pt_regs;
+
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
 #endif /* __s390mach */
@@ -539,7 +539,7 @@ struct zfcp_rc_entry {
 /* logging routine for zfcp */
 #define _ZFCP_LOG(fmt, args...) \
-	printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \
+	printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
 	       __LINE__ , ##args)
 
 #define ZFCP_LOG(level, fmt, args...) \
@@ -11,111 +11,13 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
 /* Sigh, math-emu. Don't ask. */
 #include <asm/sfp-util.h>
 #include <math-emu/soft-fp.h>
 #include <math-emu/single.h>
 
-struct sysinfo_1_1_1 {
-	char reserved_0[32];
-	char manufacturer[16];
-	char type[4];
-	char reserved_1[12];
-	char model_capacity[16];
-	char sequence[16];
-	char plant[4];
-	char model[16];
-};
-
-struct sysinfo_1_2_1 {
-	char reserved_0[80];
-	char sequence[16];
-	char plant[4];
-	char reserved_1[2];
-	unsigned short cpu_address;
-};
-
-struct sysinfo_1_2_2 {
-	char format;
-	char reserved_0[1];
-	unsigned short acc_offset;
-	char reserved_1[24];
-	unsigned int secondary_capability;
-	unsigned int capability;
-	unsigned short cpus_total;
-	unsigned short cpus_configured;
-	unsigned short cpus_standby;
-	unsigned short cpus_reserved;
-	unsigned short adjustment[0];
-};
-
-struct sysinfo_1_2_2_extension {
-	unsigned int alt_capability;
-	unsigned short alt_adjustment[0];
-};
-
-struct sysinfo_2_2_1 {
-	char reserved_0[80];
-	char sequence[16];
-	char plant[4];
-	unsigned short cpu_id;
-	unsigned short cpu_address;
-};
-
-struct sysinfo_2_2_2 {
-	char reserved_0[32];
-	unsigned short lpar_number;
-	char reserved_1;
-	unsigned char characteristics;
-	unsigned short cpus_total;
-	unsigned short cpus_configured;
-	unsigned short cpus_standby;
-	unsigned short cpus_reserved;
-	char name[8];
-	unsigned int caf;
-	char reserved_2[16];
-	unsigned short cpus_dedicated;
-	unsigned short cpus_shared;
-};
-
-#define LPAR_CHAR_DEDICATED	(1 << 7)
-#define LPAR_CHAR_SHARED	(1 << 6)
-#define LPAR_CHAR_LIMITED	(1 << 5)
-
-struct sysinfo_3_2_2 {
-	char reserved_0[31];
-	unsigned char count;
-	struct {
-		char reserved_0[4];
-		unsigned short cpus_total;
-		unsigned short cpus_configured;
-		unsigned short cpus_standby;
-		unsigned short cpus_reserved;
-		char name[8];
-		unsigned int caf;
-		char cpi[16];
-		char reserved_1[24];
-	} vm[8];
-};
-
-static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
-{
-	register int r0 asm("0") = (fc << 28) | sel1;
-	register int r1 asm("1") = sel2;
-
-	asm volatile(
-		"	stsi	0(%2)\n"
-		"0:	jz	2f\n"
-		"1:	lhi	%0,%3\n"
-		"2:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
-		: "cc", "memory" );
-	return r0;
-}
-
 static inline int stsi_0(void)
 {
 	int rc = stsi (NULL, 0, 0, 0);
@@ -133,6 +35,8 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
 	EBCASC(info->sequence, sizeof(info->sequence));
 	EBCASC(info->plant, sizeof(info->plant));
 	EBCASC(info->model_capacity, sizeof(info->model_capacity));
+	EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
+	EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
 	len += sprintf(page + len, "Manufacturer: %-16.16s\n",
 		       info->manufacturer);
 	len += sprintf(page + len, "Type: %-4.4s\n",
@@ -155,8 +59,18 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
 		       info->sequence);
 	len += sprintf(page + len, "Plant: %-4.4s\n",
 		       info->plant);
-	len += sprintf(page + len, "Model Capacity: %-16.16s\n",
-		       info->model_capacity);
+	len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
+		       info->model_capacity, *(u32 *) info->model_cap_rating);
+	if (info->model_perm_cap[0] != '\0')
+		len += sprintf(page + len,
+			       "Model Perm. Capacity: %-16.16s %08u\n",
+			       info->model_perm_cap,
+			       *(u32 *) info->model_perm_cap_rating);
+	if (info->model_temp_cap[0] != '\0')
+		len += sprintf(page + len,
+			       "Model Temp. Capacity: %-16.16s %08u\n",
+			       info->model_temp_cap,
+			       *(u32 *) info->model_temp_cap_rating);
 	return len;
 }
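The %08u output works because each *_cap_rating member is a 4-byte array carrying a 32-bit big-endian rating, and s390 is itself big-endian, so reinterpreting the bytes as a u32 is safe here. A sketch of the idea (illustrative values; this trick is not portable to little-endian machines):

	/* Bytes as delivered by stsi(): 0x000001f4 == 500 (big-endian). */
	char rating_bytes[4] = { 0x00, 0x00, 0x01, 0xf4 };
	unsigned int rating = *(unsigned int *) rating_bytes;	/* 500 on s390 */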
@@ -397,6 +397,10 @@ struct cio_iplinfo {
 extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
 
+/* Function from drivers/s390/cio/chsc.c */
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpi(void *page, void *result, size_t size);
 
 #endif
 #endif
@@ -22,4 +22,12 @@ struct s390_idle_data {
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
+void s390_idle_leave(void);
+
+static inline void s390_idle_check(void)
+{
+	if ((&__get_cpu_var(s390_idle))->in_idle)
+		s390_idle_leave();
+}
 #endif /* _ASM_S390_CPU_H_ */
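s390_idle_check() is a cheap inline guard: only when the per-cpu s390_idle data is flagged in_idle does it pay for the out-of-line call to s390_idle_leave(). A hedged sketch of the intended call pattern, presumably early in a path that may interrupt an idling cpu (the function name below is hypothetical, not from the patch):

	void example_interrupt_entry(void)	/* hypothetical entry hook */
	{
		s390_idle_check();	/* close the idle accounting window */
		/* ... handle the actual interrupt ... */
	}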
@@ -73,6 +73,7 @@ typedef struct debug_info {
 	struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
 	struct debug_view* views[DEBUG_MAX_VIEWS];
 	char name[DEBUG_MAX_NAME_LEN];
+	mode_t mode;
 } debug_info_t;
 
 typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level,
 debug_info_t* debug_register(char* name, int pages, int nr_areas,
 			     int buf_size);
 
+debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
+				  int buf_size, mode_t mode, uid_t uid,
+				  gid_t gid);
 
 void debug_unregister(debug_info_t* id);
 
 void debug_set_level(debug_info_t* id, int new_level);
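A sketch of how a driver might call the new registration variant; the name "mydrv" and the sizes are illustrative, and the implementation currently accepts only uid/gid 0:

	/* 4 pages per area, 1 area, 16-byte entries; debugfs files
	 * readable by root only (S_IRUSR), owned by uid 0 / gid 0. */
	debug_info_t *dbf = debug_register_mode("mydrv", 4, 1, 16,
						S_IRUSR, 0, 0);
	if (dbf)
		debug_set_level(dbf, 3);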
@@ -22,11 +22,12 @@
 #define SEGMENT_SHARED 0
 #define SEGMENT_EXCLUSIVE 1
 
-extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
-extern void segment_unload(char *name);
-extern void segment_save(char *name);
-extern int segment_type (char* name);
-extern int segment_modify_shared (char *name, int do_nonshared);
+int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
+void segment_unload(char *name);
+void segment_save(char *name);
+int segment_type (char* name);
+int segment_modify_shared (char *name, int do_nonshared);
+void segment_warning(int rc, char *seg_name);
 
 #endif
 #endif
@@ -32,6 +32,6 @@ typedef struct {
 #define HARDIRQ_BITS	8
 
-extern void account_ticks(u64 time);
+void clock_comparator_work(void);
 
 #endif /* __ASM_HARDIRQ_H */
@@ -56,6 +56,8 @@
 #define __LC_IO_INT_WORD		0x0C0
 #define __LC_MCCK_CODE			0x0E8
 
+#define __LC_LAST_BREAK			0x110
 
 #define __LC_RETURN_PSW			0x200
 #define __LC_SAVE_AREA			0xC00
@@ -80,7 +82,6 @@
 #define __LC_CPUID			0xC60
 #define __LC_CPUADDR			0xC68
 #define __LC_IPLDEV			0xC7C
-#define __LC_JIFFY_TIMER		0xC80
 #define __LC_CURRENT			0xC90
 #define __LC_INT_CLOCK			0xC98
 #else /* __s390x__ */
@@ -103,7 +104,6 @@
 #define __LC_CPUID			0xD80
 #define __LC_CPUADDR			0xD88
 #define __LC_IPLDEV			0xDB8
-#define __LC_JIFFY_TIMER		0xDC0
 #define __LC_CURRENT			0xDD8
 #define __LC_INT_CLOCK			0xDE8
 #endif /* __s390x__ */
@@ -276,7 +276,7 @@ struct _lowcore
 	/* entry.S sensitive area end */
 
 	/* SMP info area: defined by DJB */
-	__u64	jiffy_timer;		/* 0xc80 */
+	__u64	clock_comparator;	/* 0xc80 */
 	__u32	ext_call_fast;		/* 0xc88 */
 	__u32	percpu_offset;		/* 0xc8c */
 	__u32	current_task;		/* 0xc90 */
@@ -368,11 +368,12 @@ struct _lowcore
 	/* entry.S sensitive area end */
 
 	/* SMP info area: defined by DJB */
-	__u64	jiffy_timer;		/* 0xdc0 */
+	__u64	clock_comparator;	/* 0xdc0 */
 	__u64	ext_call_fast;		/* 0xdc8 */
 	__u64	percpu_offset;		/* 0xdd0 */
 	__u64	current_task;		/* 0xdd8 */
-	__u64	softirq_pending;	/* 0xde0 */
+	__u32	softirq_pending;	/* 0xde0 */
+	__u32	pad_0x0de4;		/* 0xde4 */
 	__u64	int_clock;		/* 0xde8 */
 	__u8	pad12[0xe00-0xdf0];	/* 0xdf0 */
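Shrinking softirq_pending from __u64 to __u32 would ordinarily pull every later member forward by four bytes; the new pad_0x0de4 field exists solely to keep int_clock at its documented 0xde8 offset, matching the unchanged __LC_INT_CLOCK define above. A compressed sketch of the layout argument:

	struct layout {
		unsigned int softirq_pending;		/* 0xde0, was 8 bytes */
		unsigned int pad_0x0de4;		/* 0xde4, preserves the next offset */
		unsigned long long int_clock;		/* still 0xde8 */
	};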
@@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task);
 extern void show_registers(struct pt_regs *regs);
 extern void show_code(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
+#ifdef CONFIG_64BIT
+extern void show_last_breaking_event(struct pt_regs *regs);
+#else
+static inline void show_last_breaking_event(struct pt_regs *regs)
+{
+}
+#endif
 
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
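The #ifdef pair is the usual kernel pattern for an optional facility: 64-bit kernels get a real implementation (the breaking-event address is a z/Architecture feature), while everything else gets an empty inline the compiler deletes, so callers need no guards of their own. The caller's view, as a sketch:

	/* In an oops path; compiles identically on 31- and 64-bit kernels. */
	show_registers(regs);
	show_last_breaking_event(regs);	/* no-op unless CONFIG_64BIT */
	show_code(regs);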
@@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu);
 extern void cpu_die (void) __attribute__ ((noreturn));
 extern int __cpu_up (unsigned int cpu);
 
+extern struct mutex smp_cpu_state_mutex;
+extern int smp_cpu_polarization[];
+
 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 				  void *info, int wait);
 #endif

include/asm-s390/sysinfo.h (new file, 116 lines)

@@ -0,0 +1,116 @@
+/*
+ * definition for store system information stsi
+ *
+ * Copyright IBM Corp. 2001,2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Ulrich Weigand <weigand@de.ibm.com>
+ *	      Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+struct sysinfo_1_1_1 {
+	char reserved_0[32];
+	char manufacturer[16];
+	char type[4];
+	char reserved_1[12];
+	char model_capacity[16];
+	char sequence[16];
+	char plant[4];
+	char model[16];
+	char model_perm_cap[16];
+	char model_temp_cap[16];
+	char model_cap_rating[4];
+	char model_perm_cap_rating[4];
+	char model_temp_cap_rating[4];
+};
+
+struct sysinfo_1_2_1 {
+	char reserved_0[80];
+	char sequence[16];
+	char plant[4];
+	char reserved_1[2];
+	unsigned short cpu_address;
+};
+
+struct sysinfo_1_2_2 {
+	char format;
+	char reserved_0[1];
+	unsigned short acc_offset;
+	char reserved_1[24];
+	unsigned int secondary_capability;
+	unsigned int capability;
+	unsigned short cpus_total;
+	unsigned short cpus_configured;
+	unsigned short cpus_standby;
+	unsigned short cpus_reserved;
+	unsigned short adjustment[0];
+};
+
+struct sysinfo_1_2_2_extension {
+	unsigned int alt_capability;
+	unsigned short alt_adjustment[0];
+};
+
+struct sysinfo_2_2_1 {
+	char reserved_0[80];
+	char sequence[16];
+	char plant[4];
+	unsigned short cpu_id;
+	unsigned short cpu_address;
+};
+
+struct sysinfo_2_2_2 {
+	char reserved_0[32];
+	unsigned short lpar_number;
+	char reserved_1;
+	unsigned char characteristics;
+	unsigned short cpus_total;
+	unsigned short cpus_configured;
+	unsigned short cpus_standby;
+	unsigned short cpus_reserved;
+	char name[8];
+	unsigned int caf;
+	char reserved_2[16];
+	unsigned short cpus_dedicated;
+	unsigned short cpus_shared;
+};
+
+#define LPAR_CHAR_DEDICATED	(1 << 7)
+#define LPAR_CHAR_SHARED	(1 << 6)
+#define LPAR_CHAR_LIMITED	(1 << 5)
+
+struct sysinfo_3_2_2 {
+	char reserved_0[31];
+	unsigned char count;
+	struct {
+		char reserved_0[4];
+		unsigned short cpus_total;
+		unsigned short cpus_configured;
+		unsigned short cpus_standby;
+		unsigned short cpus_reserved;
+		char name[8];
+		unsigned int caf;
+		char cpi[16];
+		char reserved_1[24];
+	} vm[8];
+};
+
+static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
+{
+	register int r0 asm("0") = (fc << 28) | sel1;
+	register int r1 asm("1") = sel2;
+
+	asm volatile(
+		"	stsi	0(%2)\n"
+		"0:	jz	2f\n"
+		"1:	lhi	%0,%3\n"
+		"2:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
+		: "cc", "memory");
+	return r0;
+}
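A sketch of a typical caller: allocate a zeroed page, issue function code 1 / selectors 1,1 for the basic machine block, and bail out if the machine rejects the request (the stsi() wrapper above returns -ENOSYS in that case). The buffer handling here is illustrative:

	struct sysinfo_1_1_1 *info;

	info = (struct sysinfo_1_1_1 *) get_zeroed_page(GFP_KERNEL);
	if (info && stsi(info, 1, 1, 1) != -ENOSYS)
		EBCASC(info->manufacturer, sizeof(info->manufacturer));
	free_page((unsigned long) info);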
@@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask)
 #define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
 #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
 
+int stfle(unsigned long long *list, int doublewords);
+
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
@@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void)
 	return clk;
 }
 
-static inline void get_clock_extended(void *dest)
+static inline unsigned long long get_clock_xt(void)
 {
-	typedef struct { unsigned long long clk[2]; } __clock_t;
+	unsigned char clk[16];
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-	asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
+	asm volatile("stcke %0" : "=Q" (clk) : : "cc");
 #else /* __GNUC__ */
-	asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
-		     : "a" ((__clock_t *)dest) : "cc");
+	asm volatile("stcke 0(%1)" : "=m" (clk)
+		     : "a" (clk) : "cc");
 #endif /* __GNUC__ */
+
+	return *((unsigned long long *) &clk[1]);
 }
 
 static inline cycles_t get_cycles(void)
@@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void)
 int get_sync_clock(unsigned long long *clock);
 void init_cpu_timer(void);
+unsigned long long monotonic_clock(void);
 
 #endif
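The rewrite makes more sense once the STCKE layout is in view: the instruction stores 16 bytes, of which byte 0 is the epoch/programmable field, so bytes 1..8 line up exactly with the 8-byte value STCK would store. Reading an unsigned long long at offset 1 therefore yields a result directly comparable to get_clock(), which is what get_clock_xt() now returns. The extraction step alone, as a sketch (assuming clk was just filled by stcke):

	unsigned char clk[16];			/* stcke output buffer */
	unsigned long long tod;

	tod = *(unsigned long long *) &clk[1];	/* bytes 1..8 == STCK word */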
@@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void)
 	asm volatile("ptlb" : : : "memory");
 }
 
+#ifdef CONFIG_SMP
 /*
  * Flush all tlb entries on all cpus.
  */
+void smp_ptlb_all(void);
+
 static inline void __tlb_flush_global(void)
 {
-	extern void smp_ptlb_all(void);
 	register unsigned long reg2 asm("2");
 	register unsigned long reg3 asm("3");
 	register unsigned long reg4 asm("4");
@@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void)
 		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
 }
 
+static inline void __tlb_flush_full(struct mm_struct *mm)
+{
+	cpumask_t local_cpumask;
+
+	preempt_disable();
+	/*
+	 * If the process only ran on the local cpu, do a local flush.
+	 */
+	local_cpumask = cpumask_of_cpu(smp_processor_id());
+	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
+		__tlb_flush_local();
+	else
+		__tlb_flush_global();
+	preempt_enable();
+}
+#else
+#define __tlb_flush_full(mm)	__tlb_flush_local()
+#endif
+
 /*
  * Flush all tlb entries of a page table on all cpus.
  */
@@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
-	cpumask_t local_cpumask;
-
 	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
 		return;
 	/*
@@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 				 mm->context.asce_bits);
 		return;
 	}
-	preempt_disable();
-	/*
-	 * If the process only ran on the local cpu, do a local flush.
-	 */
-	local_cpumask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
-		__tlb_flush_local();
-	else
-		__tlb_flush_global();
-	preempt_enable();
+	__tlb_flush_full(mm);
 }
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
@@ -1,6 +1,29 @@
 #ifndef _ASM_S390_TOPOLOGY_H
 #define _ASM_S390_TOPOLOGY_H
 
+#include <linux/cpumask.h>
+
+#define mc_capable()	(1)
+
+cpumask_t cpu_coregroup_map(unsigned int cpu);
+
+int topology_set_cpu_management(int fc);
+void topology_schedule_update(void);
+
+#define POLARIZATION_UNKNWN	(-1)
+#define POLARIZATION_HRZ	(0)
+#define POLARIZATION_VL	(1)
+#define POLARIZATION_VM	(2)
+#define POLARIZATION_VH	(3)
+
+#ifdef CONFIG_SMP
+void s390_init_cpu_topology(void);
+#else
+static inline void s390_init_cpu_topology(void)
+{
+};
+#endif
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
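As I read the new constants against the vertical cpu management work in this merge, a cpu reported by the topology facility is either horizontally polarized or vertical with low, medium, or high entitlement, and UNKNWN covers the window before the first topology update arrives. A sketch of how the per-cpu value might be inspected (smp_cpu_polarization[] is the array declared in asm/smp.h above):

	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_VH:
		/* vertical high: cpu is backed by dedicated capacity */
		break;
	case POLARIZATION_HRZ:
		/* horizontal: capacity spread evenly across cpus */
		break;
	default:
		break;
	}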
@@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+
+extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_can_set_affinity(unsigned int irq);
+
+#else /* CONFIG_SMP */
+
+static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+	return -EINVAL;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq)
+{
+	return 0;
+}
+
+#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+
 #ifdef CONFIG_GENERIC_HARDIRQS
 /*
  * Special lockdep variants of irq disabling/enabling.
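With the prototypes now visible from linux/interrupt.h, ordinary code can steer an interrupt without pulling in the internal linux/irq.h header, and uniprocessor builds degrade cleanly to the stubs above. A hedged usage sketch (irq is assumed to be a valid, already-requested interrupt number):

	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of_cpu(0));	/* pin to CPU 0 */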
@@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
 #endif /* CONFIG_GENERIC_PENDING_IRQ */
 
-extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
-extern int irq_can_set_affinity(unsigned int irq);
-
 #else /* CONFIG_SMP */
 
 #define move_native_irq(x)
 #define move_masked_irq(x)
 
-static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
-{
-	return -EINVAL;
-}
-
-static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
-
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_IRQBALANCE
@@ -14,7 +14,7 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
@@ -14,12 +14,14 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
 
+#include <asm/irq_regs.h>
+
 #include "tick-internal.h"
 
 /*
@@ -14,7 +14,7 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>