commit dbbcfb2211
Merge branch 'linus' into x86/pci-ioapic-boot-irq-quirks

Conflicts:
	arch/x86/mm/ioremap.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>

115 changed files with 2223 additions and 388 deletions

Documentation/ftrace.txt (new file, 1353 lines): diff suppressed because it is too large.
@@ -148,9 +148,9 @@ tcp_available_congestion_control - STRING
 	but not loaded.
 
 tcp_base_mss - INTEGER
-	The initial value of search_low to be used by Packetization Layer
-	Path MTU Discovery (MTU probing). If MTU probing is enabled,
-	this is the inital MSS used by the connection.
+	The initial value of search_low to be used by the packetization layer
+	Path MTU discovery (MTU probing). If MTU probing is enabled,
+	this is the initial MSS used by the connection.
 
 tcp_congestion_control - STRING
 	Set the congestion control algorithm to be used for new
@@ -185,10 +185,9 @@ tcp_frto - INTEGER
 	timeouts. It is particularly beneficial in wireless environments
 	where packet loss is typically due to random radio interference
 	rather than intermediate router congestion. F-RTO is sender-side
 	only modification. Therefore it does not require any support from
-	the peer, but in a typical case, however, where wireless link is
-	the local access link and most of the data flows downlink, the
-	faraway servers should have F-RTO enabled to take advantage of it.
+	the peer.
+
 	If set to 1, basic version is enabled. 2 enables SACK enhanced
 	F-RTO if flow uses SACK. The basic version can be used also when
 	SACK is in use though scenario(s) with it exists where F-RTO
@@ -276,7 +275,7 @@ tcp_mem - vector of 3 INTEGERs: min, pressure, max
 	memory.
 
 tcp_moderate_rcvbuf - BOOLEAN
-	If set, TCP performs receive buffer autotuning, attempting to
+	If set, TCP performs receive buffer auto-tuning, attempting to
 	automatically size the buffer (no greater than tcp_rmem[2]) to
 	match the size required by the path for full throughput. Enabled by
 	default.
@@ -336,7 +335,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 	pressure.
 	Default: 8K
 
-	default: default size of receive buffer used by TCP sockets.
+	default: initial size of receive buffer used by TCP sockets.
 	This value overrides net.core.rmem_default used by other protocols.
 	Default: 87380 bytes. This value results in window of 65535 with
 	default setting of tcp_adv_win_scale and tcp_app_win:0 and a bit
@@ -344,8 +343,10 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 
 	max: maximal size of receive buffer allowed for automatically
 	selected receiver buffers for TCP socket. This value does not override
-	net.core.rmem_max, "static" selection via SO_RCVBUF does not use this.
-	Default: 87380*2 bytes.
+	net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
+	automatic tuning of that socket's receive buffer size, in which
+	case this value is ignored.
+	Default: between 87380B and 4MB, depending on RAM size.
 
 tcp_sack - BOOLEAN
 	Enable select acknowledgments (SACKS).
@@ -358,7 +359,7 @@ tcp_slow_start_after_idle - BOOLEAN
 	Default: 1
 
 tcp_stdurg - BOOLEAN
-	Use the Host requirements interpretation of the TCP urg pointer field.
+	Use the Host requirements interpretation of the TCP urgent pointer field.
 	Most hosts use the older BSD interpretation, so if you turn this on
 	Linux might not communicate correctly with them.
 	Default: FALSE
@@ -371,12 +372,12 @@ tcp_synack_retries - INTEGER
 tcp_syncookies - BOOLEAN
 	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
 	Send out syncookies when the syn backlog queue of a socket
-	overflows. This is to prevent against the common 'syn flood attack'
+	overflows. This is to prevent against the common 'SYN flood attack'
 	Default: FALSE
 
 	Note, that syncookies is fallback facility.
 	It MUST NOT be used to help highly loaded servers to stand
-	against legal connection rate. If you see synflood warnings
+	against legal connection rate. If you see SYN flood warnings
 	in your logs, but investigation shows that they occur
 	because of overload with legal connections, you should tune
 	another parameters until this warning disappear.
@@ -386,7 +387,7 @@ tcp_syncookies - BOOLEAN
 	to use TCP extensions, can result in serious degradation
 	of some services (f.e. SMTP relaying), visible not by you,
 	but your clients and relays, contacting you. While you see
-	synflood warnings in logs not being really flooded, your server
+	SYN flood warnings in logs not being really flooded, your server
 	is seriously misconfigured.
 
 tcp_syn_retries - INTEGER
@@ -419,19 +420,21 @@ tcp_window_scaling - BOOLEAN
 	Enable window scaling as defined in RFC1323.
 
 tcp_wmem - vector of 3 INTEGERs: min, default, max
-	min: Amount of memory reserved for send buffers for TCP socket.
+	min: Amount of memory reserved for send buffers for TCP sockets.
 	Each TCP socket has rights to use it due to fact of its birth.
 	Default: 4K
 
-	default: Amount of memory allowed for send buffers for TCP socket
-	by default. This value overrides net.core.wmem_default used
-	by other protocols, it is usually lower than net.core.wmem_default.
+	default: initial size of send buffer used by TCP sockets. This
+	value overrides net.core.wmem_default used by other protocols.
+	It is usually lower than net.core.wmem_default.
 	Default: 16K
 
-	max: Maximal amount of memory allowed for automatically selected
-	send buffers for TCP socket. This value does not override
-	net.core.wmem_max, "static" selection via SO_SNDBUF does not use this.
-	Default: 128K
+	max: Maximal amount of memory allowed for automatically tuned
+	send buffers for TCP sockets. This value does not override
+	net.core.wmem_max. Calling setsockopt() with SO_SNDBUF disables
+	automatic tuning of that socket's send buffer size, in which case
+	this value is ignored.
+	Default: between 64K and 4MB, depending on RAM size.
 
 tcp_workaround_signed_windows - BOOLEAN
 	If set, assume no receipt of a window scaling option means the
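A minimal user-space sketch (illustrative only, not part of this commit) of the behaviour the updated tcp_rmem/tcp_wmem text above describes: once an application sets SO_RCVBUF or SO_SNDBUF explicitly, the kernel stops auto-tuning that socket's buffers and the documented max values no longer apply to it. Only standard socket API names are used here.

/* Requesting a fixed receive buffer opts this socket out of
 * receive-buffer auto-tuning, so tcp_rmem[2] no longer applies to it. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int rcvbuf = 1 << 20;	/* ask for a fixed 1 MiB receive buffer */

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)) < 0)
		perror("setsockopt(SO_RCVBUF)");
	return 0;
}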
@@ -1060,24 +1063,193 @@ bridge-nf-filter-pppoe-tagged - BOOLEAN
 	Default: 1
 
 
+proc/sys/net/sctp/* Variables:
+
+addip_enable - BOOLEAN
+	Enable or disable extension of Dynamic Address Reconfiguration
+	(ADD-IP) functionality specified in RFC5061. This extension provides
+	the ability to dynamically add and remove new addresses for the SCTP
+	associations.
+
+	1: Enable extension.
+
+	0: Disable extension.
+
+	Default: 0
+
+addip_noauth_enable - BOOLEAN
+	Dynamic Address Reconfiguration (ADD-IP) requires the use of
+	authentication to protect the operations of adding or removing new
+	addresses. This requirement is mandated so that unauthorized hosts
+	would not be able to hijack associations. However, older
+	implementations may not have implemented this requirement while
+	allowing the ADD-IP extension. For reasons of interoperability,
+	we provide this variable to control the enforcement of the
+	authentication requirement.
+
+	1: Allow ADD-IP extension to be used without authentication. This
+	should only be set in a closed environment for interoperability
+	with older implementations.
+
+	0: Enforce the authentication requirement
+
+	Default: 0
+
+auth_enable - BOOLEAN
+	Enable or disable Authenticated Chunks extension. This extension
+	provides the ability to send and receive authenticated chunks and is
+	required for secure operation of Dynamic Address Reconfiguration
+	(ADD-IP) extension.
+
+	1: Enable this extension.
+	0: Disable this extension.
+
+	Default: 0
+
+prsctp_enable - BOOLEAN
+	Enable or disable the Partial Reliability extension (RFC3758) which
+	is used to notify peers that a given DATA should no longer be expected.
+
+	1: Enable extension
+	0: Disable
+
+	Default: 1
+
+max_burst - INTEGER
+	The limit of the number of new packets that can be initially sent. It
+	controls how bursty the generated traffic can be.
+
+	Default: 4
+
+association_max_retrans - INTEGER
+	Set the maximum number for retransmissions that an association can
+	attempt deciding that the remote end is unreachable. If this value
+	is exceeded, the association is terminated.
+
+	Default: 10
+
+max_init_retransmits - INTEGER
+	The maximum number of retransmissions of INIT and COOKIE-ECHO chunks
+	that an association will attempt before declaring the destination
+	unreachable and terminating.
+
+	Default: 8
+
+path_max_retrans - INTEGER
+	The maximum number of retransmissions that will be attempted on a given
+	path. Once this threshold is exceeded, the path is considered
+	unreachable, and new traffic will use a different path when the
+	association is multihomed.
+
+	Default: 5
+
+rto_initial - INTEGER
+	The initial round trip timeout value in milliseconds that will be used
+	in calculating round trip times. This is the initial time interval
+	for retransmissions.
+
+	Default: 3000
+
+rto_max - INTEGER
+	The maximum value (in milliseconds) of the round trip timeout. This
+	is the largest time interval that can elapse between retransmissions.
+
+	Default: 60000
+
+rto_min - INTEGER
+	The minimum value (in milliseconds) of the round trip timeout. This
+	is the smallest time interval the can elapse between retransmissions.
+
+	Default: 1000
+
+hb_interval - INTEGER
+	The interval (in milliseconds) between HEARTBEAT chunks. These chunks
+	are sent at the specified interval on idle paths to probe the state of
+	a given path between 2 associations.
+
+	Default: 30000
+
+sack_timeout - INTEGER
+	The amount of time (in milliseconds) that the implementation will wait
+	to send a SACK.
+
+	Default: 200
+
+valid_cookie_life - INTEGER
+	The default lifetime of the SCTP cookie (in milliseconds). The cookie
+	is used during association establishment.
+
+	Default: 60000
+
+cookie_preserve_enable - BOOLEAN
+	Enable or disable the ability to extend the lifetime of the SCTP cookie
+	that is used during the establishment phase of SCTP association
+
+	1: Enable cookie lifetime extension.
+	0: Disable
+
+	Default: 1
+
+rcvbuf_policy - INTEGER
+	Determines if the receive buffer is attributed to the socket or to
+	association. SCTP supports the capability to create multiple
+	associations on a single socket. When using this capability, it is
+	possible that a single stalled association that's buffering a lot
+	of data may block other associations from delivering their data by
+	consuming all of the receive buffer space. To work around this,
+	the rcvbuf_policy could be set to attribute the receiver buffer space
+	to each association instead of the socket. This prevents the described
+	blocking.
+
+	1: rcvbuf space is per association
+	0: recbuf space is per socket
+
+	Default: 0
+
+sndbuf_policy - INTEGER
+	Similar to rcvbuf_policy above, this applies to send buffer space.
+
+	1: Send buffer is tracked per association
+	0: Send buffer is tracked per socket.
+
+	Default: 0
+
+sctp_mem - vector of 3 INTEGERs: min, pressure, max
+	Number of pages allowed for queueing by all SCTP sockets.
+
+	min: Below this number of pages SCTP is not bothered about its
+	memory appetite. When amount of memory allocated by SCTP exceeds
+	this number, SCTP starts to moderate memory usage.
+
+	pressure: This value was introduced to follow format of tcp_mem.
+
+	max: Number of pages allowed for queueing by all SCTP sockets.
+
+	Default is calculated at boot time from amount of available memory.
+
+sctp_rmem - vector of 3 INTEGERs: min, default, max
+	See tcp_rmem for a description.
+
+sctp_wmem - vector of 3 INTEGERs: min, default, max
+	See tcp_wmem for a description.
+
 UNDOCUMENTED:
 
-dev_weight FIXME
-discovery_slots FIXME
-discovery_timeout FIXME
-fast_poll_increase FIXME
-ip6_queue_maxlen FIXME
-lap_keepalive_time FIXME
-lo_cong FIXME
-max_baud_rate FIXME
-max_dgram_qlen FIXME
-max_noreply_time FIXME
-max_tx_data_size FIXME
-max_tx_window FIXME
-min_tx_turn_time FIXME
-mod_cong FIXME
-no_cong FIXME
-no_cong_thresh FIXME
-slot_timeout FIXME
-warn_noreply_time FIXME
+/proc/sys/net/core/*
+dev_weight FIXME
 
+/proc/sys/net/unix/*
+max_dgram_qlen FIXME
+
+/proc/sys/net/irda/*
+fast_poll_increase FIXME
+warn_noreply_time FIXME
+discovery_slots FIXME
+slot_timeout FIXME
+max_baud_rate FIXME
+discovery_timeout FIXME
+lap_keepalive_time FIXME
+max_noreply_time FIXME
+max_tx_data_size FIXME
+max_tx_window FIXME
+min_tx_turn_time FIXME
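The SCTP section added above documents tunables that live under /proc/sys/net/sctp/. A small sketch (illustrative only, not part of this commit) of reading one of them, rto_initial, from user space; it assumes the SCTP module is loaded so the file exists, and should print the documented default of 3000 ms on an unmodified system.

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/sys/net/sctp/rto_initial", "r");

	if (!f)
		return 1;	/* SCTP not built in or not loaded */
	if (fgets(buf, sizeof(buf), f))
		printf("rto_initial = %s", buf);
	fclose(f);
	return 0;
}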
@@ -3088,8 +3088,8 @@ L:	linux-scsi@vger.kernel.org
 S:	Maintained
 
 OPROFILE
-P:	Philippe Elie
-M:	phil.el@wanadoo.fr
+P:	Robert Richter
+M:	robert.richter@amd.com
 L:	oprofile-list@lists.sf.net
 S:	Maintained
 
@ -26,7 +26,7 @@
|
||||||
static unsigned long icache_size, dcache_size; /* Size in bytes */
|
static unsigned long icache_size, dcache_size; /* Size in bytes */
|
||||||
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
|
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
|
||||||
|
|
||||||
unsigned long __init r3k_cache_size(unsigned long ca_flags)
|
unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
|
||||||
{
|
{
|
||||||
unsigned long flags, status, dummy, size;
|
unsigned long flags, status, dummy, size;
|
||||||
volatile unsigned long *p;
|
volatile unsigned long *p;
|
||||||
|
@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags)
|
||||||
return size * sizeof(*p);
|
return size * sizeof(*p);
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
|
unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
|
||||||
{
|
{
|
||||||
unsigned long flags, status, lsize, i;
|
unsigned long flags, status, lsize, i;
|
||||||
volatile unsigned long *p;
|
volatile unsigned long *p;
|
||||||
|
@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
|
||||||
return lsize * sizeof(*p);
|
return lsize * sizeof(*p);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __init r3k_probe_cache(void)
|
static void __cpuinit r3k_probe_cache(void)
|
||||||
{
|
{
|
||||||
dcache_size = r3k_cache_size(ST0_ISC);
|
dcache_size = r3k_cache_size(ST0_ISC);
|
||||||
if (dcache_size)
|
if (dcache_size)
|
||||||
|
|
|
@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* Too much unrolling will overflow the available space in
|
* Too much unrolling will overflow the available space in
|
||||||
* clear_space_array / copy_page_array. 8 words sounds generous,
|
* clear_space_array / copy_page_array.
|
||||||
* but a R4000 with 128 byte L2 line length can exceed even that.
|
|
||||||
*/
|
*/
|
||||||
half_clear_loop_size = min(8 * clear_word_size,
|
half_clear_loop_size = min(16 * clear_word_size,
|
||||||
max(cache_line_size >> 1,
|
max(cache_line_size >> 1,
|
||||||
4 * clear_word_size));
|
4 * clear_word_size));
|
||||||
half_copy_loop_size = min(8 * copy_word_size,
|
half_copy_loop_size = min(16 * copy_word_size,
|
||||||
max(cache_line_size >> 1,
|
max(cache_line_size >> 1,
|
||||||
4 * copy_word_size));
|
4 * copy_word_size));
|
||||||
}
|
}
|
||||||
|
@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
|
||||||
if (pref_bias_clear_store) {
|
if (pref_bias_clear_store) {
|
||||||
uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
|
uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
|
||||||
A0);
|
A0);
|
||||||
} else if (cpu_has_cache_cdex_s) {
|
} else if (cache_line_size == (half_clear_loop_size << 1)) {
|
||||||
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
|
if (cpu_has_cache_cdex_s) {
|
||||||
} else if (cpu_has_cache_cdex_p) {
|
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
|
||||||
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
|
} else if (cpu_has_cache_cdex_p) {
|
||||||
uasm_i_nop(buf);
|
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
|
||||||
uasm_i_nop(buf);
|
uasm_i_nop(buf);
|
||||||
uasm_i_nop(buf);
|
uasm_i_nop(buf);
|
||||||
uasm_i_nop(buf);
|
uasm_i_nop(buf);
|
||||||
|
uasm_i_nop(buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
|
||||||
|
uasm_i_lw(buf, ZERO, ZERO, AT);
|
||||||
|
|
||||||
|
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
|
|
||||||
uasm_i_lw(buf, ZERO, ZERO, AT);
|
|
||||||
|
|
||||||
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void __cpuinit build_clear_page(void)
|
void __cpuinit build_clear_page(void)
|
||||||
|
@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
|
||||||
if (pref_bias_copy_store) {
|
if (pref_bias_copy_store) {
|
||||||
uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
|
uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
|
||||||
A0);
|
A0);
|
||||||
} else if (cpu_has_cache_cdex_s) {
|
} else if (cache_line_size == (half_copy_loop_size << 1)) {
|
||||||
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
|
if (cpu_has_cache_cdex_s) {
|
||||||
} else if (cpu_has_cache_cdex_p) {
|
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
|
||||||
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
|
} else if (cpu_has_cache_cdex_p) {
|
||||||
uasm_i_nop(buf);
|
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
|
||||||
uasm_i_nop(buf);
|
uasm_i_nop(buf);
|
||||||
uasm_i_nop(buf);
|
uasm_i_nop(buf);
|
||||||
uasm_i_nop(buf);
|
uasm_i_nop(buf);
|
||||||
|
uasm_i_nop(buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
|
||||||
|
uasm_i_lw(buf, ZERO, ZERO, AT);
|
||||||
|
|
||||||
|
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
|
|
||||||
uasm_i_lw(buf, ZERO, ZERO, AT);
|
|
||||||
|
|
||||||
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
|
||||||
/*
|
/*
|
||||||
* This function is executed in uncached address space.
|
* This function is executed in uncached address space.
|
||||||
*/
|
*/
|
||||||
static __init void __rm7k_sc_enable(void)
|
static __cpuinit void __rm7k_sc_enable(void)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static __init void rm7k_sc_enable(void)
|
static __cpuinit void rm7k_sc_enable(void)
|
||||||
{
|
{
|
||||||
if (read_c0_config() & RM7K_CONF_SE)
|
if (read_c0_config() & RM7K_CONF_SE)
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -33,13 +33,14 @@ static struct legacy_serial_info {
|
||||||
phys_addr_t taddr;
|
phys_addr_t taddr;
|
||||||
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
|
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
|
||||||
|
|
||||||
static struct __initdata of_device_id parents[] = {
|
static struct __initdata of_device_id legacy_serial_parents[] = {
|
||||||
{.type = "soc",},
|
{.type = "soc",},
|
||||||
{.type = "tsi-bridge",},
|
{.type = "tsi-bridge",},
|
||||||
{.type = "opb", },
|
{.type = "opb", },
|
||||||
{.compatible = "ibm,opb",},
|
{.compatible = "ibm,opb",},
|
||||||
{.compatible = "simple-bus",},
|
{.compatible = "simple-bus",},
|
||||||
{.compatible = "wrs,epld-localbus",},
|
{.compatible = "wrs,epld-localbus",},
|
||||||
|
{},
|
||||||
};
|
};
|
||||||
|
|
||||||
static unsigned int legacy_serial_count;
|
static unsigned int legacy_serial_count;
|
||||||
|
@ -327,7 +328,7 @@ void __init find_legacy_serial_ports(void)
|
||||||
struct device_node *parent = of_get_parent(np);
|
struct device_node *parent = of_get_parent(np);
|
||||||
if (!parent)
|
if (!parent)
|
||||||
continue;
|
continue;
|
||||||
if (of_match_node(parents, parent) != NULL) {
|
if (of_match_node(legacy_serial_parents, parent) != NULL) {
|
||||||
index = add_legacy_soc_port(np, np);
|
index = add_legacy_soc_port(np, np);
|
||||||
if (index >= 0 && np == stdout)
|
if (index >= 0 && np == stdout)
|
||||||
legacy_serial_console = index;
|
legacy_serial_console = index;
|
||||||
|
|
|
@ -76,6 +76,8 @@ struct of_device* of_platform_device_create(struct device_node *np,
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
dev->dma_mask = 0xffffffffUL;
|
dev->dma_mask = 0xffffffffUL;
|
||||||
|
dev->dev.coherent_dma_mask = DMA_32BIT_MASK;
|
||||||
|
|
||||||
dev->dev.bus = &of_platform_bus_type;
|
dev->dev.bus = &of_platform_bus_type;
|
||||||
|
|
||||||
/* We do not fill the DMA ops for platform devices by default.
|
/* We do not fill the DMA ops for platform devices by default.
|
||||||
|
|
|
@ -77,7 +77,6 @@ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
|
||||||
KERNEL_DEFINES = $(strip -Derrno=kernel_errno -Dsigprocmask=kernel_sigprocmask \
|
KERNEL_DEFINES = $(strip -Derrno=kernel_errno -Dsigprocmask=kernel_sigprocmask \
|
||||||
-Dmktime=kernel_mktime $(ARCH_KERNEL_DEFINES))
|
-Dmktime=kernel_mktime $(ARCH_KERNEL_DEFINES))
|
||||||
KBUILD_CFLAGS += $(KERNEL_DEFINES)
|
KBUILD_CFLAGS += $(KERNEL_DEFINES)
|
||||||
KBUILD_CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
|
|
||||||
|
|
||||||
PHONY += linux
|
PHONY += linux
|
||||||
|
|
||||||
|
|
|
@ -32,4 +32,11 @@ cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
|
||||||
# an unresolved reference.
|
# an unresolved reference.
|
||||||
cflags-y += -ffreestanding
|
cflags-y += -ffreestanding
|
||||||
|
|
||||||
|
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
|
||||||
|
# a lot more stack due to the lack of sharing of stacklots. Also, gcc
|
||||||
|
# 4.3.0 needs -funit-at-a-time for extern inline functions.
|
||||||
|
KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
|
||||||
|
echo $(call cc-option,-fno-unit-at-a-time); \
|
||||||
|
else echo $(call cc-option,-funit-at-a-time); fi ;)
|
||||||
|
|
||||||
KBUILD_CFLAGS += $(cflags-y)
|
KBUILD_CFLAGS += $(cflags-y)
|
||||||
|
|
|
@ -21,3 +21,6 @@ HEADER_ARCH := x86
|
||||||
|
|
||||||
LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
|
LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
|
||||||
LINK-y += -m64
|
LINK-y += -m64
|
||||||
|
|
||||||
|
# Do unit-at-a-time unconditionally on x86_64, following the host
|
||||||
|
KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
|
||||||
|
|
arch/x86/kernel/.gitignore (vendored, 1 addition)
@@ -1,2 +1,3 @@
 vsyscall.lds
 vsyscall_32.lds
+vmlinux.lds
@ -300,6 +300,29 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(ioremap_cache);
|
EXPORT_SYMBOL(ioremap_cache);
|
||||||
|
|
||||||
|
static void __iomem *ioremap_default(resource_size_t phys_addr,
|
||||||
|
unsigned long size)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
void *ret;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* - WB for WB-able memory and no other conflicting mappings
|
||||||
|
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
|
||||||
|
* - Inherit from confliting mappings otherwise
|
||||||
|
*/
|
||||||
|
err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
|
||||||
|
if (err < 0)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
ret = (void *) __ioremap_caller(phys_addr, size, flags,
|
||||||
|
__builtin_return_address(0));
|
||||||
|
|
||||||
|
free_memtype(phys_addr, phys_addr + size);
|
||||||
|
return (void __iomem *)ret;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* iounmap - Free a IO remapping
|
* iounmap - Free a IO remapping
|
||||||
* @addr: virtual address from ioremap_*
|
* @addr: virtual address from ioremap_*
|
||||||
|
@ -365,7 +388,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
|
||||||
if (page_is_ram(start >> PAGE_SHIFT))
|
if (page_is_ram(start >> PAGE_SHIFT))
|
||||||
return __va(phys);
|
return __va(phys);
|
||||||
|
|
||||||
addr = (void __force *)ioremap(start, PAGE_SIZE);
|
addr = (void __force *)ioremap_default(start, PAGE_SIZE);
|
||||||
if (addr)
|
if (addr)
|
||||||
addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
|
addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
|
||||||
|
|
||||||
|
|
|
@ -330,18 +330,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
|
||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
.callback = set_bf_sort,
|
.callback = set_bf_sort,
|
||||||
.ident = "HP ProLiant DL360",
|
.ident = "HP ProLiant DL385 G2",
|
||||||
.matches = {
|
.matches = {
|
||||||
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
||||||
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
.callback = set_bf_sort,
|
.callback = set_bf_sort,
|
||||||
.ident = "HP ProLiant DL380",
|
.ident = "HP ProLiant DL585 G2",
|
||||||
.matches = {
|
.matches = {
|
||||||
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
||||||
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{}
|
{}
|
||||||
|
|
|
@ -709,11 +709,12 @@ static void bsg_kref_release_function(struct kref *kref)
|
||||||
{
|
{
|
||||||
struct bsg_class_device *bcd =
|
struct bsg_class_device *bcd =
|
||||||
container_of(kref, struct bsg_class_device, ref);
|
container_of(kref, struct bsg_class_device, ref);
|
||||||
|
struct device *parent = bcd->parent;
|
||||||
|
|
||||||
if (bcd->release)
|
if (bcd->release)
|
||||||
bcd->release(bcd->parent);
|
bcd->release(bcd->parent);
|
||||||
|
|
||||||
put_device(bcd->parent);
|
put_device(parent);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bsg_put_device(struct bsg_device *bd)
|
static int bsg_put_device(struct bsg_device *bd)
|
||||||
|
|
|
@ -117,6 +117,7 @@ static int chainiv_init(struct crypto_tfm *tfm)
|
||||||
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
|
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
|
||||||
{
|
{
|
||||||
int queued;
|
int queued;
|
||||||
|
int err = ctx->err;
|
||||||
|
|
||||||
if (!ctx->queue.qlen) {
|
if (!ctx->queue.qlen) {
|
||||||
smp_mb__before_clear_bit();
|
smp_mb__before_clear_bit();
|
||||||
|
@ -131,7 +132,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
|
||||||
BUG_ON(!queued);
|
BUG_ON(!queued);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
return ctx->err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
|
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
|
||||||
|
@ -227,6 +228,7 @@ static void async_chainiv_do_postponed(struct work_struct *work)
|
||||||
postponed);
|
postponed);
|
||||||
struct skcipher_givcrypt_request *req;
|
struct skcipher_givcrypt_request *req;
|
||||||
struct ablkcipher_request *subreq;
|
struct ablkcipher_request *subreq;
|
||||||
|
int err;
|
||||||
|
|
||||||
/* Only handle one request at a time to avoid hogging keventd. */
|
/* Only handle one request at a time to avoid hogging keventd. */
|
||||||
spin_lock_bh(&ctx->lock);
|
spin_lock_bh(&ctx->lock);
|
||||||
|
@ -241,7 +243,11 @@ static void async_chainiv_do_postponed(struct work_struct *work)
|
||||||
subreq = skcipher_givcrypt_reqctx(req);
|
subreq = skcipher_givcrypt_reqctx(req);
|
||||||
subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
|
subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||||
|
|
||||||
async_chainiv_givencrypt_tail(req);
|
err = async_chainiv_givencrypt_tail(req);
|
||||||
|
|
||||||
|
local_bh_disable();
|
||||||
|
skcipher_givcrypt_complete(req, err);
|
||||||
|
local_bh_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
static int async_chainiv_init(struct crypto_tfm *tfm)
|
static int async_chainiv_init(struct crypto_tfm *tfm)
|
||||||
|
|
|
@ -586,12 +586,6 @@ static void test_cipher(char *algo, int enc,
|
||||||
j = 0;
|
j = 0;
|
||||||
for (i = 0; i < tcount; i++) {
|
for (i = 0; i < tcount; i++) {
|
||||||
|
|
||||||
data = kzalloc(template[i].ilen, GFP_KERNEL);
|
|
||||||
if (!data)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
memcpy(data, template[i].input, template[i].ilen);
|
|
||||||
|
|
||||||
if (template[i].iv)
|
if (template[i].iv)
|
||||||
memcpy(iv, template[i].iv, MAX_IVLEN);
|
memcpy(iv, template[i].iv, MAX_IVLEN);
|
||||||
else
|
else
|
||||||
|
@ -613,10 +607,8 @@ static void test_cipher(char *algo, int enc,
|
||||||
printk("setkey() failed flags=%x\n",
|
printk("setkey() failed flags=%x\n",
|
||||||
crypto_ablkcipher_get_flags(tfm));
|
crypto_ablkcipher_get_flags(tfm));
|
||||||
|
|
||||||
if (!template[i].fail) {
|
if (!template[i].fail)
|
||||||
kfree(data);
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
temp = 0;
|
temp = 0;
|
||||||
|
|
|
@ -29,14 +29,16 @@
|
||||||
enum {
|
enum {
|
||||||
ATA_ACPI_FILTER_SETXFER = 1 << 0,
|
ATA_ACPI_FILTER_SETXFER = 1 << 0,
|
||||||
ATA_ACPI_FILTER_LOCK = 1 << 1,
|
ATA_ACPI_FILTER_LOCK = 1 << 1,
|
||||||
|
ATA_ACPI_FILTER_DIPM = 1 << 2,
|
||||||
|
|
||||||
ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER |
|
ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER |
|
||||||
ATA_ACPI_FILTER_LOCK,
|
ATA_ACPI_FILTER_LOCK |
|
||||||
|
ATA_ACPI_FILTER_DIPM,
|
||||||
};
|
};
|
||||||
|
|
||||||
static unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
|
static unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
|
||||||
module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
|
module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
|
||||||
MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock)");
|
MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM)");
|
||||||
|
|
||||||
#define NO_PORT_MULT 0xffff
|
#define NO_PORT_MULT 0xffff
|
||||||
#define SATA_ADR(root, pmp) (((root) << 16) | (pmp))
|
#define SATA_ADR(root, pmp) (((root) << 16) | (pmp))
|
||||||
|
@ -195,6 +197,10 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
|
||||||
/* This device does not support hotplug */
|
/* This device does not support hotplug */
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
if (event == ACPI_NOTIFY_BUS_CHECK ||
|
||||||
|
event == ACPI_NOTIFY_DEVICE_CHECK)
|
||||||
|
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
|
||||||
|
|
||||||
spin_lock_irqsave(ap->lock, flags);
|
spin_lock_irqsave(ap->lock, flags);
|
||||||
|
|
||||||
switch (event) {
|
switch (event) {
|
||||||
|
@ -202,7 +208,6 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
|
||||||
case ACPI_NOTIFY_DEVICE_CHECK:
|
case ACPI_NOTIFY_DEVICE_CHECK:
|
||||||
ata_ehi_push_desc(ehi, "ACPI event");
|
ata_ehi_push_desc(ehi, "ACPI event");
|
||||||
|
|
||||||
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
|
|
||||||
if (ACPI_FAILURE(status)) {
|
if (ACPI_FAILURE(status)) {
|
||||||
ata_port_printk(ap, KERN_ERR,
|
ata_port_printk(ap, KERN_ERR,
|
||||||
"acpi: failed to determine bay status (0x%x)\n",
|
"acpi: failed to determine bay status (0x%x)\n",
|
||||||
|
@ -690,6 +695,14 @@ static int ata_acpi_filter_tf(const struct ata_taskfile *tf,
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_DIPM) {
|
||||||
|
/* inhibit enabling DIPM */
|
||||||
|
if (tf->command == ATA_CMD_SET_FEATURES &&
|
||||||
|
tf->feature == SETFEATURES_SATA_ENABLE &&
|
||||||
|
tf->nsect == SATA_DIPM)
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -56,6 +56,7 @@ static const struct sis_laptop sis_laptop[] = {
|
||||||
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
|
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
|
||||||
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
|
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
|
||||||
{ 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
|
{ 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
|
||||||
|
{ 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */
|
||||||
/* end marker */
|
/* end marker */
|
||||||
{ 0, }
|
{ 0, }
|
||||||
};
|
};
|
||||||
|
|
|
@ -755,9 +755,8 @@ static ssize_t ipmi_write(struct file *file,
|
||||||
rv = ipmi_heartbeat();
|
rv = ipmi_heartbeat();
|
||||||
if (rv)
|
if (rv)
|
||||||
return rv;
|
return rv;
|
||||||
return 1;
|
|
||||||
}
|
}
|
||||||
return 0;
|
return len;
|
||||||
}
|
}
|
||||||
|
|
||||||
static ssize_t ipmi_read(struct file *file,
|
static ssize_t ipmi_read(struct file *file,
|
||||||
|
|
|
@ -590,8 +590,10 @@ static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw,
|
||||||
packet = kmalloc(sizeof(struct ipw_rx_packet) +
|
packet = kmalloc(sizeof(struct ipw_rx_packet) +
|
||||||
old_packet->length + minimum_free_space,
|
old_packet->length + minimum_free_space,
|
||||||
GFP_ATOMIC);
|
GFP_ATOMIC);
|
||||||
if (!packet)
|
if (!packet) {
|
||||||
|
kfree(old_packet);
|
||||||
return NULL;
|
return NULL;
|
||||||
|
}
|
||||||
memcpy(packet, old_packet,
|
memcpy(packet, old_packet,
|
||||||
sizeof(struct ipw_rx_packet)
|
sizeof(struct ipw_rx_packet)
|
||||||
+ old_packet->length);
|
+ old_packet->length);
|
||||||
|
|
|
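The pool_allocate() hunk above fixes a leak on the allocation-failure path: when the enlarged replacement packet cannot be allocated, the old packet still has to be freed, because the caller only ever sees the returned pointer. A generic user-space sketch of that pattern (hypothetical helper, not part of this commit):

#include <stdlib.h>
#include <string.h>

/* Grow a heap buffer to new_len bytes; on failure the old buffer is freed
 * rather than leaked, mirroring the kfree(old_packet) added above. */
void *grow_buffer(void *old, size_t old_len, size_t new_len)
{
	void *new_buf = malloc(new_len);

	if (!new_buf) {
		free(old);		/* do not leak the original buffer */
		return NULL;
	}
	memcpy(new_buf, old, old_len);
	free(old);
	return new_buf;
}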
@ -678,12 +678,13 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
|
||||||
if (arg != (1<<tmp))
|
if (arg != (1<<tmp))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
rtc_freq = arg;
|
||||||
|
|
||||||
spin_lock_irqsave(&rtc_lock, flags);
|
spin_lock_irqsave(&rtc_lock, flags);
|
||||||
if (hpet_set_periodic_freq(arg)) {
|
if (hpet_set_periodic_freq(arg)) {
|
||||||
spin_unlock_irqrestore(&rtc_lock, flags);
|
spin_unlock_irqrestore(&rtc_lock, flags);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
rtc_freq = arg;
|
|
||||||
|
|
||||||
val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0;
|
val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0;
|
||||||
val |= (16 - tmp);
|
val |= (16 - tmp);
|
||||||
|
|
|
@ -623,6 +623,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
|
||||||
{"IFX0102", 0}, /* Infineon */
|
{"IFX0102", 0}, /* Infineon */
|
||||||
{"BCM0101", 0}, /* Broadcom */
|
{"BCM0101", 0}, /* Broadcom */
|
||||||
{"NSC1200", 0}, /* National */
|
{"NSC1200", 0}, /* National */
|
||||||
|
{"ICO0102", 0}, /* Intel */
|
||||||
/* Add new here */
|
/* Add new here */
|
||||||
{"", 0}, /* User Specified */
|
{"", 0}, /* User Specified */
|
||||||
{"", 0} /* Terminator */
|
{"", 0} /* Terminator */
|
||||||
|
|
|
@ -76,7 +76,7 @@ struct palm_bk3710_udmatiming {
|
||||||
|
|
||||||
#include "../ide-timing.h"
|
#include "../ide-timing.h"
|
||||||
|
|
||||||
static long ide_palm_clk;
|
static unsigned ideclk_period; /* in nanoseconds */
|
||||||
|
|
||||||
static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
|
static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
|
||||||
{160, 240}, /* UDMA Mode 0 */
|
{160, 240}, /* UDMA Mode 0 */
|
||||||
|
@ -86,8 +86,6 @@ static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
|
||||||
{85, 60}, /* UDMA Mode 4 */
|
{85, 60}, /* UDMA Mode 4 */
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct clk *ideclkp;
|
|
||||||
|
|
||||||
static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
|
static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
|
||||||
unsigned int mode)
|
unsigned int mode)
|
||||||
{
|
{
|
||||||
|
@ -97,10 +95,10 @@ static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
|
||||||
|
|
||||||
/* DMA Data Setup */
|
/* DMA Data Setup */
|
||||||
t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
|
t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
|
||||||
ide_palm_clk) - 1;
|
ideclk_period) - 1;
|
||||||
tenv = DIV_ROUND_UP(20, ide_palm_clk) - 1;
|
tenv = DIV_ROUND_UP(20, ideclk_period) - 1;
|
||||||
trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
|
trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
|
||||||
ide_palm_clk) - 1;
|
ideclk_period) - 1;
|
||||||
|
|
||||||
/* udmatim Register */
|
/* udmatim Register */
|
||||||
val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
|
val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
|
||||||
|
@ -141,8 +139,8 @@ static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
|
||||||
cycletime = max_t(int, t->cycle, min_cycle);
|
cycletime = max_t(int, t->cycle, min_cycle);
|
||||||
|
|
||||||
/* DMA Data Setup */
|
/* DMA Data Setup */
|
||||||
t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
|
t0 = DIV_ROUND_UP(cycletime, ideclk_period);
|
||||||
td = DIV_ROUND_UP(t->active, ide_palm_clk);
|
td = DIV_ROUND_UP(t->active, ideclk_period);
|
||||||
tkw = t0 - td - 1;
|
tkw = t0 - td - 1;
|
||||||
td -= 1;
|
td -= 1;
|
||||||
|
|
||||||
|
@ -168,9 +166,9 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
|
||||||
struct ide_timing *t;
|
struct ide_timing *t;
|
||||||
|
|
||||||
/* PIO Data Setup */
|
/* PIO Data Setup */
|
||||||
t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
|
t0 = DIV_ROUND_UP(cycletime, ideclk_period);
|
||||||
t2 = DIV_ROUND_UP(ide_timing_find_mode(XFER_PIO_0 + mode)->active,
|
t2 = DIV_ROUND_UP(ide_timing_find_mode(XFER_PIO_0 + mode)->active,
|
||||||
ide_palm_clk);
|
ideclk_period);
|
||||||
|
|
||||||
t2i = t0 - t2 - 1;
|
t2i = t0 - t2 - 1;
|
||||||
t2 -= 1;
|
t2 -= 1;
|
||||||
|
@ -192,8 +190,8 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
|
||||||
|
|
||||||
/* TASKFILE Setup */
|
/* TASKFILE Setup */
|
||||||
t = ide_timing_find_mode(XFER_PIO_0 + mode);
|
t = ide_timing_find_mode(XFER_PIO_0 + mode);
|
||||||
t0 = DIV_ROUND_UP(t->cyc8b, ide_palm_clk);
|
t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
|
||||||
t2 = DIV_ROUND_UP(t->act8b, ide_palm_clk);
|
t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
|
||||||
|
|
||||||
t2i = t0 - t2 - 1;
|
t2i = t0 - t2 - 1;
|
||||||
t2 -= 1;
|
t2 -= 1;
|
||||||
|
@ -350,22 +348,22 @@ static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
|
||||||
|
|
||||||
static int __devinit palm_bk3710_probe(struct platform_device *pdev)
|
static int __devinit palm_bk3710_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct clk *clkp;
|
struct clk *clk;
|
||||||
struct resource *mem, *irq;
|
struct resource *mem, *irq;
|
||||||
ide_hwif_t *hwif;
|
ide_hwif_t *hwif;
|
||||||
unsigned long base;
|
unsigned long base, rate;
|
||||||
int i;
|
int i;
|
||||||
hw_regs_t hw;
|
hw_regs_t hw;
|
||||||
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
|
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
|
||||||
|
|
||||||
clkp = clk_get(NULL, "IDECLK");
|
clk = clk_get(NULL, "IDECLK");
|
||||||
if (IS_ERR(clkp))
|
if (IS_ERR(clk))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
ideclkp = clkp;
|
clk_enable(clk);
|
||||||
clk_enable(ideclkp);
|
rate = clk_get_rate(clk);
|
||||||
ide_palm_clk = clk_get_rate(ideclkp)/100000;
|
ideclk_period = 1000000000UL / rate;
|
||||||
ide_palm_clk = (10000/ide_palm_clk) + 1;
|
|
||||||
/* Register the IDE interface with Linux ATA Interface */
|
/* Register the IDE interface with Linux ATA Interface */
|
||||||
memset(&hw, 0, sizeof(hw));
|
memset(&hw, 0, sizeof(hw));
|
||||||
|
|
||||||
|
|
|
@ -1218,16 +1218,12 @@ static void drive_release_dev (struct device *dev)
|
||||||
complete(&drive->gendev_rel_comp);
|
complete(&drive->gendev_rel_comp);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef ide_default_irq
|
|
||||||
#define ide_default_irq(irq) 0
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static int hwif_init(ide_hwif_t *hwif)
|
static int hwif_init(ide_hwif_t *hwif)
|
||||||
{
|
{
|
||||||
int old_irq;
|
int old_irq;
|
||||||
|
|
||||||
if (!hwif->irq) {
|
if (!hwif->irq) {
|
||||||
hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
|
hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
|
||||||
if (!hwif->irq) {
|
if (!hwif->irq) {
|
||||||
printk("%s: DISABLED, NO IRQ\n", hwif->name);
|
printk("%s: DISABLED, NO IRQ\n", hwif->name);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1257,7 +1253,7 @@ static int hwif_init(ide_hwif_t *hwif)
|
||||||
* It failed to initialise. Find the default IRQ for
|
* It failed to initialise. Find the default IRQ for
|
||||||
* this port and try that.
|
* this port and try that.
|
||||||
*/
|
*/
|
||||||
hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
|
hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
|
||||||
if (!hwif->irq) {
|
if (!hwif->irq) {
|
||||||
printk("%s: Disabled unable to get IRQ %d.\n",
|
printk("%s: Disabled unable to get IRQ %d.\n",
|
||||||
hwif->name, old_irq);
|
hwif->name, old_irq);
|
||||||
|
|
|
@ -184,8 +184,7 @@ static const struct ide_port_info it8213_chipsets[] __devinitdata = {
|
||||||
|
|
||||||
static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
|
static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
|
||||||
{
|
{
|
||||||
ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]);
|
return ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]);
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct pci_device_id it8213_pci_tbl[] = {
|
static const struct pci_device_id it8213_pci_tbl[] = {
|
||||||
|
|
|
@ -225,10 +225,6 @@ static int ns87415_dma_setup(ide_drive_t *drive)
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef ide_default_irq
|
|
||||||
#define ide_default_irq(irq) 0
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
|
static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
|
||||||
{
|
{
|
||||||
struct pci_dev *dev = to_pci_dev(hwif->dev);
|
struct pci_dev *dev = to_pci_dev(hwif->dev);
|
||||||
|
@ -288,7 +284,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!using_inta)
|
if (!using_inta)
|
||||||
hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
|
hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
|
||||||
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
|
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
|
||||||
hwif->irq = hwif->mate->irq; /* share IRQ with mate */
|
hwif->irq = hwif->mate->irq; /* share IRQ with mate */
|
||||||
|
|
||||||
|
|
|
@ -1096,7 +1096,9 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
|
||||||
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
|
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
|
||||||
|
|
||||||
PDBG("%s dev 0x%p\n", __func__, dev);
|
PDBG("%s dev 0x%p\n", __func__, dev);
|
||||||
|
rtnl_lock();
|
||||||
lldev->ethtool_ops->get_drvinfo(lldev, &info);
|
lldev->ethtool_ops->get_drvinfo(lldev, &info);
|
||||||
|
rtnl_unlock();
|
||||||
return sprintf(buf, "%s\n", info.fw_version);
|
return sprintf(buf, "%s\n", info.fw_version);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1109,7 +1111,9 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
|
||||||
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
|
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
|
||||||
|
|
||||||
PDBG("%s dev 0x%p\n", __func__, dev);
|
PDBG("%s dev 0x%p\n", __func__, dev);
|
||||||
|
rtnl_lock();
|
||||||
lldev->ethtool_ops->get_drvinfo(lldev, &info);
|
lldev->ethtool_ops->get_drvinfo(lldev, &info);
|
||||||
|
rtnl_unlock();
|
||||||
return sprintf(buf, "%s\n", info.driver);
|
return sprintf(buf, "%s\n", info.driver);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1977,8 +1977,10 @@ isdn_writebuf_stub(int drvidx, int chan, const u_char __user * buf, int len)
|
||||||
if (!skb)
|
if (!skb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
skb_reserve(skb, hl);
|
skb_reserve(skb, hl);
|
||||||
if (copy_from_user(skb_put(skb, len), buf, len))
|
if (copy_from_user(skb_put(skb, len), buf, len)) {
|
||||||
|
dev_kfree_skb(skb);
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
}
|
||||||
ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb);
|
ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb);
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
|
|
|
@ -2017,12 +2017,7 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
|
||||||
*/
|
*/
|
||||||
s->uptodate++;
|
s->uptodate++;
|
||||||
return 0; /* uptodate + compute == disks */
|
return 0; /* uptodate + compute == disks */
|
||||||
} else if ((s->uptodate < disks - 1) &&
|
} else if (test_bit(R5_Insync, &dev->flags)) {
|
||||||
test_bit(R5_Insync, &dev->flags)) {
|
|
||||||
/* Note: we hold off compute operations while checks are
|
|
||||||
* in flight, but we still prefer 'compute' over 'read'
|
|
||||||
* hence we only read if (uptodate < * disks-1)
|
|
||||||
*/
|
|
||||||
set_bit(R5_LOCKED, &dev->flags);
|
set_bit(R5_LOCKED, &dev->flags);
|
||||||
set_bit(R5_Wantread, &dev->flags);
|
set_bit(R5_Wantread, &dev->flags);
|
||||||
if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
|
if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
|
||||||
|
|
|
@ -406,8 +406,10 @@ static int ov7670_read(struct i2c_client *c, unsigned char reg,
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = i2c_smbus_read_byte_data(c, reg);
|
ret = i2c_smbus_read_byte_data(c, reg);
|
||||||
if (ret >= 0)
|
if (ret >= 0) {
|
||||||
*value = (unsigned char) ret;
|
*value = (unsigned char) ret;
|
||||||
|
ret = 0;
|
||||||
|
}
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1686,9 +1686,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||||
ioc->bus_type = SAS;
|
ioc->bus_type = SAS;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ioc->bus_type == SAS && mpt_msi_enable == -1)
|
if (mpt_msi_enable == -1) {
|
||||||
ioc->msi_enable = 1;
|
/* Enable on SAS, disable on FC and SPI */
|
||||||
else
|
if (ioc->bus_type == SAS)
|
||||||
|
ioc->msi_enable = 1;
|
||||||
|
else
|
||||||
|
ioc->msi_enable = 0;
|
||||||
|
} else
|
||||||
|
/* follow flag: 0 - disable; 1 - enable */
|
||||||
ioc->msi_enable = mpt_msi_enable;
|
ioc->msi_enable = mpt_msi_enable;
|
||||||
|
|
||||||
if (ioc->errata_flag_1064)
|
if (ioc->errata_flag_1064)
|
||||||
|
|
|
@ -1266,13 +1266,18 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
|
||||||
static int
|
static int
|
||||||
mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
|
mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
|
||||||
{
|
{
|
||||||
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
|
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
rc = mptscsih_ioc_reset(ioc, reset_phase);
|
rc = mptscsih_ioc_reset(ioc, reset_phase);
|
||||||
|
|
||||||
if (reset_phase == MPT_IOC_POST_RESET)
|
/* only try to do a renegotiation if we're properly set up
|
||||||
|
* if we get an ioc fault on bringup, ioc->sh will be NULL */
|
||||||
|
if (reset_phase == MPT_IOC_POST_RESET &&
|
||||||
|
ioc->sh) {
|
||||||
|
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
|
||||||
|
|
||||||
mptspi_dv_renegotiate(hd);
|
mptspi_dv_renegotiate(hd);
|
||||||
|
}
|
||||||
|
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,7 +40,7 @@
|
||||||
#include <asm/io.h>
|
#include <asm/io.h>
|
||||||
|
|
||||||
#define DRV_NAME "ehea"
|
#define DRV_NAME "ehea"
|
||||||
#define DRV_VERSION "EHEA_0091"
|
#define DRV_VERSION "EHEA_0092"
|
||||||
|
|
||||||
/* eHEA capability flags */
|
/* eHEA capability flags */
|
||||||
#define DLPAR_PORT_ADD_REM 1
|
#define DLPAR_PORT_ADD_REM 1
|
||||||
|
@ -452,7 +452,7 @@ struct ehea_bcmc_reg_entry {
|
||||||
struct ehea_bcmc_reg_array {
|
struct ehea_bcmc_reg_array {
|
||||||
struct ehea_bcmc_reg_entry *arr;
|
struct ehea_bcmc_reg_entry *arr;
|
||||||
int num_entries;
|
int num_entries;
|
||||||
struct mutex lock;
|
spinlock_t lock;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define EHEA_PORT_UP 1
|
#define EHEA_PORT_UP 1
|
||||||
|
@ -478,6 +478,7 @@ struct ehea_port {
|
||||||
int num_add_tx_qps;
|
int num_add_tx_qps;
|
||||||
int num_mcs;
|
int num_mcs;
|
||||||
int resets;
|
int resets;
|
||||||
|
u64 flags;
|
||||||
u64 mac_addr;
|
u64 mac_addr;
|
||||||
u32 logical_port_id;
|
u32 logical_port_id;
|
||||||
u32 port_speed;
|
u32 port_speed;
|
||||||
|
@ -501,7 +502,8 @@ struct port_res_cfg {
|
||||||
};
|
};
|
||||||
|
|
||||||
enum ehea_flag_bits {
|
enum ehea_flag_bits {
|
||||||
__EHEA_STOP_XFER
|
__EHEA_STOP_XFER,
|
||||||
|
__EHEA_DISABLE_PORT_RESET
|
||||||
};
|
};
|
||||||
|
|
||||||
void ehea_set_ethtool_ops(struct net_device *netdev);
|
void ehea_set_ethtool_ops(struct net_device *netdev);
|
||||||
|
|
|
@ -118,6 +118,7 @@ static struct of_device_id ehea_device_table[] = {
|
||||||
},
|
},
|
||||||
{},
|
{},
|
||||||
};
|
};
|
||||||
|
MODULE_DEVICE_TABLE(of, ehea_device_table);
|
||||||
|
|
||||||
static struct of_platform_driver ehea_driver = {
|
static struct of_platform_driver ehea_driver = {
|
||||||
.name = "ehea",
|
.name = "ehea",
|
||||||
|
@ -137,6 +138,12 @@ void ehea_dump(void *adr, int len, char *msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ehea_schedule_port_reset(struct ehea_port *port)
|
||||||
|
{
|
||||||
|
if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
|
||||||
|
schedule_work(&port->reset_task);
|
||||||
|
}
|
||||||
|
|
||||||
static void ehea_update_firmware_handles(void)
|
static void ehea_update_firmware_handles(void)
|
||||||
{
|
{
|
||||||
struct ehea_fw_handle_entry *arr = NULL;
|
struct ehea_fw_handle_entry *arr = NULL;
|
||||||
|
@ -241,7 +248,7 @@ static void ehea_update_bcmc_registrations(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (num_registrations) {
|
if (num_registrations) {
|
||||||
arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL);
|
arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
|
||||||
if (!arr)
|
if (!arr)
|
||||||
return; /* Keep the existing array */
|
return; /* Keep the existing array */
|
||||||
} else
|
} else
|
||||||
|
@ -301,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
|
||||||
|
|
||||||
memset(stats, 0, sizeof(*stats));
|
memset(stats, 0, sizeof(*stats));
|
||||||
|
|
||||||
cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
|
cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
|
||||||
if (!cb2) {
|
if (!cb2) {
|
||||||
ehea_error("no mem for cb2");
|
ehea_error("no mem for cb2");
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -587,7 +594,7 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
|
||||||
"Resetting port.", pr->qp->init_attr.qp_nr);
|
"Resetting port.", pr->qp->init_attr.qp_nr);
|
||||||
ehea_dump(cqe, sizeof(*cqe), "CQE");
|
ehea_dump(cqe, sizeof(*cqe), "CQE");
|
||||||
}
|
}
|
||||||
schedule_work(&pr->port->reset_task);
|
ehea_schedule_port_reset(pr->port);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -616,7 +623,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
|
||||||
*tcph = tcp_hdr(skb);
|
*tcph = tcp_hdr(skb);
|
||||||
|
|
||||||
/* check if ip header and tcp header are complete */
|
/* check if ip header and tcp header are complete */
|
||||||
if (iph->tot_len < ip_len + tcp_hdrlen(skb))
|
if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
*hdr_flags = LRO_IPV4 | LRO_TCP;
|
*hdr_flags = LRO_IPV4 | LRO_TCP;
|
||||||
@@ -765,7 +772,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 			ehea_error("Send Completion Error: Resetting port");
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			schedule_work(&pr->port->reset_task);
+			ehea_schedule_port_reset(pr->port);
 			break;
 		}
 
@@ -885,7 +892,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	schedule_work(&port->reset_task);
+	ehea_schedule_port_reset(port);
 
 	return IRQ_HANDLED;
 }
@@ -1763,7 +1770,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 
 	/* Deregister old MAC in pHYP */
 	if (port->state == EHEA_PORT_UP) {
@@ -1785,7 +1792,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 out_upregs:
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 out_free:
 	kfree(cb0);
 out:
@@ -1947,7 +1954,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 	ehea_promiscuous(dev, 0);
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 
 	if (dev->flags & IFF_ALLMULTI) {
 		ehea_allmulti(dev, 1);
@@ -1978,7 +1985,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 out:
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 	return;
 }
 
@@ -2497,7 +2504,7 @@ static int ehea_up(struct net_device *dev)
 		}
 	}
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
 	if (ret) {
@@ -2520,7 +2527,7 @@ out:
 		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
 
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 
 	ehea_update_firmware_handles();
 	mutex_unlock(&ehea_fw_handles.lock);
@@ -2575,7 +2582,7 @@ static int ehea_down(struct net_device *dev)
 
 	mutex_lock(&ehea_fw_handles.lock);
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 	ehea_drop_multicast_list(dev);
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
@@ -2584,7 +2591,7 @@ static int ehea_down(struct net_device *dev)
 	port->state = EHEA_PORT_DOWN;
 
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
@@ -2605,13 +2612,14 @@ static int ehea_stop(struct net_device *dev)
 	if (netif_msg_ifdown(port))
 		ehea_info("disabling port %s", dev->name);
 
+	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	cancel_work_sync(&port->reset_task);
-
 	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
 	port_napi_disable(port);
 	ret = ehea_down(dev);
 	mutex_unlock(&port->port_lock);
+	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	return ret;
 }
 
@@ -2941,7 +2949,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
 
 	if (netif_carrier_ok(dev) &&
 	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-		schedule_work(&port->reset_task);
+		ehea_schedule_port_reset(port);
 }
 
 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
@@ -3590,7 +3598,7 @@ int __init ehea_module_init(void)
 	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
 
 	mutex_init(&ehea_fw_handles.lock);
-	mutex_init(&ehea_bcmc_regs.lock);
+	spin_lock_init(&ehea_bcmc_regs.lock);
 
 	ret = check_module_parm();
 	if (ret)
@@ -4194,12 +4194,23 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 
 	netif_carrier_off(dev);
 	if (netif_running(dev)) {
+		unsigned long flags;
+
 		nv_disable_irq(dev);
 		netif_tx_lock_bh(dev);
-		spin_lock(&np->lock);
+		/* with plain spinlock lockdep complains */
+		spin_lock_irqsave(&np->lock, flags);
 		/* stop engines */
+		/* FIXME:
+		 * this can take some time, and interrupts are disabled
+		 * due to spin_lock_irqsave, but let's hope no daemon
+		 * is going to change the settings very often...
+		 * Worst case:
+		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
+		 * + some minor delays, which is up to a second approximately
+		 */
 		nv_stop_rxtx(dev);
-		spin_unlock(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 		netif_tx_unlock_bh(dev);
 	}
 
@@ -463,6 +463,9 @@ static void restart(struct net_device *dev)
 	else
 		C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
 
+	/* Restore multicast and promiscuous settings */
+	set_multicast_list(dev);
+
 	S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
 }
 
@@ -1636,6 +1636,12 @@ static int emac_poll_rx(void *param, int budget)
 			goto next;
 		}
 
+		if (len < ETH_HLEN) {
+			++dev->estats.rx_dropped_stack;
+			emac_recycle_rx_skb(dev, slot, len);
+			goto next;
+		}
+
 		if (len && len < EMAC_RX_COPY_THRESH) {
 			struct sk_buff *copy_skb =
 			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
@@ -2719,6 +2725,8 @@ static int __devinit emac_probe(struct of_device *ofdev,
 	/* Clean rings */
 	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
 	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
+	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
+	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
 
 	/* Attach to ZMII, if needed */
 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
@@ -152,6 +152,7 @@ static chipio_t pnp_info;
 static const struct pnp_device_id nsc_ircc_pnp_table[] = {
 	{ .id = "NSC6001", .driver_data = 0 },
 	{ .id = "IBM0071", .driver_data = 0 },
+	{ .id = "HWPC224", .driver_data = 0 },
 	{ }
 };
 
@@ -1546,6 +1546,7 @@ static int via_ircc_net_open(struct net_device *dev)
 			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
 				     driver_name, self->io.dma2);
 			free_irq(self->io.irq, self);
+			free_dma(self->io.dma);
 			return -EAGAIN;
 		}
 	}
@@ -1606,6 +1607,8 @@ static int via_ircc_net_close(struct net_device *dev)
 	EnAllInt(iobase, OFF);
 	free_irq(self->io.irq, dev);
 	free_dma(self->io.dma);
+	if (self->io.dma2 != self->io.dma)
+		free_dma(self->io.dma2);
 
 	return 0;
 }
@@ -277,7 +277,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
 	*tcph = tcp_hdr(skb);
 
 	/* check if ip header and tcp header are complete */
-	if (iph->tot_len < ip_len + tcp_hdrlen(skb))
+	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
 		return -1;
 
 	*hdr_flags = LRO_IPV4 | LRO_TCP;
@@ -602,6 +602,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 	tun->attached = 1;
 	get_net(dev_net(tun->dev));
 
+	/* Make sure persistent devices do not get stuck in
+	 * xoff state.
+	 */
+	if (netif_running(tun->dev))
+		netif_wake_queue(tun->dev);
+
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
 
@@ -1008,6 +1008,7 @@ static int fr_rx(struct sk_buff *skb)
 			stats->rx_bytes += skb->len;
 			if (pvc->state.becn)
 				stats->rx_compressed++;
+			skb->dev = dev;
 			netif_rx(skb);
 			return NET_RX_SUCCESS;
 		} else {
@@ -777,8 +777,10 @@ static int hostap_cs_suspend(struct pcmcia_device *link)
 	int dev_open = 0;
 	struct hostap_interface *iface = NULL;
 
-	if (dev)
-		iface = netdev_priv(dev);
+	if (!dev)
+		return -ENODEV;
+
+	iface = netdev_priv(dev);
 
 	PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
 	if (iface && iface->local)
@@ -798,8 +800,10 @@ static int hostap_cs_resume(struct pcmcia_device *link)
 	int dev_open = 0;
 	struct hostap_interface *iface = NULL;
 
-	if (dev)
-		iface = netdev_priv(dev);
+	if (!dev)
+		return -ENODEV;
+
+	iface = netdev_priv(dev);
 
 	PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
 
@@ -449,7 +449,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
 
 	if (print_summary) {
 		char *title;
-		u32 rate;
+		int rate;
 
 		if (hundred)
 			title = "100Frames";
@@ -487,7 +487,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
 	 * but you can hack it to show more, if you'd like to. */
 	if (dataframe)
 		IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
-			     "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
+			     "len=%u, rssi=%d, chnl=%d, rate=%d, \n",
 			     title, fc, header->addr1[5],
 			     length, rssi, channel, rate);
 	else {
@@ -588,8 +588,12 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
 
 	if (rate == -1)
 		iwl3945_rt->rt_rate = 0;
-	else
+	else {
+		if (stats->band == IEEE80211_BAND_5GHZ)
+			rate += IWL_FIRST_OFDM_RATE;
+
 		iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee;
+	}
 
 	/* antenna number */
 	antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
@@ -3528,8 +3528,12 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv,
 
 	if (rate == -1)
 		iwl4965_rt->rt_rate = 0;
-	else
+	else {
+		if (stats->band == IEEE80211_BAND_5GHZ)
+			rate += IWL_FIRST_OFDM_RATE;
+
 		iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
+	}
 
 	/*
 	 * "antenna number"
@@ -6687,7 +6687,8 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
 
 	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
 		IWL_DEBUG_MAC80211("leave - monitor\n");
-		return -1;
+		dev_kfree_skb_any(skb);
+		return 0;
 	}
 
 	IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
@@ -6237,7 +6237,8 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
 
 	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
 		IWL_DEBUG_MAC80211("leave - monitor\n");
-		return -1;
+		dev_kfree_skb_any(skb);
+		return 0;
 	}
 
 	IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
@@ -925,6 +925,7 @@ static struct usb_driver if_usb_driver = {
 	.id_table = if_usb_table,
 	.suspend = if_usb_suspend,
 	.resume = if_usb_resume,
+	.reset_resume = if_usb_resume,
 };
 
 static int __init if_usb_init_module(void)
@@ -567,11 +567,11 @@ static int lbs_process_bss(struct bss_descriptor *bss,
 	pos += 8;
 
 	/* beacon interval is 2 bytes long */
-	bss->beaconperiod = le16_to_cpup((void *) pos);
+	bss->beaconperiod = get_unaligned_le16(pos);
 	pos += 2;
 
 	/* capability information is 2 bytes long */
-	bss->capability = le16_to_cpup((void *) pos);
+	bss->capability = get_unaligned_le16(pos);
 	lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability);
 	pos += 2;
 
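
Aside, not part of the patch above: the lbs_process_bss() hunk swaps a cast plus le16_to_cpup() for get_unaligned_le16() because `pos` walks a packed scan response and may not be 2-byte aligned. A portable userspace sketch of the same idea; read_le16() below is a hand-rolled stand-in, not the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for get_unaligned_le16(): read the two bytes individually and
 * assemble them, so the access is legal at any alignment. */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* A buffer where the 16-bit little-endian field starts at an odd offset. */
	uint8_t ie[5] = { 0xAA, 0x64, 0x00, 0x11, 0x22 };
	const uint8_t *pos = ie + 1;	/* unaligned pointer into the buffer */

	printf("beacon interval = %u\n", read_le16(pos));	/* prints 100 */
	return 0;
}
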
@@ -731,6 +731,17 @@ static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev)
 			   (rt2x00dev->rx->data_size / 128));
 	rt2x00pci_register_write(rt2x00dev, CSR9, reg);
 
+	rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
+	rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
+	rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
+	rt2x00_set_field32(&reg, CSR14_TBCN, 0);
+	rt2x00_set_field32(&reg, CSR14_TCFP, 0);
+	rt2x00_set_field32(&reg, CSR14_TATIMW, 0);
+	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
+	rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0);
+	rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0);
+	rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
 	rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000);
 
 	rt2x00pci_register_read(rt2x00dev, ARCSR0, &reg);
@@ -824,6 +824,17 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
 	rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0);
 	rt2x00pci_register_write(rt2x00dev, CSR11, reg);
 
+	rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
+	rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
+	rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
+	rt2x00_set_field32(&reg, CSR14_TBCN, 0);
+	rt2x00_set_field32(&reg, CSR14_TCFP, 0);
+	rt2x00_set_field32(&reg, CSR14_TATIMW, 0);
+	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
+	rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0);
+	rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0);
+	rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
 	rt2x00pci_register_write(rt2x00dev, CNT3, 0);
 
 	rt2x00pci_register_read(rt2x00dev, TXCSR8, &reg);
@@ -801,6 +801,13 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
 	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0);
 	rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg);
 
+	rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
+	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0);
+	rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
+	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+
 	rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f);
 	rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d);
 
@@ -1201,6 +1201,15 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
 	rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
 	rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg);
 
+	rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+	rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
+	rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
 	rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
 
 	rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff);
@@ -1006,6 +1006,15 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
 	rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
 	rt73usb_register_write(rt2x00dev, TXRX_CSR8, reg);
 
+	rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+	rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+	rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
+	rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
 	rt73usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
 
 	rt73usb_register_read(rt2x00dev, MAC_CSR6, &reg);
@@ -765,6 +765,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
 	mac->type = IEEE80211_IF_TYPE_INVALID;
+	zd_set_beacon_interval(&mac->chip, 0);
 	zd_write_mac_addr(&mac->chip, NULL);
 }
 
@@ -64,6 +64,7 @@ static struct usb_device_id usb_ids[] = {
 	{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
@@ -101,8 +101,8 @@ static int rio_device_probe(struct device *dev)
 		if (error >= 0) {
 			rdev->driver = rdrv;
 			error = 0;
+		} else
 			rio_dev_put(rdev);
-		}
 	}
 	return error;
 }
@@ -55,7 +55,7 @@ struct fm3130 {
 	int alarm;
 };
 static const struct i2c_device_id fm3130_id[] = {
-	{ "fm3130-rtc", 0 },
+	{ "fm3130", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, fm3130_id);
@@ -302,6 +302,7 @@ static int pcf8563_remove(struct i2c_client *client)
 
 static const struct i2c_device_id pcf8563_id[] = {
 	{ "pcf8563", 0 },
+	{ "rtc8564", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, pcf8563_id);
@@ -71,6 +71,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/libata.h>
+#include <linux/hdreg.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
@@ -4913,8 +4914,11 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 	struct ipr_resource_entry *res;
 
 	res = (struct ipr_resource_entry *)sdev->hostdata;
-	if (res && ipr_is_gata(res))
+	if (res && ipr_is_gata(res)) {
+		if (cmd == HDIO_GET_IDENTITY)
+			return -ENOTTY;
 		return ata_scsi_ioctl(sdev, cmd, arg);
+	}
 
 	return -EINVAL;
 }
@@ -207,6 +207,15 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	 */
 	blk_execute_rq(req->q, NULL, req, 1);
 
+	/*
+	 * Some devices (USB mass-storage in particular) may transfer
+	 * garbage data together with a residue indicating that the data
+	 * is invalid. Prevent the garbage from being misinterpreted
+	 * and prevent security leaks by zeroing out the excess data.
+	 */
+	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
+		memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+
 	ret = req->errors;
  out:
 	blk_put_request(req);
@@ -2623,6 +2623,9 @@ static struct console serial8250_console = {
 
 static int __init serial8250_console_init(void)
 {
+	if (nr_uarts > UART_NR)
+		nr_uarts = UART_NR;
+
 	serial8250_isa_init_ports();
 	register_console(&serial8250_console);
 	return 0;
@@ -537,6 +537,13 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
 	int err = 0;
 	u32 tmp;
 
+	if (dev->bus->bustype != SSB_BUSTYPE_PCI) {
+		/* This SSB device is not on a PCI host-bus. So the IRQs are
+		 * not routed through the PCI core.
+		 * So we must not enable routing through the PCI core. */
+		goto out;
+	}
+
 	if (!pdev)
 		goto out;
 	bus = pdev->bus;
@@ -8,7 +8,7 @@
  * Bus Glue for AMD Alchemy Au1xxx
  *
  * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Rusell King et al.
+ * Based on fragments of previous driver by Russell King et al.
  *
  * Modified for LH7A404 from ohci-sa1111.c
  *  by Durgesh Pattamatta <pattamattad@sharpsec.com>
@@ -8,7 +8,7 @@
 * Bus Glue for Sharp LH7A404
 *
 * Written by Christopher Hoover <ch@hpl.hp.com>
-* Based on fragments of previous driver by Rusell King et al.
+* Based on fragments of previous driver by Russell King et al.
 *
 * Modified for LH7A404 from ohci-sa1111.c
 *  by Durgesh Pattamatta <pattamattad@sharpsec.com>
@@ -8,7 +8,7 @@
 * USB Bus Glue for Samsung S3C2410
 *
 * Written by Christopher Hoover <ch@hpl.hp.com>
-* Based on fragments of previous driver by Rusell King et al.
+* Based on fragments of previous driver by Russell King et al.
 *
 * Modified for S3C2410 from ohci-sa1111.c, ohci-omap.c and ohci-lh7a40.c
 *  by Ben Dooks, <ben@simtec.co.uk>
@@ -8,7 +8,7 @@
 * SA1111 Bus Glue
 *
 * Written by Christopher Hoover <ch@hpl.hp.com>
-* Based on fragments of previous driver by Rusell King et al.
+* Based on fragments of previous driver by Russell King et al.
 *
 * This file is licenced under the GPL.
 */
@@ -74,6 +74,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 {
 	struct fb_info *info = vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct page *cur;
 
 	/* this is a callback we get when userspace first tries to
 	write to the page. we schedule a workqueue. that workqueue
@@ -83,7 +84,24 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
-	list_add(&page->lru, &fbdefio->pagelist);
+
+	/* we loop through the pagelist before adding in order
+	to keep the pagelist sorted */
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		/* this check is to catch the case where a new
+		process could start writing to the same page
+		through a new pte. this new access can cause the
+		mkwrite even when the original ps's pte is marked
+		writable */
+		if (unlikely(cur == page))
+			goto page_already_added;
+		else if (cur->index > page->index)
+			break;
+	}
+
+	list_add_tail(&page->lru, &cur->lru);
+
+page_already_added:
 	mutex_unlock(&fbdefio->lock);
 
 	/* come back after delay to process the deferred IO */
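
Aside, not part of the patch above: the fb_deferred_io hunk keeps the dirty-page list sorted by walking it before inserting and bailing out early if the page is already queued. A small userspace sketch of the same pattern on a plain singly linked list; the kernel uses its own struct list_head and list_add_tail(), and the node type and names below are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long index;
	struct node *next;
};

/* Insert n into a list kept sorted by index; do nothing if an entry with the
 * same index is already present (mirrors the "page_already_added" exit). */
static void insert_sorted(struct node **head, struct node *n)
{
	struct node **pos = head;

	while (*pos) {
		if ((*pos)->index == n->index)
			return;			/* already queued */
		if ((*pos)->index > n->index)
			break;			/* found the insertion point */
		pos = &(*pos)->next;
	}
	n->next = *pos;
	*pos = n;
}

int main(void)
{
	struct node a = { 3, NULL }, b = { 1, NULL }, c = { 3, NULL }, *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);	/* duplicate index, ignored */

	for (struct node *n = head; n; n = n->next)
		printf("%lu ", n->index);	/* prints: 1 3 */
	printf("\n");
	return 0;
}
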
@@ -1324,7 +1324,7 @@ static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
 {
 	struct fsl_diu_data *machine_data;
 
-	machine_data = dev_get_drvdata(&dev->dev);
+	machine_data = dev_get_drvdata(&ofdev->dev);
 	disable_lcdc(machine_data->fsl_diu_info[0]);
 
 	return 0;
@@ -1334,7 +1334,7 @@ static int fsl_diu_resume(struct of_device *ofdev)
 {
 	struct fsl_diu_data *machine_data;
 
-	machine_data = dev_get_drvdata(&dev->dev);
+	machine_data = dev_get_drvdata(&ofdev->dev);
 	enable_lcdc(machine_data->fsl_diu_info[0]);
 
 	return 0;
@@ -34,11 +34,11 @@
 static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
 	{{1, 0, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, "null user"},
 	{{1, 1, {0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0} }, "nobody"},
-	{{1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11), 0, 0, 0, 0} }, "net-users"},
-	{{1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(18), 0, 0, 0, 0} }, "sys"},
-	{{1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(32), cpu_to_le32(544), 0, 0, 0} }, "root"},
-	{{1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(32), cpu_to_le32(545), 0, 0, 0} }, "users"},
-	{{1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(32), cpu_to_le32(546), 0, 0, 0} }, "guest"} }
+	{{1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11), 0, 0, 0, 0} }, "net-users"},
+	{{1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(18), 0, 0, 0, 0} }, "sys"},
+	{{1, 2, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(32), __constant_cpu_to_le32(544), 0, 0, 0} }, "root"},
+	{{1, 2, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(32), __constant_cpu_to_le32(545), 0, 0, 0} }, "users"},
+	{{1, 2, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(32), __constant_cpu_to_le32(546), 0, 0, 0} }, "guest"} }
 ;
 
 
@@ -219,15 +219,15 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 	rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &find_data,
 			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 				CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc) {
-		if (rc == -EREMOTE && !is_dfs_referral) {
-			is_dfs_referral = true;
-			cFYI(DBG2, ("DFS ref"));
-			/* for DFS, server does not give us real inode data */
-			fill_fake_finddataunix(&find_data, sb);
-			rc = 0;
-		}
-	}
+	if (rc == -EREMOTE && !is_dfs_referral) {
+		is_dfs_referral = true;
+		cFYI(DBG2, ("DFS ref"));
+		/* for DFS, server does not give us real inode data */
+		fill_fake_finddataunix(&find_data, sb);
+		rc = 0;
+	} else if (rc)
+		goto cgiiu_exit;
+
 	num_of_bytes = le64_to_cpu(find_data.NumOfBytes);
 	end_of_file = le64_to_cpu(find_data.EndOfFile);
 
@@ -236,7 +236,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 		*pinode = new_inode(sb);
 		if (*pinode == NULL) {
 			rc = -ENOMEM;
 			goto cgiiu_exit;
 		}
 		/* Is an i_ino of zero legal? */
 		/* note ino incremented to unique num in new_inode */
@@ -610,7 +610,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 	bprm->exec -= stack_shift;
 
 	down_write(&mm->mmap_sem);
-	vm_flags = vma->vm_flags;
+	vm_flags = VM_STACK_FLAGS;
 
 	/*
 	 * Adjust stack execute permissions; explicitly enable for
@@ -204,7 +204,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
 	 * Note: assumes we have exclusive access to this mapping either
 	 * through inode->i_mutex or some other mechanism.
 	 */
-	if (page->index == 0 && invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1) < 0) {
+	if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
 		/* Should never happen */
 		nfs_zap_mapping(inode, inode->i_mapping);
 	}
@@ -606,7 +606,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
 	res->last_used = 0;
 
+	spin_lock(&dlm->spinlock);
 	list_add_tail(&res->tracking, &dlm->tracking_list);
+	spin_unlock(&dlm->spinlock);
 
 	memset(res->lvb, 0, DLM_LVB_LEN);
 	memset(res->refmap, 0, sizeof(res->refmap));
@@ -1554,8 +1554,8 @@ out:
  */
 int ocfs2_file_lock(struct file *file, int ex, int trylock)
 {
-	int ret, level = ex ? LKM_EXMODE : LKM_PRMODE;
-	unsigned int lkm_flags = trylock ? LKM_NOQUEUE : 0;
+	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
 	unsigned long flags;
 	struct ocfs2_file_private *fp = file->private_data;
 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
@@ -1582,7 +1582,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
 		 * Get the lock at NLMODE to start - that way we
 		 * can cancel the upconvert request if need be.
 		 */
-		ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
+		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
 		if (ret < 0) {
 			mlog_errno(ret);
 			goto out;
@@ -1597,7 +1597,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
 	}
 
 	lockres->l_action = OCFS2_AST_CONVERT;
-	lkm_flags |= LKM_CONVERT;
+	lkm_flags |= DLM_LKF_CONVERT;
 	lockres->l_requested = level;
 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 
@@ -1664,7 +1664,7 @@ void ocfs2_file_unlock(struct file *file)
 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
 		return;
 
-	if (lockres->l_level == LKM_NLMODE)
+	if (lockres->l_level == DLM_LOCK_NL)
 		return;
 
 	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
@@ -1678,11 +1678,11 @@ void ocfs2_file_unlock(struct file *file)
 	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
 	lockres->l_blocking = DLM_LOCK_EX;
 
-	gen = ocfs2_prepare_downconvert(lockres, LKM_NLMODE);
+	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
-	ret = ocfs2_downconvert_lock(osb, lockres, LKM_NLMODE, 0, gen);
+	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
 	if (ret) {
 		mlog_errno(ret);
 		return;
@@ -45,6 +45,8 @@ void reiserfs_delete_inode(struct inode *inode)
 			goto out;
 		reiserfs_update_inode_transaction(inode);
 
+		reiserfs_discard_prealloc(&th, inode);
+
 		err = reiserfs_delete_object(&th, inode);
 
 		/* Do quota update inside a transaction for journaled quotas. We must do that
@@ -2427,13 +2427,20 @@ restart:
 	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
 
-		/* If I'm the only one writing to this iclog, sync it to disk */
-		if (atomic_read(&iclog->ic_refcnt) == 1) {
+		/*
+		 * If I'm the only one writing to this iclog, sync it to disk.
+		 * We need to do an atomic compare and decrement here to avoid
+		 * racing with concurrent atomic_dec_and_lock() calls in
+		 * xlog_state_release_iclog() when there is more than one
+		 * reference to the iclog.
+		 */
+		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
+			/* we are the only one */
 			spin_unlock(&log->l_icloglock);
-			if ((error = xlog_state_release_iclog(log, iclog)))
+			error = xlog_state_release_iclog(log, iclog);
+			if (error)
 				return error;
 		} else {
-			atomic_dec(&iclog->ic_refcnt);
 			spin_unlock(&log->l_icloglock);
 		}
 		goto restart;
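
Aside, not part of the patch above: the xfs_log hunk replaces "read the refcount, decide, then decrement later" with atomic_add_unless(&refcnt, -1, 1), so the "am I the last reference?" test and the decrement happen as one atomic step. A userspace sketch of the same idea with C11 atomics; add_unless_one() is a hand-rolled stand-in, not the kernel helper:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

/* Decrement *v unless it currently equals 1; return true if it decremented.
 * A false return means the caller holds the only remaining reference, which
 * mirrors !atomic_add_unless(v, -1, 1) in the patch. */
static bool add_unless_one(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 1) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;	/* decremented from old to old - 1 */
		/* CAS failure reloaded old with the current value; retry */
	}
	return false;			/* counter was 1; nothing changed */
}

int main(void)
{
	atomic_int refcnt = 2;

	if (!add_unless_one(&refcnt))
		printf("sole owner, release now\n");
	else
		printf("dropped one reference, %d left\n", atomic_load(&refcnt));

	if (!add_unless_one(&refcnt))
		printf("sole owner, release now\n");
	return 0;
}
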
@@ -2,7 +2,7 @@
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/include/asm-arm/setup.h
-*   Copyright (C) 1997-1999 Russel King
+*   Copyright (C) 1997-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -87,7 +87,7 @@ do { \
 } while(0)
 
 #define irqs_disabled()					\
-	({unsigned long flags; local_save_flags(flags); flags; })
+	({unsigned long flags; local_save_flags(flags); !!flags; })
 
 #define local_irq_save(flags)				\
 	do {						\
@@ -49,12 +49,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 	return pte_wrprotect(pte);
 }
 
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-					   unsigned long addr, pte_t *ptep)
-{
-	ptep_set_wrprotect(mm, addr, ptep);
-}
-
 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
@@ -314,6 +314,16 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
 }
 
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	unsigned long old;
+
+	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+		return;
+	old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
+}
+
 /*
  * We currently remove entries from the hashtable regardless of whether
  * the entry was young or dirty. The generic routines only flush if the
@@ -223,6 +223,9 @@ extern char empty_zero_page[PAGE_SIZE];
 #define _PAGE_SPECIAL	0x004		/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL
 
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)
+
 /* Six different types of pages. */
 #define _PAGE_TYPE_EMPTY	0x400
 #define _PAGE_TYPE_NONE		0x401
@@ -681,7 +684,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
  */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pte_val(pte) &= PAGE_MASK;
+	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
 	return pte;
 }
@@ -188,8 +188,8 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
 		unsigned cpu = smp_processor_id();
 		ldt_desc ldt;
 
-		set_tssldt_descriptor(&ldt, (unsigned long)addr,
-				      DESC_LDT, entries * sizeof(ldt) - 1);
+		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
+				      entries * LDT_ENTRY_SIZE - 1);
 		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
 				&ldt, DESC_LDT);
 		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
@@ -189,6 +189,21 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
 	hw->io_ports.ctl_addr = ctl_addr;
 }
 
+/* for IDE PCI controllers in legacy mode, temporary */
+static inline int __ide_default_irq(unsigned long base)
+{
+	switch (base) {
+#ifdef CONFIG_IA64
+	case 0x1f0: return isa_irq_to_vector(14);
+	case 0x170: return isa_irq_to_vector(15);
+#else
+	case 0x1f0: return 14;
+	case 0x170: return 15;
+#endif
+	}
+	return 0;
+}
+
 #include <asm/ide.h>
 
 #if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED)
@@ -339,6 +339,7 @@ struct xfrm_usersa_info {
 #define XFRM_STATE_NOPMTUDISC	4
 #define XFRM_STATE_WILDRECV	8
 #define XFRM_STATE_ICMP		16
+#define XFRM_STATE_AF_UNSPEC	32
 };
 
 struct xfrm_usersa_id {
@@ -79,7 +79,7 @@ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 *
 * For such cases, we now have a blacklist
 */
-struct kprobe_blackpoint kprobe_blacklist[] = {
+static struct kprobe_blackpoint kprobe_blacklist[] = {
 	{"preempt_schedule",},
 	{NULL}    /* Terminator */
 };
@@ -670,7 +670,7 @@ static int acquire_console_semaphore_for_printk(unsigned int cpu)
 	return retval;
 }
 
-const char printk_recursion_bug_msg [] =
+static const char printk_recursion_bug_msg [] =
 	KERN_CRIT "BUG: recent printk recursion!\n";
 static int printk_recursion_bug;
 
@@ -925,7 +925,15 @@ void rcu_offline_cpu(int cpu)
 	spin_unlock_irqrestore(&rdp->lock, flags);
 }
 
-void __devinit rcu_online_cpu(int cpu)
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+void __cpuinit rcu_online_cpu(int cpu)
 {
 	unsigned long flags;
 
@@ -934,18 +942,6 @@ void __devinit rcu_online_cpu(int cpu)
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 }
 
-#else /* #ifdef CONFIG_HOTPLUG_CPU */
-
-void rcu_offline_cpu(int cpu)
-{
-}
-
-void __devinit rcu_online_cpu(int cpu)
-{
-}
-
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
 	unsigned long flags;
@@ -5622,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	double_rq_lock(rq_src, rq_dest);
 	/* Already moved. */
 	if (task_cpu(p) != src_cpu)
-		goto out;
+		goto done;
 	/* Affinity changed (again). */
 	if (!cpu_isset(dest_cpu, p->cpus_allowed))
-		goto out;
+		goto fail;
 
 	on_rq = p->se.on_rq;
 	if (on_rq)
@@ -5636,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		activate_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p);
 	}
+done:
 	ret = 1;
-out:
+fail:
 	double_rq_unlock(rq_src, rq_dest);
 	return ret;
 }
128	lib/vsprintf.c
@ -22,6 +22,8 @@
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/ctype.h>
|
#include <linux/ctype.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/kallsyms.h>
|
||||||
|
#include <linux/uaccess.h>
|
||||||
|
|
||||||
#include <asm/page.h> /* for PAGE_SIZE */
|
#include <asm/page.h> /* for PAGE_SIZE */
|
||||||
#include <asm/div64.h>
|
#include <asm/div64.h>
|
||||||
|
@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
|
||||||
return buf;
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
|
||||||
|
{
|
||||||
|
int len, i;
|
||||||
|
|
||||||
|
if ((unsigned long)s < PAGE_SIZE)
|
||||||
|
s = "<NULL>";
|
||||||
|
|
||||||
|
len = strnlen(s, precision);
|
||||||
|
|
||||||
|
if (!(flags & LEFT)) {
|
||||||
|
while (len < field_width--) {
|
||||||
|
if (buf < end)
|
||||||
|
*buf = ' ';
|
||||||
|
++buf;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (i = 0; i < len; ++i) {
|
||||||
|
if (buf < end)
|
||||||
|
*buf = *s;
|
||||||
|
++buf; ++s;
|
||||||
|
}
|
||||||
|
while (len < field_width--) {
|
||||||
|
if (buf < end)
|
||||||
|
*buf = ' ';
|
||||||
|
++buf;
|
||||||
|
}
|
||||||
|
return buf;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void *dereference_function_descriptor(void *ptr)
|
||||||
|
{
|
||||||
|
#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
|
||||||
|
void *p;
|
||||||
|
if (!probe_kernel_address(ptr, p))
|
||||||
|
ptr = p;
|
||||||
|
#endif
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
|
||||||
|
{
|
||||||
|
unsigned long value = (unsigned long) ptr;
|
||||||
|
#ifdef CONFIG_KALLSYMS
|
||||||
|
char sym[KSYM_SYMBOL_LEN];
|
||||||
|
sprint_symbol(sym, value);
|
||||||
|
return string(buf, end, sym, field_width, precision, flags);
|
||||||
|
#else
|
||||||
|
field_width = 2*sizeof(void *);
|
||||||
|
flags |= SPECIAL | SMALL | ZEROPAD;
|
||||||
|
return number(buf, end, value, 16, field_width, precision, flags);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Show a '%p' thing. A kernel extension is that the '%p' is followed
|
||||||
|
* by an extra set of alphanumeric characters that are extended format
|
||||||
|
* specifiers.
|
||||||
|
*
|
||||||
|
* Right now we just handle 'F' (for symbolic Function descriptor pointers)
|
||||||
|
* and 'S' (for Symbolic direct pointers), but this can easily be
|
||||||
|
* extended in the future (network address types etc).
|
||||||
|
*
|
||||||
|
* The difference between 'S' and 'F' is that on ia64 and ppc64 function
|
||||||
|
* pointers are really function descriptors, which contain a pointer the
|
||||||
|
* real address.
|
||||||
|
*/
|
||||||
|
static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
|
||||||
|
{
|
||||||
|
switch (*fmt) {
|
||||||
|
case 'F':
|
||||||
|
ptr = dereference_function_descriptor(ptr);
|
||||||
|
/* Fallthrough */
|
||||||
|
case 'S':
|
||||||
|
return symbol_string(buf, end, ptr, field_width, precision, flags);
|
||||||
|
}
|
||||||
|
flags |= SMALL;
|
||||||
|
if (field_width == -1) {
|
||||||
|
field_width = 2*sizeof(void *);
|
||||||
|
flags |= ZEROPAD;
|
||||||
|
}
|
||||||
|
return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
|
||||||
|
}
|
||||||
|
|
||||||
 /**
  * vsnprintf - Format a string and place it in a buffer
  * @buf: The buffer to place the result into

@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
  */
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
-	int len;
 	unsigned long long num;
-	int i, base;
+	int base;
 	char *str, *end, c;
-	const char *s;

 	int flags;		/* flags to number() */

@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			continue;

 		case 's':
-			s = va_arg(args, char *);
-			if ((unsigned long)s < PAGE_SIZE)
-				s = "<NULL>";
-
-			len = strnlen(s, precision);
-
-			if (!(flags & LEFT)) {
-				while (len < field_width--) {
-					if (str < end)
-						*str = ' ';
-					++str;
-				}
-			}
-			for (i = 0; i < len; ++i) {
-				if (str < end)
-					*str = *s;
-				++str; ++s;
-			}
-			while (len < field_width--) {
-				if (str < end)
-					*str = ' ';
-				++str;
-			}
+			str = string(str, end, va_arg(args, char *), field_width, precision, flags);
 			continue;

 		case 'p':
-			flags |= SMALL;
-			if (field_width == -1) {
-				field_width = 2*sizeof(void *);
-				flags |= ZEROPAD;
-			}
-			str = number(str, end,
-				(unsigned long) va_arg(args, void *),
-				16, field_width, precision, flags);
+			str = pointer(fmt+1, str, end,
+					va_arg(args, void *),
+					field_width, precision, flags);
+			/* Skip all alphanumeric pointer suffixes */
+			while (isalnum(fmt[1]))
+				fmt++;
 			continue;

 		case 'n':
 			/* FIXME:
 			* What does C99 say about the overflow case here? */

@@ -1628,9 +1628,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
+	unsigned int objsize;

 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	objsize = c->objsize;
 	if (unlikely(!c->freelist || !node_match(c, node)))

 		object = __slab_alloc(s, gfpflags, node, addr, c);

@@ -1643,7 +1645,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);

 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, c->objsize);
+		memset(object, 0, objsize);

 	return object;
 }
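
Reading the hunk above (editorial note, hedged): the memset() for __GFP_ZERO runs after local_irq_restore(), when the per-cpu slab structure behind c appears no longer safe to dereference, so the object size is snapshotted into a local while interrupts are still off. Callers keep the usual contract, roughly:

	/* sketch with assumed names (my_cache, struct my_obj) */
	struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL | __GFP_ZERO);
	if (!obj)
		return -ENOMEM;
	/* every byte of *obj is zero at this point */
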

@@ -442,12 +442,16 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)

 void __exit br_cleanup_bridges(void)
 {
-	struct net_device *dev, *nxt;
+	struct net_device *dev;

 	rtnl_lock();
-	for_each_netdev_safe(&init_net, dev, nxt)
-		if (dev->priv_flags & IFF_EBRIDGE)
+restart:
+	for_each_netdev(&init_net, dev) {
+		if (dev->priv_flags & IFF_EBRIDGE) {
 			del_br(dev->priv);
+			goto restart;
+		}
+	}
 	rtnl_unlock();

 }
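
The switch away from for_each_netdev_safe() presumably reflects that deleting a bridge can trigger removal of further devices from the same list, leaving even a saved next pointer stale, so the loop restarts from the head after every deletion. A stand-alone sketch of that pattern (plain C, not kernel code):

	#include <stdlib.h>

	struct node { struct node *next; int doomed; };

	/* Remove every doomed node.  Removing one node may invalidate other
	 * nodes too, so rescan from the head after each removal instead of
	 * trusting a cached next pointer. */
	static void purge(struct node **head)
	{
	restart:
		for (struct node **pp = head; *pp; pp = &(*pp)->next) {
			if ((*pp)->doomed) {
				struct node *victim = *pp;
				*pp = victim->next;
				free(victim);
				goto restart;
			}
		}
	}
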

@@ -205,12 +205,19 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
  * -ENOBUFS on full driver queue (see net_xmit_errno())
  * -ENOMEM when local loopback failed at calling skb_clone()
  * -EPERM when trying to send on a non-CAN interface
+ * -EINVAL when the skb->data does not contain a valid CAN frame
  */
 int can_send(struct sk_buff *skb, int loop)
 {
 	struct sk_buff *newskb = NULL;
+	struct can_frame *cf = (struct can_frame *)skb->data;
 	int err;

+	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
 	if (skb->dev->type != ARPHRD_CAN) {
 		kfree_skb(skb);
 		return -EPERM;

@@ -605,6 +612,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 		   struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct dev_rcv_lists *d;
+	struct can_frame *cf = (struct can_frame *)skb->data;
 	int matches;

 	if (dev->type != ARPHRD_CAN || dev_net(dev) != &init_net) {

@@ -612,6 +620,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 		return 0;
 	}

+	BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8);
+
 	/* update statistics */
 	can_stats.rx_frames++;
 	can_stats.rx_frames_delta++;
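
Taken together, these checks mean every CAN skb must be exactly sizeof(struct can_frame) long with can_dlc <= 8. A minimal user-space sketch that satisfies them over a raw CAN socket (the interface name "can0" and the frame contents are arbitrary assumptions):

	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	int send_one_frame(void)
	{
		struct sockaddr_can addr = { .can_family = AF_CAN };
		struct ifreq ifr;
		struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
					   .data = { 0xde, 0xad } };
		int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

		if (s < 0)
			return -1;
		strcpy(ifr.ifr_name, "can0");	/* assumed interface name */
		if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
			goto fail;
		addr.can_ifindex = ifr.ifr_ifindex;
		if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			goto fail;
		/* exactly one complete can_frame, or the kernel returns -EINVAL */
		if (write(s, &frame, sizeof(frame)) != sizeof(frame))
			goto fail;
		close(s);
		return 0;
	fail:
		close(s);
		return -1;
	}
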

@@ -298,7 +298,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,

 	if (head->nframes) {
 		/* can_frames starting here */
-		firstframe = (struct can_frame *) skb_tail_pointer(skb);
+		firstframe = (struct can_frame *)skb_tail_pointer(skb);

 		memcpy(skb_put(skb, datalen), frames, datalen);

@@ -826,6 +826,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		for (i = 0; i < msg_head->nframes; i++) {
 			err = memcpy_fromiovec((u8 *)&op->frames[i],
 					       msg->msg_iov, CFSIZ);
+
+			if (op->frames[i].can_dlc > 8)
+				err = -EINVAL;
+
 			if (err < 0)
 				return err;

@@ -858,6 +862,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		for (i = 0; i < msg_head->nframes; i++) {
 			err = memcpy_fromiovec((u8 *)&op->frames[i],
 					       msg->msg_iov, CFSIZ);
+
+			if (op->frames[i].can_dlc > 8)
+				err = -EINVAL;
+
 			if (err < 0) {
 				if (op->frames != &op->sframe)
 					kfree(op->frames);

@@ -1164,9 +1172,12 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)

 	skb->dev = dev;
 	skb->sk = sk;
-	can_send(skb, 1); /* send with loopback */
+	err = can_send(skb, 1); /* send with loopback */
 	dev_put(dev);

+	if (err)
+		return err;
+
 	return CFSIZ + MHSIZ;
 }

@@ -1185,6 +1196,10 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (!bo->bound)
 		return -ENOTCONN;

+	/* check for valid message length from userspace */
+	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
+		return -EINVAL;
+
 	/* check for alternative ifindex for this bcm_op */

 	if (!ifindex && msg->msg_name) {

@@ -1259,8 +1274,8 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
 		break;

 	case TX_SEND:
-		/* we need at least one can_frame */
-		if (msg_head.nframes < 1)
+		/* we need exactly one can_frame behind the msg head */
+		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
 			ret = -EINVAL;
 		else
 			ret = bcm_tx_send(msg, ifindex, sk);
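
The new length checks make the expected layout of a BCM message from user space explicit: one bcm_msg_head followed by whole can_frames, and for TX_SEND exactly one frame (what the kernel counts as MHSIZ + CFSIZ bytes). A hedged user-space sketch, assuming bcm_sock is an already connected PF_CAN/CAN_BCM socket:

	#include <string.h>
	#include <unistd.h>
	#include <linux/can.h>
	#include <linux/can/bcm.h>

	struct bcm_one_frame {		/* head plus exactly one frame */
		struct bcm_msg_head head;
		struct can_frame frame;
	};

	int bcm_send_once(int bcm_sock)
	{
		struct bcm_one_frame m;

		memset(&m, 0, sizeof(m));
		m.head.opcode   = TX_SEND;
		m.head.nframes  = 1;		/* must be exactly 1 */
		m.frame.can_id  = 0x42;
		m.frame.can_dlc = 1;		/* must be <= 8 */
		m.frame.data[0] = 0xff;

		return write(bcm_sock, &m, sizeof(m)) == sizeof(m) ? 0 : -1;
	}
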

@@ -632,6 +632,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
 	} else
 		ifindex = ro->ifindex;

+	if (size != sizeof(struct can_frame))
+		return -EINVAL;
+
 	dev = dev_get_by_index(&init_net, ifindex);
 	if (!dev)
 		return -ENXIO;

@@ -1359,17 +1359,17 @@ static int check_leaf(struct trie *t, struct leaf *l,
 			t->stats.semantic_match_miss++;
 #endif
 		if (err <= 0)
-			return plen;
+			return err;
 	}

-	return -1;
+	return 1;
 }

 static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
 			  struct fib_result *res)
 {
 	struct trie *t = (struct trie *) tb->tb_data;
-	int plen, ret = 0;
+	int ret;
 	struct node *n;
 	struct tnode *pn;
 	int pos, bits;

@@ -1393,10 +1393,7 @@ static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,

 	/* Just a leaf? */
 	if (IS_LEAF(n)) {
-		plen = check_leaf(t, (struct leaf *)n, key, flp, res);
-		if (plen < 0)
-			goto failed;
-		ret = 0;
+		ret = check_leaf(t, (struct leaf *)n, key, flp, res);
 		goto found;
 	}

@@ -1421,11 +1418,9 @@ static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
 	}

 	if (IS_LEAF(n)) {
-		plen = check_leaf(t, (struct leaf *)n, key, flp, res);
-		if (plen < 0)
+		ret = check_leaf(t, (struct leaf *)n, key, flp, res);
+		if (ret > 0)
 			goto backtrace;
-
-		ret = 0;
 		goto found;
 	}

@@ -439,8 +439,8 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
 				     unsigned int *len)
 {
 	unsigned long subid;
-	unsigned int size;
 	unsigned long *optr;
+	size_t size;

 	size = eoc - ctx->pointer + 1;

@@ -255,6 +255,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/skbuff.h>
+#include <linux/scatterlist.h>
 #include <linux/splice.h>
 #include <linux/net.h>
 #include <linux/socket.h>

@@ -1208,7 +1209,8 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 		return -ENOTCONN;
 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
 		if (offset < skb->len) {
-			size_t used, len;
+			int used;
+			size_t len;

 			len = skb->len - offset;
 			/* Stop reading if we hit a patch of urgent data */

@@ -224,7 +224,7 @@ static __init int tcpprobe_init(void)
 	if (bufsize < 0)
 		return -EINVAL;

-	tcp_probe.log = kcalloc(sizeof(struct tcp_log), bufsize, GFP_KERNEL);
+	tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
 	if (!tcp_probe.log)
 		goto err0;
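
The tcp_probe change is purely about argument order: kcalloc() is declared as kcalloc(n, size, flags), element count first, and returns zeroed memory (or NULL if n * size would overflow). A short sketch of the intended call shape (hedged, error handling simplified):

	struct tcp_log *log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
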
Some files were not shown because too many files have changed in this diff