From 24c32c3760a223204d95c21494aeb683bae8b0e7 Mon Sep 17 00:00:00 2001 From: Shaun Nuzzo Date: Sat, 12 Jan 2013 00:51:11 -0500 Subject: [PATCH 001/117] Enable Swap partition support --- arch/arm/configs/vigor_aosp_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index f2542816..f73dff59 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -44,7 +44,7 @@ CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_LZO is not set CONFIG_DEFAULT_HOSTNAME="(none)" -# CONFIG_SWAP is not set +CONFIG_SWAP=y # CONFIG_SYSVIPC is not set # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set From 16080cfd4a88fddd67758de9cc90a5097fef9b2c Mon Sep 17 00:00:00 2001 From: Dennis Date: Fri, 31 Aug 2012 16:43:37 -0400 Subject: [PATCH 002/117] Added ZRam and snappy comrpression. --- drivers/staging/Kconfig | 2 + drivers/staging/Makefile | 2 + drivers/staging/snappy/Kconfig | 5 + drivers/staging/snappy/Makefile | 5 + drivers/staging/snappy/csnappy.h | 125 ++++ drivers/staging/snappy/csnappy_compress.c | 497 +++++++++++++++ drivers/staging/snappy/csnappy_decompress.c | 321 ++++++++++ drivers/staging/snappy/csnappy_internal.h | 83 +++ drivers/staging/zram/Kconfig | 38 +- drivers/staging/zram/xvmalloc.c | 39 +- drivers/staging/zram/zram_drv.c | 672 +++++++++++++------- drivers/staging/zram/zram_drv.h | 21 +- drivers/staging/zram/zram_sysfs.c | 22 +- 13 files changed, 1546 insertions(+), 286 deletions(-) create mode 100644 drivers/staging/snappy/Kconfig create mode 100644 drivers/staging/snappy/Makefile create mode 100644 drivers/staging/snappy/csnappy.h create mode 100644 drivers/staging/snappy/csnappy_compress.c create mode 100644 drivers/staging/snappy/csnappy_decompress.c create mode 100644 drivers/staging/snappy/csnappy_internal.h diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d0554125..cb7389b9 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -104,6 +104,8 @@ source "drivers/staging/iio/Kconfig" source "drivers/staging/cs5535_gpio/Kconfig" +source "drivers/staging/snappy/Kconfig" + source "drivers/staging/zram/Kconfig" source "drivers/staging/zcache/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 55f8b5e1..80c92391 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -43,6 +43,8 @@ obj-$(CONFIG_VME_BUS) += vme/ obj-$(CONFIG_DX_SEP) += sep/ obj-$(CONFIG_IIO) += iio/ obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio/ +obj-$(CONFIG_SNAPPY_COMPRESS) += snappy/ +obj-$(CONFIG_SNAPPY_DECOMPRESS) += snappy/ obj-$(CONFIG_ZRAM) += zram/ obj-$(CONFIG_XVMALLOC) += zram/ obj-$(CONFIG_ZCACHE) += zcache/ diff --git a/drivers/staging/snappy/Kconfig b/drivers/staging/snappy/Kconfig new file mode 100644 index 00000000..24f69085 --- /dev/null +++ b/drivers/staging/snappy/Kconfig @@ -0,0 +1,5 @@ +config SNAPPY_COMPRESS + tristate "Google Snappy Compression" + +config SNAPPY_DECOMPRESS + tristate "Google Snappy Decompression" diff --git a/drivers/staging/snappy/Makefile b/drivers/staging/snappy/Makefile new file mode 100644 index 00000000..399d070a --- /dev/null +++ b/drivers/staging/snappy/Makefile @@ -0,0 +1,5 @@ +snappy_compress-objs := csnappy_compress.o +snappy_decompress-objs := csnappy_decompress.o + +obj-$(CONFIG_SNAPPY_COMPRESS) += csnappy_compress.o +obj-$(CONFIG_SNAPPY_DECOMPRESS) += csnappy_decompress.o diff --git a/drivers/staging/snappy/csnappy.h 
b/drivers/staging/snappy/csnappy.h new file mode 100644 index 00000000..46ee7ecf --- /dev/null +++ b/drivers/staging/snappy/csnappy.h @@ -0,0 +1,125 @@ +#ifndef __CSNAPPY_H__ +#define __CSNAPPY_H__ +/* +File modified for the Linux Kernel by +Zeev Tarantov gmail.com> +*/ +#ifdef __cplusplus +extern "C" { +#endif + +#define CSNAPPY_VERSION 4 + +#define CSNAPPY_WORKMEM_BYTES_POWER_OF_TWO 15 +#define CSNAPPY_WORKMEM_BYTES (1 << CSNAPPY_WORKMEM_BYTES_POWER_OF_TWO) + +/* + * Returns the maximal size of the compressed representation of + * input data that is "source_len" bytes in length; + */ +uint32_t +csnappy_max_compressed_length(uint32_t source_len) __attribute__((const)); + +/* + * Flat array compression that does not emit the "uncompressed length" + * prefix. Compresses "input" array to the "output" array. + * + * REQUIRES: "input" is at most 32KiB long. + * REQUIRES: "output" points to an array of memory that is at least + * "csnappy_max_compressed_length(input_length)" in size. + * REQUIRES: working_memory has (1 << workmem_bytes_power_of_two) bytes. + * REQUIRES: 9 <= workmem_bytes_power_of_two <= 15. + * + * Returns an "end" pointer into "output" buffer. + * "end - output" is the compressed size of "input". + */ +char* +csnappy_compress_fragment( + const char *input, + const uint32_t input_length, + char *output, + void *working_memory, + const int workmem_bytes_power_of_two); + +/* + * REQUIRES: "compressed" must point to an area of memory that is at + * least "csnappy_max_compressed_length(input_length)" bytes in length. + * REQUIRES: working_memory has (1 << workmem_bytes_power_of_two) bytes. + * REQUIRES: 9 <= workmem_bytes_power_of_two <= 15. + * + * Takes the data stored in "input[0..input_length]" and stores + * it in the array pointed to by "compressed". + * + * "*out_compressed_length" is set to the length of the compressed output. + */ +void +csnappy_compress( + const char *input, + uint32_t input_length, + char *compressed, + uint32_t *out_compressed_length, + void *working_memory, + const int workmem_bytes_power_of_two); + +/* + * Reads header of compressed data to get stored length of uncompressed data. + * REQUIRES: start points to compressed data. + * REQUIRES: n is length of available compressed data. + * + * Returns SNAPPY_E_HEADER_BAD on error. + * Returns number of bytes read from input on success. + * Stores decoded length into *result. + */ +int +csnappy_get_uncompressed_length( + const char *start, + uint32_t n, + uint32_t *result); + +/* + * Safely decompresses all data from array "src" of length "src_len" containing + * entire compressed stream (with header) into array "dst" of size "dst_len". + * REQUIRES: dst_len is at least csnappy_get_uncompressed_length(...). + * + * Iff sucessful, returns CSNAPPY_E_OK. + * If recorded length in header is greater than dst_len, returns + * CSNAPPY_E_OUTPUT_INSUF. + * If compressed data is malformed, does not write more than dst_len into dst. + */ +int +csnappy_decompress( + const char *src, + uint32_t src_len, + char *dst, + uint32_t dst_len); + +/* + * Safely decompresses stream src_len bytes long read from src to dst. + * Amount of available space at dst must be provided in *dst_len by caller. + * If compressed stream needs more space, it will not overflow and return + * CSNAPPY_E_OUTPUT_OVERRUN. + * On success, sets *dst_len to actal number of bytes decompressed. + * Iff sucessful, returns CSNAPPY_E_OK. 
+ */ +int +csnappy_decompress_noheader( + const char *src, + uint32_t src_len, + char *dst, + uint32_t *dst_len); + +/* + * Return values (< 0 = Error) + */ +#define CSNAPPY_E_OK 0 +#define CSNAPPY_E_HEADER_BAD (-1) +#define CSNAPPY_E_OUTPUT_INSUF (-2) +#define CSNAPPY_E_OUTPUT_OVERRUN (-3) +#define CSNAPPY_E_INPUT_NOT_CONSUMED (-4) +#define CSNAPPY_E_DATA_MALFORMED (-5) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/staging/snappy/csnappy_compress.c b/drivers/staging/snappy/csnappy_compress.c new file mode 100644 index 00000000..36792109 --- /dev/null +++ b/drivers/staging/snappy/csnappy_compress.c @@ -0,0 +1,497 @@ +/* +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +File modified for the Linux Kernel by +Zeev Tarantov gmail.com> +*/ + +#include "csnappy_internal.h" +#ifdef __KERNEL__ +#include +#include +#endif +#include "csnappy.h" + + +static inline char* +encode_varint32(char *sptr, uint32_t v) +{ + uint8_t* ptr = (uint8_t *)sptr; + static const int B = 128; + if (v < (1<<7)) { + *(ptr++) = v; + } else if (v < (1<<14)) { + *(ptr++) = v | B; + *(ptr++) = v>>7; + } else if (v < (1<<21)) { + *(ptr++) = v | B; + *(ptr++) = (v>>7) | B; + *(ptr++) = v>>14; + } else if (v < (1<<28)) { + *(ptr++) = v | B; + *(ptr++) = (v>>7) | B; + *(ptr++) = (v>>14) | B; + *(ptr++) = v>>21; + } else { + *(ptr++) = v | B; + *(ptr++) = (v>>7) | B; + *(ptr++) = (v>>14) | B; + *(ptr++) = (v>>21) | B; + *(ptr++) = v>>28; + } + return (char *)ptr; +} + + +/* + * Any hash function will produce a valid compressed bitstream, but a good + * hash function reduces the number of collisions and thus yields better + * compression for compressible input, and more speed for incompressible + * input. Of course, it doesn't hurt if the hash function is reasonably fast + * either, as it gets called a lot. 
+ */ +static inline uint32_t HashBytes(uint32_t bytes, int shift) +{ + uint32_t kMul = 0x1e35a7bd; + return (bytes * kMul) >> shift; +} +static inline uint32_t Hash(const char *p, int shift) +{ + return HashBytes(UNALIGNED_LOAD32(p), shift); +} + + +/* + * *** DO NOT CHANGE THE VALUE OF kBlockSize *** + + * New Compression code chops up the input into blocks of at most + * the following size. This ensures that back-references in the + * output never cross kBlockSize block boundaries. This can be + * helpful in implementing blocked decompression. However the + * decompression code should not rely on this guarantee since older + * compression code may not obey it. + */ +#define kBlockLog 15 +#define kBlockSize (1 << kBlockLog) + + +/* + * Return the largest n such that + * + * s1[0,n-1] == s2[0,n-1] + * and n <= (s2_limit - s2). + * + * Does not read *s2_limit or beyond. + * Does not read *(s1 + (s2_limit - s2)) or beyond. + * Requires that s2_limit >= s2. + * + * Separate implementation for x86_64, for speed. Uses the fact that + * x86_64 is little endian. + */ +#if defined(__x86_64__) +static inline int +FindMatchLength(const char *s1, const char *s2, const char *s2_limit) +{ + uint64_t x; + int matched, matching_bits; + DCHECK_GE(s2_limit, s2); + matched = 0; + /* + * Find out how long the match is. We loop over the data 64 bits at a + * time until we find a 64-bit block that doesn't match; then we find + * the first non-matching bit and use that to calculate the total + * length of the match. + */ + while (likely(s2 <= s2_limit - 8)) { + if (unlikely(UNALIGNED_LOAD64(s1 + matched) == + UNALIGNED_LOAD64(s2))) { + s2 += 8; + matched += 8; + } else { + /* + * On current (mid-2008) Opteron models there is a 3% + * more efficient code sequence to find the first + * non-matching byte. However, what follows is ~10% + * better on Intel Core 2 and newer, and we expect AMD's + * bsf instruction to improve. + */ + x = UNALIGNED_LOAD64(s1 + matched) ^ + UNALIGNED_LOAD64(s2); + matching_bits = FindLSBSetNonZero64(x); + matched += matching_bits >> 3; + return matched; + } + } + while (likely(s2 < s2_limit)) { + if (likely(s1[matched] == *s2)) { + ++s2; + ++matched; + } else { + return matched; + } + } + return matched; +} +#else /* !defined(__x86_64__) */ +static inline int +FindMatchLength(const char *s1, const char *s2, const char *s2_limit) +{ + /* Implementation based on the x86-64 version, above. */ + int matched = 0; + DCHECK_GE(s2_limit, s2); + + while (s2 <= s2_limit - 4 && + UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) { + s2 += 4; + matched += 4; + } +#if defined(__LITTLE_ENDIAN) + if (s2 <= s2_limit - 4) { + uint32_t x = UNALIGNED_LOAD32(s1 + matched) ^ + UNALIGNED_LOAD32(s2); + int matching_bits = FindLSBSetNonZero(x); + matched += matching_bits >> 3; + } else { + while ((s2 < s2_limit) && (s1[matched] == *s2)) { + ++s2; + ++matched; + } + } +#else + while ((s2 < s2_limit) && (s1[matched] == *s2)) { + ++s2; + ++matched; + } +#endif + return matched; +} +#endif /* !defined(__x86_64__) */ + + +static inline char* +EmitLiteral(char *op, const char *literal, int len, int allow_fast_path) +{ + int n = len - 1; /* Zero-length literals are disallowed */ + if (n < 60) { + /* Fits in tag byte */ + *op++ = LITERAL | (n << 2); + /* + The vast majority of copies are below 16 bytes, for which a + call to memcpy is overkill. 
This fast path can sometimes + copy up to 15 bytes too much, but that is okay in the + main loop, since we have a bit to go on for both sides: + - The input will always have kInputMarginBytes = 15 extra + available bytes, as long as we're in the main loop, and + if not, allow_fast_path = false. + - The output will always have 32 spare bytes (see + snappy_max_compressed_length). + */ + if (allow_fast_path && len <= 16) { + UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal)); + UNALIGNED_STORE64(op + 8, + UNALIGNED_LOAD64(literal + 8)); + return op + len; + } + } else { + /* Encode in upcoming bytes */ + char *base = op; + int count = 0; + op++; + while (n > 0) { + *op++ = n & 0xff; + n >>= 8; + count++; + } + DCHECK_GE(count, 1); + DCHECK_LE(count, 4); + *base = LITERAL | ((59+count) << 2); + } + memcpy(op, literal, len); + return op + len; +} + +static inline char* +EmitCopyLessThan64(char *op, int offset, int len) +{ + DCHECK_LE(len, 64); + DCHECK_GE(len, 4); + DCHECK_LT(offset, 65536); + + if ((len < 12) && (offset < 2048)) { + int len_minus_4 = len - 4; + DCHECK_LT(len_minus_4, 8); /* Must fit in 3 bits */ + *op++ = COPY_1_BYTE_OFFSET | + ((len_minus_4) << 2) | + ((offset >> 8) << 5); + *op++ = offset & 0xff; + } else { + *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2); + put_unaligned_le16(offset, op); + op += 2; + } + return op; +} + +static inline char* +EmitCopy(char *op, int offset, int len) +{ + /* Emit 64 byte copies but make sure to keep at least four bytes + * reserved */ + while (len >= 68) { + op = EmitCopyLessThan64(op, offset, 64); + len -= 64; + } + + /* Emit an extra 60 byte copy if have too much data to fit in one + * copy */ + if (len > 64) { + op = EmitCopyLessThan64(op, offset, 60); + len -= 60; + } + + /* Emit remainder */ + op = EmitCopyLessThan64(op, offset, len); + return op; +} + + +/* + * For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will + * equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have + * empirically found that overlapping loads such as + * UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2) + * are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32_t. + */ +static inline uint32_t +GetUint32AtOffset(uint64_t v, int offset) +{ + DCHECK(0 <= offset && offset <= 4); +#ifdef __LITTLE_ENDIAN + return v >> (8 * offset); +#else + return v >> (32 - 8 * offset); +#endif +} + +#define kInputMarginBytes 15 +char* +csnappy_compress_fragment( + const char *input, + const uint32_t input_size, + char *op, + void *working_memory, + const int workmem_bytes_power_of_two) +{ + const char *ip, *ip_end, *base_ip, *next_emit, *ip_limit, *next_ip, + *candidate, *base; + uint16_t *table = (uint16_t *)working_memory; + uint64_t input_bytes; + uint32_t hash, next_hash, prev_hash, cur_hash, skip, candidate_bytes; + int shift, matched; + + DCHECK_GE(workmem_bytes_power_of_two, 9); + DCHECK_LE(workmem_bytes_power_of_two, 15); + /* Table of 2^X bytes, need (X-1) bits to address table of uint16_t. + * How many bits of 32bit hash function result are discarded? */ + shift = 33 - workmem_bytes_power_of_two; + /* "ip" is the input pointer, and "op" is the output pointer. */ + ip = input; + DCHECK_LE(input_size, kBlockSize); + ip_end = input + input_size; + base_ip = ip; + /* Bytes in [next_emit, ip) will be emitted as literal bytes. Or + [next_emit, ip_end) after the main loop. 
*/ + next_emit = ip; + + if (unlikely(input_size < kInputMarginBytes)) + goto emit_remainder; + + memset(working_memory, 0, 1 << workmem_bytes_power_of_two); + + ip_limit = input + input_size - kInputMarginBytes; + next_hash = Hash(++ip, shift); + +main_loop: + DCHECK_LT(next_emit, ip); + /* + * The body of this loop calls EmitLiteral once and then EmitCopy one or + * more times. (The exception is that when we're close to exhausting + * the input we goto emit_remainder.) + * + * In the first iteration of this loop we're just starting, so + * there's nothing to copy, so calling EmitLiteral once is + * necessary. And we only start a new iteration when the + * current iteration has determined that a call to EmitLiteral will + * precede the next call to EmitCopy (if any). + * + * Step 1: Scan forward in the input looking for a 4-byte-long match. + * If we get close to exhausting the input then goto emit_remainder. + * + * Heuristic match skipping: If 32 bytes are scanned with no matches + * found, start looking only at every other byte. If 32 more bytes are + * scanned, look at every third byte, etc.. When a match is found, + * immediately go back to looking at every byte. This is a small loss + * (~5% performance, ~0.1% density) for compressible data due to more + * bookkeeping, but for non-compressible data (such as JPEG) it's a huge + * win since the compressor quickly "realizes" the data is incompressible + * and doesn't bother looking for matches everywhere. + * + * The "skip" variable keeps track of how many bytes there are since the + * last match; dividing it by 32 (ie. right-shifting by five) gives the + * number of bytes to move ahead for each iteration. + */ + skip = 32; + + next_ip = ip; + do { + ip = next_ip; + hash = next_hash; + DCHECK_EQ(hash, Hash(ip, shift)); + next_ip = ip + (skip++ >> 5); + if (unlikely(next_ip > ip_limit)) + goto emit_remainder; + next_hash = Hash(next_ip, shift); + candidate = base_ip + table[hash]; + DCHECK_GE(candidate, base_ip); + DCHECK_LT(candidate, ip); + + table[hash] = ip - base_ip; + } while (likely(UNALIGNED_LOAD32(ip) != + UNALIGNED_LOAD32(candidate))); + + /* + * Step 2: A 4-byte match has been found. We'll later see if more + * than 4 bytes match. But, prior to the match, input + * bytes [next_emit, ip) are unmatched. Emit them as "literal bytes." + */ + DCHECK_LE(next_emit + 16, ip_end); + op = EmitLiteral(op, next_emit, ip - next_emit, 1); + + /* + * Step 3: Call EmitCopy, and then see if another EmitCopy could + * be our next move. Repeat until we find no match for the + * input immediately after what was consumed by the last EmitCopy call. + * + * If we exit this loop normally then we need to call EmitLiteral next, + * though we don't yet know how big the literal will be. We handle that + * by proceeding to the next iteration of the main loop. We also can exit + * this loop via goto if we get close to exhausting the input. + */ + input_bytes = 0; + candidate_bytes = 0; + + do { + /* We have a 4-byte match at ip, and no need to emit any + "literal bytes" prior to ip. */ + base = ip; + matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end); + ip += matched; + DCHECK_EQ(0, memcmp(base, candidate, matched)); + op = EmitCopy(op, base - candidate, matched); + /* We could immediately start working at ip now, but to improve + compression we first update table[Hash(ip - 1, ...)]. 
*/ + next_emit = ip; + if (unlikely(ip >= ip_limit)) + goto emit_remainder; + input_bytes = UNALIGNED_LOAD64(ip - 1); + prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift); + table[prev_hash] = ip - base_ip - 1; + cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift); + candidate = base_ip + table[cur_hash]; + candidate_bytes = UNALIGNED_LOAD32(candidate); + table[cur_hash] = ip - base_ip; + } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes); + + next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift); + ++ip; + goto main_loop; + +emit_remainder: + /* Emit the remaining bytes as a literal */ + if (next_emit < ip_end) + op = EmitLiteral(op, next_emit, ip_end - next_emit, 0); + + return op; +} +#if defined(__KERNEL__) && !defined(STATIC) +EXPORT_SYMBOL(csnappy_compress_fragment); +#endif + +uint32_t __attribute__((const)) +csnappy_max_compressed_length(uint32_t source_len) +{ + return 32 + source_len + source_len/6; +} +#if defined(__KERNEL__) && !defined(STATIC) +EXPORT_SYMBOL(csnappy_max_compressed_length); +#endif + +void +csnappy_compress( + const char *input, + uint32_t input_length, + char *compressed, + uint32_t *compressed_length, + void *working_memory, + const int workmem_bytes_power_of_two) +{ + int workmem_size; + int num_to_read; + uint32_t written = 0; + char *p = encode_varint32(compressed, input_length); + written += (p - compressed); + compressed = p; + while (input_length > 0) { + num_to_read = min(input_length, (uint32_t)kBlockSize); + workmem_size = workmem_bytes_power_of_two; + if (num_to_read < kBlockSize) { + for (workmem_size = 9; + workmem_size < workmem_bytes_power_of_two; + ++workmem_size) { + if ((1 << (workmem_size-1)) >= num_to_read) + break; + } + } + p = csnappy_compress_fragment( + input, num_to_read, compressed, + working_memory, workmem_size); + written += (p - compressed); + compressed = p; + input_length -= num_to_read; + input += num_to_read; + } + *compressed_length = written; +} +#if defined(__KERNEL__) && !defined(STATIC) +EXPORT_SYMBOL(csnappy_compress); + +MODULE_LICENSE("BSD"); +MODULE_DESCRIPTION("Snappy Compressor"); +#endif diff --git a/drivers/staging/snappy/csnappy_decompress.c b/drivers/staging/snappy/csnappy_decompress.c new file mode 100644 index 00000000..44df3116 --- /dev/null +++ b/drivers/staging/snappy/csnappy_decompress.c @@ -0,0 +1,321 @@ +/* +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +File modified for the Linux Kernel by +Zeev Tarantov gmail.com> +*/ + +#include "csnappy_internal.h" +#ifdef __KERNEL__ +#include +#include +#endif +#include "csnappy.h" + + +/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */ +static const uint32_t wordmask[] = { + 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu +}; + +/* + * Data stored per entry in lookup table: + * Range Bits-used Description + * ------------------------------------ + * 1..64 0..7 Literal/copy length encoded in opcode byte + * 0..7 8..10 Copy offset encoded in opcode byte / 256 + * 0..4 11..13 Extra bytes after opcode + * + * We use eight bits for the length even though 7 would have sufficed + * because of efficiency reasons: + * (1) Extracting a byte is faster than a bit-field + * (2) It properly aligns copy offset so we do not need a <<8 + */ +static const uint16_t char_table[256] = { + 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002, + 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004, + 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006, + 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008, + 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a, + 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c, + 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e, + 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010, + 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012, + 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014, + 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016, + 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018, + 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a, + 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c, + 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e, + 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020, + 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022, + 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024, + 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026, + 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028, + 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a, + 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c, + 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e, + 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030, + 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032, + 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034, + 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036, + 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038, + 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a, + 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c, + 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e, + 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040 +}; + +/* + * Copy "len" 
bytes from "src" to "op", one byte at a time. Used for + * handling COPY operations where the input and output regions may + * overlap. For example, suppose: + * src == "ab" + * op == src + 2 + * len == 20 + * After IncrementalCopy(src, op, len), the result will have + * eleven copies of "ab" + * ababababababababababab + * Note that this does not match the semantics of either memcpy() + * or memmove(). + */ +static inline void IncrementalCopy(const char *src, char *op, int len) +{ + DCHECK_GT(len, 0); + do { + *op++ = *src++; + } while (--len > 0); +} + +/* + * Equivalent to IncrementalCopy except that it can write up to ten extra + * bytes after the end of the copy, and that it is faster. + * + * The main part of this loop is a simple copy of eight bytes at a time until + * we've copied (at least) the requested amount of bytes. However, if op and + * src are less than eight bytes apart (indicating a repeating pattern of + * length < 8), we first need to expand the pattern in order to get the correct + * results. For instance, if the buffer looks like this, with the eight-byte + * and patterns marked as intervals: + * + * abxxxxxxxxxxxx + * [------] src + * [------] op + * + * a single eight-byte copy from to will repeat the pattern once, + * after which we can move two bytes without moving : + * + * ababxxxxxxxxxx + * [------] src + * [------] op + * + * and repeat the exercise until the two no longer overlap. + * + * This allows us to do very well in the special case of one single byte + * repeated many times, without taking a big hit for more general cases. + * + * The worst case of extra writing past the end of the match occurs when + * op - src == 1 and len == 1; the last copy will read from byte positions + * [0..7] and write to [4..11], whereas it was only supposed to write to + * position 1. Thus, ten excess bytes. + */ +static const int kMaxIncrementCopyOverflow = 10; +static inline void IncrementalCopyFastPath(const char *src, char *op, int len) +{ + while (op - src < 8) { + UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src)); + len -= op - src; + op += op - src; + } + while (len > 0) { + UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src)); + src += 8; + op += 8; + len -= 8; + } +} + + +/* A type that writes to a flat array. */ +struct SnappyArrayWriter { + char *base; + char *op; + char *op_limit; +}; + +static inline int +SAW__Append(struct SnappyArrayWriter *this, + const char *ip, uint32_t len, int allow_fast_path) +{ + char *op = this->op; + const int space_left = this->op_limit - op; + /*Fast path, used for the majority (about 90%) of dynamic invocations.*/ + if (allow_fast_path && len <= 16 && space_left >= 16) { + UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip)); + UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8)); + } else { + if (space_left < len) + return CSNAPPY_E_OUTPUT_OVERRUN; + memcpy(op, ip, len); + } + this->op = op + len; + return CSNAPPY_E_OK; +} + +static inline int +SAW__AppendFromSelf(struct SnappyArrayWriter *this, + uint32_t offset, uint32_t len) +{ + char *op = this->op; + const int space_left = this->op_limit - op; + /* -1u catches offset==0 */ + if (op - this->base <= offset - 1u) + return CSNAPPY_E_DATA_MALFORMED; + /* Fast path, used for the majority (70-80%) of dynamic invocations. 
*/ + if (len <= 16 && offset >= 8 && space_left >= 16) { + UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset)); + UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8)); + } else if (space_left >= len + kMaxIncrementCopyOverflow) { + IncrementalCopyFastPath(op - offset, op, len); + } else { + if (space_left < len) + return CSNAPPY_E_OUTPUT_OVERRUN; + IncrementalCopy(op - offset, op, len); + } + this->op = op + len; + return CSNAPPY_E_OK; +} + + +int +csnappy_get_uncompressed_length( + const char *src, + uint32_t src_len, + uint32_t *result) +{ + const char *src_base = src; + uint32_t shift = 0; + uint8_t c; + /* Length is encoded in 1..5 bytes */ + *result = 0; + for (;;) { + if (shift >= 32) + goto err_out; + if (src_len == 0) + goto err_out; + c = *(const uint8_t *)src++; + src_len -= 1; + *result |= (uint32_t)(c & 0x7f) << shift; + if (c < 128) + break; + shift += 7; + } + return src - src_base; +err_out: + return CSNAPPY_E_HEADER_BAD; +} +#if defined(__KERNEL__) && !defined(STATIC) +EXPORT_SYMBOL(csnappy_get_uncompressed_length); +#endif + +int +csnappy_decompress_noheader( + const char *src, + uint32_t src_remaining, + char *dst, + uint32_t *dst_len) +{ + struct SnappyArrayWriter writer; + uint32_t length, trailer, opword, extra_bytes; + int ret; + uint8_t opcode; + char scratch[5]; + writer.op = writer.base = dst; + writer.op_limit = writer.op + *dst_len; + while (src_remaining) { + if (unlikely(src_remaining < 5)) { + memcpy(scratch, src, src_remaining); + src = scratch; + } + opcode = *(const uint8_t *)src++; + opword = char_table[opcode]; + extra_bytes = opword >> 11; + trailer = get_unaligned_le32(src) & wordmask[extra_bytes]; + src += extra_bytes; + src_remaining -= 1 + extra_bytes; + length = opword & 0xff; + if (opcode & 0x3) { + trailer += opword & 0x700; + ret = SAW__AppendFromSelf(&writer, trailer, length); + if (ret < 0) + return ret; + } else { + length += trailer; + if (unlikely(src_remaining < length)) + return CSNAPPY_E_DATA_MALFORMED; + ret = src_remaining >= 16; + ret = SAW__Append(&writer, src, length, ret); + if (ret < 0) + return ret; + src += length; + src_remaining -= length; + } + } + *dst_len = writer.op - writer.base; + return CSNAPPY_E_OK; +} +#if defined(__KERNEL__) && !defined(STATIC) +EXPORT_SYMBOL(csnappy_decompress_noheader); +#endif + +int +csnappy_decompress( + const char *src, + uint32_t src_len, + char *dst, + uint32_t dst_len) +{ + int n; + uint32_t olen = 0; + /* Read uncompressed length from the front of the compressed input */ + n = csnappy_get_uncompressed_length(src, src_len, &olen); + if (unlikely(n < CSNAPPY_E_OK)) + return n; + /* Protect against possible DoS attack */ + if (unlikely(olen > dst_len)) + return CSNAPPY_E_OUTPUT_INSUF; + return csnappy_decompress_noheader(src + n, src_len - n, dst, &olen); +} +#if defined(__KERNEL__) && !defined(STATIC) +EXPORT_SYMBOL(csnappy_decompress); + +MODULE_LICENSE("BSD"); +MODULE_DESCRIPTION("Snappy Decompressor"); +#endif diff --git a/drivers/staging/snappy/csnappy_internal.h b/drivers/staging/snappy/csnappy_internal.h new file mode 100644 index 00000000..c092217b --- /dev/null +++ b/drivers/staging/snappy/csnappy_internal.h @@ -0,0 +1,83 @@ +/* +Copyright 2011 Google Inc. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Various stubs for the open-source version of Snappy. + +File modified for the Linux Kernel by +Zeev Tarantov gmail.com> +*/ + +#ifndef CSNAPPY_INTERNAL_H_ +#define CSNAPPY_INTERNAL_H_ + +#ifndef __KERNEL__ +#include "csnappy_internal_userspace.h" +#else + +#include +#include +#include +#include +#include + +#ifdef DEBUG +#define DCHECK(cond) if (!(cond)) \ + printk(KERN_DEBUG "assert failed @ %s:%i\n", \ + __FILE__, __LINE__) +#else +#define DCHECK(cond) +#endif + +#define UNALIGNED_LOAD16(_p) get_unaligned((const uint16_t *)(_p)) +#define UNALIGNED_LOAD32(_p) get_unaligned((const uint32_t *)(_p)) +#define UNALIGNED_LOAD64(_p) get_unaligned((const uint64_t *)(_p)) +#define UNALIGNED_STORE16(_p, _val) put_unaligned((_val), (uint16_t *)(_p)) +#define UNALIGNED_STORE32(_p, _val) put_unaligned((_val), (uint32_t *)(_p)) +#define UNALIGNED_STORE64(_p, _val) put_unaligned((_val), (uint64_t *)(_p)) + +#define FindLSBSetNonZero(n) __builtin_ctz(n) +#define FindLSBSetNonZero64(n) __builtin_ctzll(n) + +#endif /* __KERNEL__ */ + +#define DCHECK_EQ(a, b) DCHECK(((a) == (b))) +#define DCHECK_NE(a, b) DCHECK(((a) != (b))) +#define DCHECK_GT(a, b) DCHECK(((a) > (b))) +#define DCHECK_GE(a, b) DCHECK(((a) >= (b))) +#define DCHECK_LT(a, b) DCHECK(((a) < (b))) +#define DCHECK_LE(a, b) DCHECK(((a) <= (b))) + +enum { + LITERAL = 0, + COPY_1_BYTE_OFFSET = 1, /* 3 bit length + 3 bits of offset in opcode */ + COPY_2_BYTE_OFFSET = 2, + COPY_4_BYTE_OFFSET = 3 +}; + +#endif /* CSNAPPY_INTERNAL_H_ */ diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig index 3bec4dba..f832016b 100644 --- a/drivers/staging/zram/Kconfig +++ b/drivers/staging/zram/Kconfig @@ -6,8 +6,6 @@ config ZRAM tristate "Compressed RAM block device support" depends on BLOCK && SYSFS select XVMALLOC - select LZO_COMPRESS - select LZO_DECOMPRESS default n help Creates virtual block devices called /dev/zramX (X = 0, 1, ...). @@ -21,6 +19,22 @@ config ZRAM See zram.txt for more information. Project home: http://compcache.googlecode.com/ +config ZRAM_NUM_DEVICES + int "Default number of zram devices" + depends on ZRAM + range 1 32 + default 1 + help + Select default number of zram devices. You can override this value + using 'num_devices' module parameter. 
+ +config ZRAM_DEFAULT_DISKSIZE + int "Default size of zram in bytes" + depends on ZRAM + default 100663296 + help + Set default zram disk size (default ~ 96MB) + config ZRAM_DEBUG bool "Compressed RAM block device debug support" depends on ZRAM @@ -28,3 +42,23 @@ config ZRAM_DEBUG help This option adds additional debugging code to the compressed RAM block device driver. + +choice ZRAM_COMPRESS + prompt "compression method" + depends on ZRAM + default ZRAM_LZO + help + Select the compression method used by zram. + LZO is the default. Snappy compresses a bit worse (around ~2%) but + much (~2x) faster, at least on x86-64. +config ZRAM_LZO + bool "LZO compression" + select LZO_COMPRESS + select LZO_DECOMPRESS + +config ZRAM_SNAPPY + bool "Snappy compression" + depends on SNAPPY_COMPRESS + depends on SNAPPY_DECOMPRESS + +endchoice diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c index 1f9c5082..93ba8e94 100644 --- a/drivers/staging/zram/xvmalloc.c +++ b/drivers/staging/zram/xvmalloc.c @@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag) * This is called from xv_malloc/xv_free path, so it * needs to be fast. */ -static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type) +static void *get_ptr_atomic(struct page *page, u16 offset) { unsigned char *base; - base = kmap_atomic(page, type); + base = kmap_atomic(page); return base + offset; } -static void put_ptr_atomic(void *ptr, enum km_type type) +static void put_ptr_atomic(void *ptr) { - kunmap_atomic(ptr, type); + kunmap_atomic(ptr); } static u32 get_blockprev(struct block_header *block) @@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset, if (block->link.next_page) { nextblock = get_ptr_atomic(block->link.next_page, - block->link.next_offset, KM_USER1); + block->link.next_offset); nextblock->link.prev_page = page; nextblock->link.prev_offset = offset; - put_ptr_atomic(nextblock, KM_USER1); + put_ptr_atomic(nextblock); /* If there was a next page then the free bits are set. */ return; } @@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset, if (block->link.prev_page) { tmpblock = get_ptr_atomic(block->link.prev_page, - block->link.prev_offset, KM_USER1); + block->link.prev_offset); tmpblock->link.next_page = block->link.next_page; tmpblock->link.next_offset = block->link.next_offset; - put_ptr_atomic(tmpblock, KM_USER1); + put_ptr_atomic(tmpblock); } if (block->link.next_page) { tmpblock = get_ptr_atomic(block->link.next_page, - block->link.next_offset, KM_USER1); + block->link.next_offset); tmpblock->link.prev_page = block->link.prev_page; tmpblock->link.prev_offset = block->link.prev_offset; - put_ptr_atomic(tmpblock, KM_USER1); + put_ptr_atomic(tmpblock); } /* Is this block is at the head of the freelist? 
*/ @@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset, if (pool->freelist[slindex].page) { struct block_header *tmpblock; tmpblock = get_ptr_atomic(pool->freelist[slindex].page, - pool->freelist[slindex].offset, - KM_USER1); + pool->freelist[slindex].offset); tmpblock->link.prev_page = NULL; tmpblock->link.prev_offset = 0; - put_ptr_atomic(tmpblock, KM_USER1); + put_ptr_atomic(tmpblock); } else { /* This freelist bucket is empty */ __clear_bit(slindex % BITS_PER_LONG, @@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags) stat_inc(&pool->total_pages); spin_lock(&pool->lock); - block = get_ptr_atomic(page, 0, KM_USER0); + block = get_ptr_atomic(page, 0); block->size = PAGE_SIZE - XV_ALIGN; set_flag(block, BLOCK_FREE); @@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags) insert_block(pool, page, 0, block); - put_ptr_atomic(block, KM_USER0); + put_ptr_atomic(block); spin_unlock(&pool->lock); return 0; @@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page, return -ENOMEM; } - block = get_ptr_atomic(*page, *offset, KM_USER0); + block = get_ptr_atomic(*page, *offset); remove_block(pool, *page, *offset, block, index); @@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page, block->size = origsize; clear_flag(block, BLOCK_FREE); - put_ptr_atomic(block, KM_USER0); + put_ptr_atomic(block); spin_unlock(&pool->lock); *offset += XV_ALIGN; @@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset) spin_lock(&pool->lock); - page_start = get_ptr_atomic(page, 0, KM_USER0); + page_start = get_ptr_atomic(page, 0); block = (struct block_header *)((char *)page_start + offset); /* Catch double free bugs */ @@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset) /* No used objects in this page. Free it. */ if (block->size == PAGE_SIZE - XV_ALIGN) { - put_ptr_atomic(page_start, KM_USER0); + put_ptr_atomic(page_start); spin_unlock(&pool->lock); __free_page(page); @@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset) set_blockprev(tmpblock, offset); } - put_ptr_atomic(page_start, KM_USER0); + put_ptr_atomic(page_start); spin_unlock(&pool->lock); } EXPORT_SYMBOL_GPL(xv_free); diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index aab4ec48..a23ad8bb 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -29,18 +29,62 @@ #include #include #include -#include #include #include #include "zram_drv.h" +#if defined(CONFIG_ZRAM_LZO) +#include +#define WMSIZE LZO1X_MEM_COMPRESS +#define COMPRESS(s, sl, d, dl, wm) \ + lzo1x_1_compress(s, sl, d, dl, wm) +#define DECOMPRESS(s, sl, d, dl) \ + lzo1x_decompress_safe(s, sl, d, dl) +#elif defined(CONFIG_ZRAM_SNAPPY) +#include "../snappy/csnappy.h" /* if built in drivers/staging */ +#define WMSIZE_ORDER ((PAGE_SHIFT > 14) ? 
(15) : (PAGE_SHIFT+1)) +#define WMSIZE (1 << WMSIZE_ORDER) +static int +snappy_compress_( + const unsigned char *src, + size_t src_len, + unsigned char *dst, + size_t *dst_len, + void *workmem) +{ + const unsigned char *end = csnappy_compress_fragment( + src, (uint32_t)src_len, dst, workmem, WMSIZE_ORDER); + *dst_len = end - dst; + return 0; +} +static int +snappy_decompress_( + const unsigned char *src, + size_t src_len, + unsigned char *dst, + size_t *dst_len) +{ + uint32_t dst_len_ = (uint32_t)*dst_len; + int ret = csnappy_decompress_noheader(src, src_len, dst, &dst_len_); + *dst_len = (size_t)dst_len_; + return ret; +} +#define COMPRESS(s, sl, d, dl, wm) \ + snappy_compress_(s, sl, d, dl, wm) +#define DECOMPRESS(s, sl, d, dl) \ + snappy_decompress_(s, sl, d, dl) +#else +#error either CONFIG_ZRAM_LZO or CONFIG_ZRAM_SNAPPY must be defined +#endif + + /* Globals */ static int zram_major; -struct zram *devices; +struct zram *zram_devices; /* Module params (documentation at end) */ -unsigned int num_devices; +unsigned int zram_num_devices; static void zram_stat_inc(u32 *v) { @@ -104,33 +148,19 @@ static int page_zero_filled(void *ptr) return 1; } -static void zram_set_disksize(struct zram *zram, size_t totalram_bytes) +static u64 zram_default_disksize_bytes(void) { - if (!zram->disksize) { - pr_info( - "disk size not provided. You can use disksize_kb module " - "param to specify size.\nUsing default: (%u%% of RAM).\n", - default_disksize_perc_ram - ); - zram->disksize = default_disksize_perc_ram * - (totalram_bytes / 100); - } - - if (zram->disksize > 2 * (totalram_bytes)) { - pr_info( - "There is little point creating a zram of greater than " - "twice the size of memory since we expect a 2:1 compression " - "ratio. Note that zram uses about 0.1%% of the size of " - "the disk when not in use so a huge zram is " - "wasteful.\n" - "\tMemory Size: %zu kB\n" - "\tSize you selected: %llu kB\n" - "Continuing anyway ...\n", - totalram_bytes >> 10, zram->disksize - ); - } +#if 0 + return ((totalram_pages << PAGE_SHIFT) * + default_disksize_perc_ram / 100) & PAGE_MASK; +#endif + return CONFIG_ZRAM_DEFAULT_DISKSIZE; +} - zram->disksize &= PAGE_MASK; +static void zram_set_disksize(struct zram *zram, u64 size_bytes) +{ + zram->disksize = size_bytes; + set_capacity(zram->disk, size_bytes >> SECTOR_SHIFT); } static void zram_free_page(struct zram *zram, size_t index) @@ -161,9 +191,9 @@ static void zram_free_page(struct zram *zram, size_t index) goto out; } - obj = kmap_atomic(page, KM_USER0) + offset; + obj = kmap_atomic(page) + offset; clen = xv_get_object_size(obj) - sizeof(struct zobj_header); - kunmap_atomic(obj, KM_USER0); + kunmap_atomic(obj); xv_free(zram->mem_pool, page, offset); if (clen <= PAGE_SIZE / 2) @@ -177,224 +207,357 @@ static void zram_free_page(struct zram *zram, size_t index) zram->table[index].offset = 0; } -static void handle_zero_page(struct page *page) +static void handle_zero_page(struct bio_vec *bvec) { + struct page *page = bvec->bv_page; void *user_mem; - user_mem = kmap_atomic(page, KM_USER0); - memset(user_mem, 0, PAGE_SIZE); - kunmap_atomic(user_mem, KM_USER0); + user_mem = kmap_atomic(page); + memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); + kunmap_atomic(user_mem); flush_dcache_page(page); } -static void handle_uncompressed_page(struct zram *zram, - struct page *page, u32 index) +static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec, + u32 index, int offset) { + struct page *page = bvec->bv_page; unsigned char *user_mem, *cmem; - user_mem = 
kmap_atomic(page, KM_USER0); - cmem = kmap_atomic(zram->table[index].page, KM_USER1) + - zram->table[index].offset; + user_mem = kmap_atomic(page); + cmem = kmap_atomic(zram->table[index].page); - memcpy(user_mem, cmem, PAGE_SIZE); - kunmap_atomic(user_mem, KM_USER0); - kunmap_atomic(cmem, KM_USER1); + memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len); + kunmap_atomic(cmem); + kunmap_atomic(user_mem); flush_dcache_page(page); } -static void zram_read(struct zram *zram, struct bio *bio) +static inline int is_partial_io(struct bio_vec *bvec) { + return bvec->bv_len != PAGE_SIZE; +} - int i; - u32 index; - struct bio_vec *bvec; - - zram_stat64_inc(zram, &zram->stats.num_reads); - index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; +static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, + u32 index, int offset, struct bio *bio) +{ + int ret; + size_t clen; + struct page *page; + struct zobj_header *zheader; + unsigned char *user_mem, *cmem, *uncmem = NULL; - bio_for_each_segment(bvec, bio, i) { - int ret; - size_t clen; - struct page *page; - struct zobj_header *zheader; - unsigned char *user_mem, *cmem; + page = bvec->bv_page; - page = bvec->bv_page; + if (zram_test_flag(zram, index, ZRAM_ZERO)) { + handle_zero_page(bvec); + return 0; + } - if (zram_test_flag(zram, index, ZRAM_ZERO)) { - handle_zero_page(page); - index++; - continue; - } + /* Requested page is not present in compressed area */ + if (unlikely(!zram->table[index].page)) { + pr_debug("Read before write: sector=%lu, size=%u", + (ulong)(bio->bi_sector), bio->bi_size); + handle_zero_page(bvec); + return 0; + } - /* Requested page is not present in compressed area */ - if (unlikely(!zram->table[index].page)) { - pr_debug("Read before write: sector=%lu, size=%u", - (ulong)(bio->bi_sector), bio->bi_size); - handle_zero_page(page); - index++; - continue; - } + /* Page is stored uncompressed since it's incompressible */ + if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { + handle_uncompressed_page(zram, bvec, index, offset); + return 0; + } - /* Page is stored uncompressed since it's incompressible */ - if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { - handle_uncompressed_page(zram, page, index); - index++; - continue; + if (is_partial_io(bvec)) { + /* Use a temporary buffer to decompress the page */ + uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!uncmem) { + pr_info("Error allocating temp memory!\n"); + return -ENOMEM; } + } - user_mem = kmap_atomic(page, KM_USER0); - clen = PAGE_SIZE; + user_mem = kmap_atomic(page); + if (!is_partial_io(bvec)) + uncmem = user_mem; + clen = PAGE_SIZE; - cmem = kmap_atomic(zram->table[index].page, KM_USER1) + - zram->table[index].offset; + cmem = kmap_atomic(zram->table[index].page) + + zram->table[index].offset; - ret = lzo1x_decompress_safe( + ret = DECOMPRESS( cmem + sizeof(*zheader), xv_get_object_size(cmem) - sizeof(*zheader), - user_mem, &clen); + uncmem, &clen); - kunmap_atomic(user_mem, KM_USER0); - kunmap_atomic(cmem, KM_USER1); + if (is_partial_io(bvec)) { + memcpy(user_mem + bvec->bv_offset, uncmem + offset, + bvec->bv_len); + kfree(uncmem); + } - /* Should NEVER happen. Return bio error if it does. */ - if (unlikely(ret != LZO_E_OK)) { - pr_err("Decompression failed! err=%d, page=%u\n", - ret, index); - zram_stat64_inc(zram, &zram->stats.failed_reads); - goto out; - } + kunmap_atomic(cmem); + kunmap_atomic(user_mem); - flush_dcache_page(page); - index++; + /* Should NEVER happen. Return bio error if it does. 
*/ + if (unlikely(ret)) { + pr_err("Decompression failed! err=%d, page=%u\n", ret, index); + zram_stat64_inc(zram, &zram->stats.failed_reads); + return ret; } - set_bit(BIO_UPTODATE, &bio->bi_flags); - bio_endio(bio, 0); - return; + flush_dcache_page(page); -out: - bio_io_error(bio); + return 0; } -static void zram_write(struct zram *zram, struct bio *bio) +static int zram_read_before_write(struct zram *zram, char *mem, u32 index) { - int i; - u32 index; - struct bio_vec *bvec; + int ret; + size_t clen = PAGE_SIZE; + struct zobj_header *zheader; + unsigned char *cmem; - zram_stat64_inc(zram, &zram->stats.num_writes); - index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; + if (zram_test_flag(zram, index, ZRAM_ZERO) || + !zram->table[index].page) { + memset(mem, 0, PAGE_SIZE); + return 0; + } - bio_for_each_segment(bvec, bio, i) { - int ret; - u32 offset; - size_t clen; - struct zobj_header *zheader; - struct page *page, *page_store; - unsigned char *user_mem, *cmem, *src; + cmem = kmap_atomic(zram->table[index].page) + + zram->table[index].offset; - page = bvec->bv_page; - src = zram->compress_buffer; + /* Page is stored uncompressed since it's incompressible */ + if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { + memcpy(mem, cmem, PAGE_SIZE); + kunmap_atomic(cmem); + return 0; + } - /* - * System overwrites unused sectors. Free memory associated - * with this sector now. - */ - if (zram->table[index].page || - zram_test_flag(zram, index, ZRAM_ZERO)) - zram_free_page(zram, index); - - mutex_lock(&zram->lock); - - user_mem = kmap_atomic(page, KM_USER0); - if (page_zero_filled(user_mem)) { - kunmap_atomic(user_mem, KM_USER0); - mutex_unlock(&zram->lock); - zram_stat_inc(&zram->stats.pages_zero); - zram_set_flag(zram, index, ZRAM_ZERO); - index++; - continue; - } + ret = DECOMPRESS(cmem + sizeof(*zheader), + xv_get_object_size(cmem) - sizeof(*zheader), + mem, &clen); + kunmap_atomic(cmem); + + /* Should NEVER happen. Return bio error if it does. */ + if (unlikely(ret)) { + pr_err("Decompression failed! err=%d, page=%u\n", ret, index); + zram_stat64_inc(zram, &zram->stats.failed_reads); + return ret; + } - ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen, - zram->compress_workmem); + return 0; +} - kunmap_atomic(user_mem, KM_USER0); +static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, + int offset) +{ + int ret; + u32 store_offset; + size_t clen; + struct zobj_header *zheader; + struct page *page, *page_store; + unsigned char *user_mem, *cmem, *src, *uncmem = NULL; - if (unlikely(ret != LZO_E_OK)) { - mutex_unlock(&zram->lock); - pr_err("Compression failed! err=%d\n", ret); - zram_stat64_inc(zram, &zram->stats.failed_writes); - goto out; - } + page = bvec->bv_page; + src = zram->compress_buffer; + if (is_partial_io(bvec)) { /* - * Page is incompressible. Store it as-is (uncompressed) - * since we do not want to return too many disk write - * errors which has side effect of hanging the system. + * This is a partial IO. We need to read the full page + * before to write the changes. 
*/ - if (unlikely(clen > max_zpage_size)) { - clen = PAGE_SIZE; - page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM); - if (unlikely(!page_store)) { - mutex_unlock(&zram->lock); - pr_info("Error allocating memory for " - "incompressible page: %u\n", index); - zram_stat64_inc(zram, - &zram->stats.failed_writes); - goto out; - } - - offset = 0; - zram_set_flag(zram, index, ZRAM_UNCOMPRESSED); - zram_stat_inc(&zram->stats.pages_expand); - zram->table[index].page = page_store; - src = kmap_atomic(page, KM_USER0); - goto memstore; + uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!uncmem) { + pr_info("Error allocating temp memory!\n"); + ret = -ENOMEM; + goto out; + } + ret = zram_read_before_write(zram, uncmem, index); + if (ret) { + kfree(uncmem); + goto out; } + } + + /* + * System overwrites unused sectors. Free memory associated + * with this sector now. + */ + if (zram->table[index].page || + zram_test_flag(zram, index, ZRAM_ZERO)) + zram_free_page(zram, index); + + user_mem = kmap_atomic(page); + + if (is_partial_io(bvec)) + memcpy(uncmem + offset, user_mem + bvec->bv_offset, + bvec->bv_len); + else + uncmem = user_mem; + + if (page_zero_filled(uncmem)) { + kunmap_atomic(user_mem); + if (is_partial_io(bvec)) + kfree(uncmem); + zram_stat_inc(&zram->stats.pages_zero); + zram_set_flag(zram, index, ZRAM_ZERO); + ret = 0; + goto out; + } + + COMPRESS(uncmem, PAGE_SIZE, src, &clen, + zram->compress_workmem); + ret = 0; - if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader), - &zram->table[index].page, &offset, - GFP_NOIO | __GFP_HIGHMEM)) { - mutex_unlock(&zram->lock); - pr_info("Error allocating memory for compressed " - "page: %u, size=%zu\n", index, clen); - zram_stat64_inc(zram, &zram->stats.failed_writes); + kunmap_atomic(user_mem); + if (is_partial_io(bvec)) + kfree(uncmem); + + if (unlikely(ret != 0)) { + pr_err("Compression failed! err=%d\n", ret); + goto out; + } + + /* + * Page is incompressible. Store it as-is (uncompressed) + * since we do not want to return too many disk write + * errors which has side effect of hanging the system. 
+ */ + if (unlikely(clen > max_zpage_size)) { + clen = PAGE_SIZE; + page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM); + if (unlikely(!page_store)) { + pr_info("Error allocating memory for " + "incompressible page: %u\n", index); + ret = -ENOMEM; goto out; } + store_offset = 0; + zram_set_flag(zram, index, ZRAM_UNCOMPRESSED); + zram_stat_inc(&zram->stats.pages_expand); + zram->table[index].page = page_store; + src = kmap_atomic(page); + goto memstore; + } + + if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader), + &zram->table[index].page, &store_offset, + GFP_NOIO | __GFP_HIGHMEM)) { + pr_info("Error allocating memory for compressed " + "page: %u, size=%zu\n", index, clen); + ret = -ENOMEM; + goto out; + } + memstore: - zram->table[index].offset = offset; + zram->table[index].offset = store_offset; - cmem = kmap_atomic(zram->table[index].page, KM_USER1) + - zram->table[index].offset; + cmem = kmap_atomic(zram->table[index].page) + + zram->table[index].offset; #if 0 - /* Back-reference needed for memory defragmentation */ - if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) { - zheader = (struct zobj_header *)cmem; - zheader->table_idx = index; - cmem += sizeof(*zheader); - } + /* Back-reference needed for memory defragmentation */ + if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) { + zheader = (struct zobj_header *)cmem; + zheader->table_idx = index; + cmem += sizeof(*zheader); + } #endif - memcpy(cmem, src, clen); + memcpy(cmem, src, clen); - kunmap_atomic(cmem, KM_USER1); - if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) - kunmap_atomic(src, KM_USER0); + kunmap_atomic(cmem); + if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) + kunmap_atomic(src); + + /* Update stats */ + zram_stat64_add(zram, &zram->stats.compr_size, clen); + zram_stat_inc(&zram->stats.pages_stored); + if (clen <= PAGE_SIZE / 2) + zram_stat_inc(&zram->stats.good_compress); + + return 0; + +out: + if (ret) + zram_stat64_inc(zram, &zram->stats.failed_writes); + return ret; +} + +static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, + int offset, struct bio *bio, int rw) +{ + int ret; + + if (rw == READ) { + down_read(&zram->lock); + ret = zram_bvec_read(zram, bvec, index, offset, bio); + up_read(&zram->lock); + } else { + down_write(&zram->lock); + ret = zram_bvec_write(zram, bvec, index, offset); + up_write(&zram->lock); + } + + return ret; +} + +static void update_position(u32 *index, int *offset, struct bio_vec *bvec) +{ + if (*offset + bvec->bv_len >= PAGE_SIZE) + (*index)++; + *offset = (*offset + bvec->bv_len) % PAGE_SIZE; +} - /* Update stats */ - zram_stat64_add(zram, &zram->stats.compr_size, clen); - zram_stat_inc(&zram->stats.pages_stored); - if (clen <= PAGE_SIZE / 2) - zram_stat_inc(&zram->stats.good_compress); +static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) +{ + int i, offset; + u32 index; + struct bio_vec *bvec; - mutex_unlock(&zram->lock); - index++; + switch (rw) { + case READ: + zram_stat64_inc(zram, &zram->stats.num_reads); + break; + case WRITE: + zram_stat64_inc(zram, &zram->stats.num_writes); + break; + } + + index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; + offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; + + bio_for_each_segment(bvec, bio, i) { + int max_transfer_size = PAGE_SIZE - offset; + + if (bvec->bv_len > max_transfer_size) { + /* + * zram_bvec_rw() can only make operation on a single + * zram page. Split the bio vector. 
+ */ + struct bio_vec bv; + + bv.bv_page = bvec->bv_page; + bv.bv_len = max_transfer_size; + bv.bv_offset = bvec->bv_offset; + + if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) + goto out; + + bv.bv_len = bvec->bv_len - max_transfer_size; + bv.bv_offset += max_transfer_size; + if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) + goto out; + } else + if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) + < 0) + goto out; + + update_position(&index, &offset, bvec); } set_bit(BIO_UPTODATE, &bio->bi_flags); @@ -406,14 +569,14 @@ static void zram_write(struct zram *zram, struct bio *bio) } /* - * Check if request is within bounds and page aligned. + * Check if request is within bounds and aligned on zram logical blocks. */ static inline int valid_io_request(struct zram *zram, struct bio *bio) { if (unlikely( (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) || - (bio->bi_sector & (SECTORS_PER_PAGE - 1)) || - (bio->bi_size & (PAGE_SIZE - 1)))) { + (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) || + (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) { return 0; } @@ -429,35 +592,34 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio) { struct zram *zram = queue->queuedata; + if (unlikely(!zram->init_done) && zram_init_device(zram)) + goto error; + + down_read(&zram->init_lock); + if (unlikely(!zram->init_done)) + goto error_unlock; + if (!valid_io_request(zram, bio)) { zram_stat64_inc(zram, &zram->stats.invalid_io); - bio_io_error(bio); - return 0; + goto error_unlock; } - if (unlikely(!zram->init_done) && zram_init_device(zram)) { - bio_io_error(bio); - return 0; - } + __zram_make_request(zram, bio, bio_data_dir(bio)); + up_read(&zram->init_lock); - switch (bio_data_dir(bio)) { - case READ: - zram_read(zram, bio); - break; - - case WRITE: - zram_write(zram, bio); - break; - } + return 0; +error_unlock: + up_read(&zram->init_lock); +error: + bio_io_error(bio); return 0; } -void zram_reset_device(struct zram *zram) +void __zram_reset_device(struct zram *zram) { size_t index; - mutex_lock(&zram->init_lock); zram->init_done = 0; /* Free various per-device buffers */ @@ -493,8 +655,14 @@ void zram_reset_device(struct zram *zram) /* Reset stats */ memset(&zram->stats, 0, sizeof(zram->stats)); - zram->disksize = 0; - mutex_unlock(&zram->init_lock); + zram_set_disksize(zram, zram_default_disksize_bytes()); +} + +void zram_reset_device(struct zram *zram) +{ + down_write(&zram->init_lock); + __zram_reset_device(zram); + up_write(&zram->init_lock); } int zram_init_device(struct zram *zram) @@ -502,41 +670,36 @@ int zram_init_device(struct zram *zram) int ret; size_t num_pages; - mutex_lock(&zram->init_lock); + down_write(&zram->init_lock); if (zram->init_done) { - mutex_unlock(&zram->init_lock); + up_write(&zram->init_lock); return 0; } - zram_set_disksize(zram, totalram_pages << PAGE_SHIFT); - - zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + zram->compress_workmem = kzalloc(WMSIZE, GFP_KERNEL); if (!zram->compress_workmem) { pr_err("Error allocating compressor working memory!\n"); ret = -ENOMEM; - goto fail; + goto fail_no_table; } - zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1); + zram->compress_buffer = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); if (!zram->compress_buffer) { pr_err("Error allocating compressor buffer space\n"); ret = -ENOMEM; - goto fail; + goto fail_no_table; } num_pages = zram->disksize >> PAGE_SHIFT; zram->table = vzalloc(num_pages * sizeof(*zram->table)); if (!zram->table) { 
pr_err("Error allocating zram address table\n"); - /* To prevent accessing table entries during cleanup */ - zram->disksize = 0; ret = -ENOMEM; - goto fail; + goto fail_no_table; } - set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); - /* zram devices sort of resembles non-rotational disks */ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); @@ -548,20 +711,23 @@ int zram_init_device(struct zram *zram) } zram->init_done = 1; - mutex_unlock(&zram->init_lock); + up_write(&zram->init_lock); pr_debug("Initialization done!\n"); return 0; +fail_no_table: + /* To prevent accessing table entries during cleanup */ + zram->disksize = 0; fail: - mutex_unlock(&zram->init_lock); - zram_reset_device(zram); - + __zram_reset_device(zram); + up_write(&zram->init_lock); pr_err("Initialization failed: err=%d\n", ret); return ret; } -void zram_slot_free_notify(struct block_device *bdev, unsigned long index) +static void zram_slot_free_notify(struct block_device *bdev, + unsigned long index) { struct zram *zram; @@ -579,8 +745,8 @@ static int create_device(struct zram *zram, int device_id) { int ret = 0; - mutex_init(&zram->lock); - mutex_init(&zram->init_lock); + init_rwsem(&zram->lock); + init_rwsem(&zram->init_lock); spin_lock_init(&zram->stat64_lock); zram->queue = blk_alloc_queue(GFP_KERNEL); @@ -611,8 +777,12 @@ static int create_device(struct zram *zram, int device_id) zram->disk->private_data = zram; snprintf(zram->disk->disk_name, 16, "zram%d", device_id); - /* Actual capacity set using syfs (/sys/block/zram/disksize */ - set_capacity(zram->disk, 0); + /* + * Set some default disksize. To set another disksize, user + * must reset the device and then write a new disksize to + * corresponding device's sysfs node. + */ + zram_set_disksize(zram, zram_default_disksize_bytes()); /* * To ensure that we always get PAGE_SIZE aligned @@ -657,9 +827,17 @@ static int __init zram_init(void) { int ret, dev_id; - if (num_devices > max_num_devices) { + /* + * Module parameter not specified by user. Use default + * value as defined during kernel config. + */ + if (zram_num_devices == 0) { + zram_num_devices = CONFIG_ZRAM_NUM_DEVICES; + } + + if (zram_num_devices > max_num_devices) { pr_warning("Invalid value for num_devices: %u\n", - num_devices); + zram_num_devices); ret = -EINVAL; goto out; } @@ -671,21 +849,21 @@ static int __init zram_init(void) goto out; } - if (!num_devices) { + if (!zram_num_devices) { pr_info("num_devices not specified. 
Using default: 1\n"); - num_devices = 1; + zram_num_devices = 1; } /* Allocate the device array and initialize each one */ - pr_info("Creating %u devices ...\n", num_devices); - devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL); - if (!devices) { + pr_info("Creating %u devices ...\n", zram_num_devices); + zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL); + if (!zram_devices) { ret = -ENOMEM; goto unregister; } - for (dev_id = 0; dev_id < num_devices; dev_id++) { - ret = create_device(&devices[dev_id], dev_id); + for (dev_id = 0; dev_id < zram_num_devices; dev_id++) { + ret = create_device(&zram_devices[dev_id], dev_id); if (ret) goto free_devices; } @@ -694,8 +872,8 @@ static int __init zram_init(void) free_devices: while (dev_id) - destroy_device(&devices[--dev_id]); - kfree(devices); + destroy_device(&zram_devices[--dev_id]); + kfree(zram_devices); unregister: unregister_blkdev(zram_major, "zram"); out: @@ -707,8 +885,8 @@ static void __exit zram_exit(void) int i; struct zram *zram; - for (i = 0; i < num_devices; i++) { - zram = &devices[i]; + for (i = 0; i < zram_num_devices; i++) { + zram = &zram_devices[i]; destroy_device(zram); if (zram->init_done) @@ -717,12 +895,12 @@ static void __exit zram_exit(void) unregister_blkdev(zram_major, "zram"); - kfree(devices); + kfree(zram_devices); pr_debug("Cleanup done!\n"); } -module_param(num_devices, uint, 0); -MODULE_PARM_DESC(num_devices, "Number of zram devices"); +module_param(zram_num_devices, uint, 0); +MODULE_PARM_DESC(zram_num_devices, "Number of zram devices"); module_init(zram_init); module_exit(zram_exit); diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h index 408b2c06..e5cd2469 100644 --- a/drivers/staging/zram/zram_drv.h +++ b/drivers/staging/zram/zram_drv.h @@ -47,7 +47,7 @@ static const unsigned default_disksize_perc_ram = 25; * Pages that compress to size greater than this are stored * uncompressed in memory. */ -static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3; +static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /* * NOTE: max_zpage_size must be less than or equal to: @@ -61,7 +61,10 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3; #define SECTOR_SIZE (1 << SECTOR_SHIFT) #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) -#define ZRAM_LOGICAL_BLOCK_SIZE 4096 +#define ZRAM_LOGICAL_BLOCK_SHIFT 12 +#define ZRAM_LOGICAL_BLOCK_SIZE (1 << ZRAM_LOGICAL_BLOCK_SHIFT) +#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \ + (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT)) /* Flags for zram pages (table[page_no].flags) */ enum zram_pageflags { @@ -104,13 +107,13 @@ struct zram { void *compress_buffer; struct table *table; spinlock_t stat64_lock; /* protect 64-bit stats */ - struct mutex lock; /* protect compression buffers against - * concurrent writes */ + struct rw_semaphore lock; /* protect compression buffers and table + * against concurrent read and writes */ struct request_queue *queue; struct gendisk *disk; int init_done; - /* Prevent concurrent execution of device init and reset */ - struct mutex init_lock; + /* Prevent concurrent execution of device init, reset and R/W request */ + struct rw_semaphore init_lock; /* * This is the limit on amount of *uncompressed* worth of data * we can store in a disk. 
@@ -120,13 +123,13 @@ struct zram { struct zram_stats stats; }; -extern struct zram *devices; -extern unsigned int num_devices; +extern struct zram *zram_devices; +extern unsigned int zram_num_devices; #ifdef CONFIG_SYSFS extern struct attribute_group zram_disk_attr_group; #endif extern int zram_init_device(struct zram *zram); -extern void zram_reset_device(struct zram *zram); +extern void __zram_reset_device(struct zram *zram); #endif diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c index a70cc010..0ea8ed29 100644 --- a/drivers/staging/zram/zram_sysfs.c +++ b/drivers/staging/zram/zram_sysfs.c @@ -34,8 +34,8 @@ static struct zram *dev_to_zram(struct device *dev) int i; struct zram *zram = NULL; - for (i = 0; i < num_devices; i++) { - zram = &devices[i]; + for (i = 0; i < zram_num_devices; i++) { + zram = &zram_devices[i]; if (disk_to_dev(zram->disk) == dev) break; } @@ -55,19 +55,23 @@ static ssize_t disksize_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int ret; + u64 disksize; struct zram *zram = dev_to_zram(dev); + ret = strict_strtoull(buf, 10, &disksize); + if (ret) + return ret; + + down_write(&zram->init_lock); if (zram->init_done) { + up_write(&zram->init_lock); pr_info("Cannot change disksize for initialized device\n"); return -EBUSY; } - ret = strict_strtoull(buf, 10, &zram->disksize); - if (ret) - return ret; - - zram->disksize = PAGE_ALIGN(zram->disksize); + zram->disksize = PAGE_ALIGN(disksize); set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); + up_write(&zram->init_lock); return len; } @@ -106,8 +110,10 @@ static ssize_t reset_store(struct device *dev, if (bdev) fsync_bdev(bdev); + down_write(&zram->init_lock); if (zram->init_done) - zram_reset_device(zram); + __zram_reset_device(zram); + up_write(&zram->init_lock); return len; } From 3a12c26604e2567d825d9e6db441f5dddc56cd12 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Sat, 12 Jan 2013 14:39:51 -0500 Subject: [PATCH 003/117] Enable zRAM --- arch/arm/configs/vigor_aosp_defconfig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index f73dff59..7d3d3b16 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -2487,8 +2487,15 @@ CONFIG_ANDROID_EVENTS_LOG_SIZE=256 # CONFIG_LINE6_USB is not set # CONFIG_VT6656 is not set # CONFIG_IIO is not set +CONFIG_SNAPPY_COMPRESS=y +CONFIG_SNAPPY_DECOMPRESS=y # CONFIG_XVMALLOC is not set -# CONFIG_ZRAM is not set +CONFIG_ZRAM=y +CONFIG_ZRAM_NUM_DEVICES=1 +CONFIG_ZRAM_DEFAULT_DISKSIZE=100663296 +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_ZRAM_LZO is not set +CONFIG_ZRAM_SNAPPY=y # CONFIG_FB_SM7XX is not set CONFIG_MACH_NO_WESTBRIDGE=y # CONFIG_ATH6K_LEGACY is not set From abf392bda720d91f4a24abde0a199bb4c7107f20 Mon Sep 17 00:00:00 2001 From: Dennis Date: Thu, 30 Aug 2012 21:45:59 -0400 Subject: [PATCH 004/117] Add Force Fast Charge V2 - Coolexe. 
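
The interface lands under /sys/kernel/fast_charge/ (fastchg.c below creates a "fast_charge" kobject against kernel_kobj); force_fast_charge is the only writable node and takes the 0/1/2 modes listed in the header comment. A minimal userspace sketch of driving it, purely illustrative and not part of the driver:

/* Illustrative only: set the force_fast_charge mode from userspace. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* Node created by fastchg.c as "fast_charge" under kernel_kobj. */
	const char *path = "/sys/kernel/fast_charge/force_fast_charge";
	/* 0 = disabled, 1 = force AC, 2 = force AC only if no USB peripheral */
	int mode = (argc > 1) ? atoi(argv[1]) : 1;
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%d\n", mode);
	return fclose(f) ? 1 : 0;
}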
--- arch/arm/mach-msm/Kconfig | 7 + arch/arm/mach-msm/Makefile | 1 + arch/arm/mach-msm/fastchg.c | 257 +++++++++++++++++++++++++++ arch/arm/mach-msm/htc_battery_8x60.c | 22 +++ drivers/usb/otg/msm_otg.c | 39 +++- include/linux/fastchg.h | 54 ++++++ 6 files changed, 379 insertions(+), 1 deletion(-) create mode 100644 arch/arm/mach-msm/fastchg.c create mode 100644 include/linux/fastchg.h diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 53f8c9b0..5f5372af 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -2276,4 +2276,11 @@ config REMOVE_EBI1_FIXED_CLK help use for 2x Radio +config FORCE_FAST_CHARGE + bool "Force AC charge mode at will" + default y + help + A simple sysfs interface to force adapters that + are detected as USB to charge as AC. + endif diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index f7de361b..f90d9114 100644 --- a/arch/arm/mach-msm/Makefile +++ b/arch/arm/mach-msm/Makefile @@ -408,6 +408,7 @@ obj-$(CONFIG_MACH_HOLIDAY) += board-holiday-audio.o obj-$(CONFIG_MACH_HOLIDAY) += board-holiday-keypad.o obj-$(CONFIG_MACH_HOLIDAY) += htc_bdaddress.o htc_sleep_clk.o board-holiday-rfkill.o htc_fmtx_rfkill.o htc_util.o obj-$(CONFIG_MACH_HOLIDAY) += board-holiday-mmc.o board-holiday-wifi.o +obj-$(CONFIG_FORCE_FAST_CHARGE) += fastchg.o obj-$(CONFIG_MACH_VIGOR) += board-vigor.o mdm.o obj-$(CONFIG_MACH_VIGOR) += board-vigor-audio.o obj-$(CONFIG_MACH_VIGOR) += board-vigor-keypad.o diff --git a/arch/arm/mach-msm/fastchg.c b/arch/arm/mach-msm/fastchg.c new file mode 100644 index 00000000..489562ab --- /dev/null +++ b/arch/arm/mach-msm/fastchg.c @@ -0,0 +1,257 @@ +/* + * Author: Chad Froebel + * + * Ported to Sensation and extended : Jean-Pierre Rasquin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* + * Possible values for "force_fast_charge" are : + * + * 0 - disabled (default) + * 1 - substitute AC to USB unconditional + * 2 - substitute AC to USB only if no USB peripheral is detected + * + * Possible values for "USB_peripheral_detected" are : + * + * 0 - No USB accessory currently attached (default) + * 1 - USB accessory currently attached + * + * Possible values for "USB_porttype_detected" are : + * + * 0 - invalid USB port + * 1 - standard downstream port + * 2 - dedicated charging port + * 3 - charging downstream port + * 4 - accessory charger adapter A + * 5 - accessory charger adapter B + * 6 - accessory charger adapter C + * 7 - accessory charger adapter dock + * 10 - nothing attached (default) + * + * Possible values for "is_fast_charge_forced" are : + * + * 0 - fast charging is currently not forced + * 1 - fast charging is currently forced + * + * Possible values for "current_charge_mode" are : + * + * 0 - Discharging + * 1 - AC + * 2 - USB + */ + +#include +#include +#include + +int force_fast_charge; +int USB_peripheral_detected; +int USB_porttype_detected; +int is_fast_charge_forced; +int current_charge_mode; + +/* sysfs interface for "force_fast_charge" */ +static ssize_t force_fast_charge_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ +return sprintf(buf, "%d\n", force_fast_charge); +} + +static ssize_t force_fast_charge_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + +int new_force_fast_charge; + +sscanf(buf, "%du", &new_force_fast_charge); + +if (new_force_fast_charge >= FAST_CHARGE_DISABLED && new_force_fast_charge <= FAST_CHARGE_FORCE_AC_IF_NO_USB) { + + /* update only if valid value provided */ + force_fast_charge = new_force_fast_charge; + +} + +return count; +} + +static struct kobj_attribute force_fast_charge_attribute = +__ATTR(force_fast_charge, 0666, force_fast_charge_show, force_fast_charge_store); + +static struct attribute *force_fast_charge_attrs[] = { +&force_fast_charge_attribute.attr, +NULL, +}; + +static struct attribute_group force_fast_charge_attr_group = { +.attrs = force_fast_charge_attrs, +}; + +/* sysfs interface for "USB_peripheral_detected" */ +static ssize_t USB_peripheral_detected_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + switch (USB_peripheral_detected) { + case USB_ACC_NOT_DETECTED: return sprintf(buf, "No\n"); + case USB_ACC_DETECTED: return sprintf(buf, "Yes\n"); + default: return sprintf(buf, "something went wrong\n"); + } +} + +static ssize_t USB_peripheral_detected_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ +/* no user change allowed */ +return count; +} + +static struct kobj_attribute USB_peripheral_detected_attribute = +__ATTR(USB_peripheral_detected, 0444, USB_peripheral_detected_show, USB_peripheral_detected_store); + +static struct attribute *USB_peripheral_detected_attrs[] = { +&USB_peripheral_detected_attribute.attr, +NULL, +}; + +static struct attribute_group USB_peripheral_detected_attr_group = { +.attrs = USB_peripheral_detected_attrs, +}; + +/* sysfs interface for "USB_porttype_detected" */ +static ssize_t USB_porttype_detected_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + switch (USB_porttype_detected) { + case USB_INVALID_DETECTED: return sprintf(buf, "Invalid Port\n"); + case USB_SDP_DETECTED: return sprintf(buf, "Standard Downstream Port\n"); + case USB_DCP_DETECTED: return sprintf(buf, "Dedicated Charging Port\n"); + case 
USB_CDP_DETECTED: return sprintf(buf, "Charging Downstream Port\n"); + case USB_ACA_A_DETECTED: return sprintf(buf, "Accessory Charger Adapter A\n"); + case USB_ACA_B_DETECTED: return sprintf(buf, "Accessory Charger Adapter B\n"); + case USB_ACA_C_DETECTED: return sprintf(buf, "Accessory Charger Adapter C\n"); + case USB_ACA_DOCK_DETECTED: return sprintf(buf, "Accessory Charger Adapter Dock\n"); + case NO_USB_DETECTED: return sprintf(buf, "No Port\n"); + default: return sprintf(buf, "something went wrong\n"); + } +} + +static ssize_t USB_porttype_detected_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ +/* no user change allowed */ +return count; +} + +static struct kobj_attribute USB_porttype_detected_attribute = +__ATTR(USB_porttype_detected, 0444, USB_porttype_detected_show, USB_porttype_detected_store); + +static struct attribute *USB_porttype_detected_attrs[] = { +&USB_porttype_detected_attribute.attr, +NULL, +}; + +static struct attribute_group USB_porttype_detected_attr_group = { +.attrs = USB_porttype_detected_attrs, +}; + +/* sysfs interface for "is_fast_charge_forced" */ +static ssize_t is_fast_charge_forced_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + switch (is_fast_charge_forced) { + case FAST_CHARGE_NOT_FORCED: return sprintf(buf, "No\n"); + case FAST_CHARGE_FORCED: return sprintf(buf, "Yes\n"); + default: return sprintf(buf, "something went wrong\n"); + } +} + +static ssize_t is_fast_charge_forced_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ +/* no user change allowed */ +return count; +} + +static struct kobj_attribute is_fast_charge_forced_attribute = +__ATTR(is_fast_charge_forced, 0444, is_fast_charge_forced_show, is_fast_charge_forced_store); + +static struct attribute *is_fast_charge_forced_attrs[] = { +&is_fast_charge_forced_attribute.attr, +NULL, +}; + +static struct attribute_group is_fast_charge_forced_attr_group = { +.attrs = is_fast_charge_forced_attrs, +}; + +/* sysfs interface for "current_charge_mode" */ +static ssize_t current_charge_mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + switch (current_charge_mode) { + case CURRENT_CHARGE_MODE_DISCHARGING: return sprintf(buf, "Discharging\n"); + case CURRENT_CHARGE_MODE_AC: return sprintf(buf, "AC\n"); + case CURRENT_CHARGE_MODE_USB: return sprintf(buf, "USB\n"); + default: return sprintf(buf, "something went wrong\n"); + } +} + +static ssize_t current_charge_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ +/* no user change allowed */ +return count; +} + +static struct kobj_attribute current_charge_mode_attribute = +__ATTR(current_charge_mode, 0444, current_charge_mode_show, current_charge_mode_store); + +static struct attribute *current_charge_mode_attrs[] = { +¤t_charge_mode_attribute.attr, +NULL, +}; + +static struct attribute_group current_charge_mode_attr_group = { +.attrs = current_charge_mode_attrs, +}; + +/* Initialize fast charge sysfs folder */ +static struct kobject *force_fast_charge_kobj; + +int force_fast_charge_init(void) +{ + int force_fast_charge_retval; + int USB_peripheral_detected_retval; + int USB_porttype_detected_retval; + int is_fast_charge_forced_retval; + int current_charge_mode_retval; + + force_fast_charge = FAST_CHARGE_DISABLED; /* Forced fast charge disabled by default */ + USB_peripheral_detected = USB_ACC_NOT_DETECTED; /* Consider no USB accessory detected so far */ + USB_porttype_detected = 
NO_USB_DETECTED; /* Consider no USB port is yet detected */ + is_fast_charge_forced = FAST_CHARGE_NOT_FORCED; /* Consider fast charge is not forced at start */ + current_charge_mode = CURRENT_CHARGE_MODE_DISCHARGING; /* Consider we are discharging at start */ + + force_fast_charge_kobj = kobject_create_and_add("fast_charge", kernel_kobj); + if (!force_fast_charge_kobj) { + return -ENOMEM; + } + force_fast_charge_retval = sysfs_create_group(force_fast_charge_kobj, &force_fast_charge_attr_group); + USB_peripheral_detected_retval = sysfs_create_group(force_fast_charge_kobj, &USB_peripheral_detected_attr_group); + USB_porttype_detected_retval = sysfs_create_group(force_fast_charge_kobj, &USB_porttype_detected_attr_group); + is_fast_charge_forced_retval = sysfs_create_group(force_fast_charge_kobj, &is_fast_charge_forced_attr_group); + current_charge_mode_retval = sysfs_create_group(force_fast_charge_kobj, ¤t_charge_mode_attr_group); + if (force_fast_charge_retval && USB_peripheral_detected_retval && USB_porttype_detected_retval && is_fast_charge_forced_retval && current_charge_mode_retval) + kobject_put(force_fast_charge_kobj); + return (force_fast_charge_retval && USB_peripheral_detected_retval && USB_porttype_detected_retval && is_fast_charge_forced_retval && current_charge_mode_retval); +} +/* end sysfs interface */ + +void force_fast_charge_exit(void) +{ + kobject_put(force_fast_charge_kobj); +} + +module_init(force_fast_charge_init); +module_exit(force_fast_charge_exit); + diff --git a/arch/arm/mach-msm/htc_battery_8x60.c b/arch/arm/mach-msm/htc_battery_8x60.c index 2af1094c..6105ff3c 100644 --- a/arch/arm/mach-msm/htc_battery_8x60.c +++ b/arch/arm/mach-msm/htc_battery_8x60.c @@ -37,6 +37,9 @@ #include #include #include +#ifdef CONFIG_FORCE_FAST_CHARGE +#include +#endif #include #define BATT_SUSPEND_CHECK_TIME 3600 @@ -315,9 +318,28 @@ static void cable_status_notifier_func(enum usb_connect_type online) switch (online) { case CONNECT_TYPE_USB: +#ifdef CONFIG_FORCE_FAST_CHARGE + /* If forced fast charge is enabled "always" or if no USB device detected, go AC */ + if ((force_fast_charge == FAST_CHARGE_FORCE_AC) || + (force_fast_charge == FAST_CHARGE_FORCE_AC_IF_NO_USB && + USB_peripheral_detected == USB_ACC_NOT_DETECTED )) { + BATT_LOG("cable USB forced to AC"); + is_fast_charge_forced = FAST_CHARGE_FORCED; + current_charge_mode = CURRENT_CHARGE_MODE_AC; + htc_batt_info.rep.charging_source = CHARGER_AC; + radio_set_cable_status(CHARGER_AC); + } else { + BATT_LOG("cable USB not forced to AC"); + is_fast_charge_forced = FAST_CHARGE_NOT_FORCED; + current_charge_mode = CURRENT_CHARGE_MODE_USB; + htc_batt_info.rep.charging_source = CHARGER_USB; + radio_set_cable_status(CHARGER_USB); + } +#else BATT_LOG("cable USB"); htc_batt_info.rep.charging_source = CHARGER_USB; radio_set_cable_status(CHARGER_USB); +#endif break; case CONNECT_TYPE_AC: BATT_LOG("cable AC"); diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c index dd2ae719..ef3d7c75 100644 --- a/drivers/usb/otg/msm_otg.c +++ b/drivers/usb/otg/msm_otg.c @@ -43,6 +43,10 @@ #include #include +#ifdef CONFIG_FORCE_FAST_CHARGE +#include +#endif + #define MSM_USB_BASE (motg->regs) #define DRIVER_NAME "msm_otg" @@ -60,6 +64,15 @@ static void send_usb_connect_notify(struct work_struct *w) motg->connect_type_ready = 1; USBH_INFO("send connect type %d\n", motg->connect_type); +#ifdef CONFIG_FORCE_FAST_CHARGE + if (motg->connect_type == CONNECT_TYPE_USB) { + USB_peripheral_detected = USB_ACC_DETECTED; /* Inform forced fast charge that a USB 
accessory has been attached */ + USBH_INFO("USB forced fast charge : USB device currently attached"); + } else { + USB_peripheral_detected = USB_ACC_NOT_DETECTED; /* Inform forced fast charge that a USB accessory has not been attached */ + USBH_INFO("USB forced fast charge : No USB device currently attached"); + } +#endif mutex_lock(¬ify_sem); list_for_each_entry(notifier, &g_lh_usb_notifier_list, notifier_link) { if (notifier->func != NULL) { @@ -685,7 +698,11 @@ static int msm_otg_reset(struct otg_transceiver *otg) u32 val = 0; u32 ulpi_val = 0; USBH_INFO("%s\n", __func__); - +#ifdef CONFIG_FORCE_FAST_CHARGE + USB_porttype_detected = NO_USB_DETECTED; /* No USB plugged, clear fast charge detected port value */ + is_fast_charge_forced = FAST_CHARGE_NOT_FORCED; /* No fast charge can be forced then... */ + current_charge_mode = CURRENT_CHARGE_MODE_DISCHARGING; /* ... and we are now on battery */ +#endif clk_enable(motg->clk); if (motg->pdata->phy_reset) ret = motg->pdata->phy_reset(); @@ -1756,6 +1773,26 @@ static void msm_chg_detect_work(struct work_struct *w) msm_chg_enable_aca_intr(motg); USBH_INFO("chg_type = %s\n", chg_to_string(motg->chg_type)); +#ifdef CONFIG_FORCE_FAST_CHARGE + switch (motg->chg_type) { + case USB_SDP_CHARGER: USB_porttype_detected = USB_SDP_DETECTED; + break; + case USB_DCP_CHARGER: USB_porttype_detected = USB_DCP_DETECTED; + break; + case USB_CDP_CHARGER: USB_porttype_detected = USB_CDP_DETECTED; + break; + case USB_ACA_A_CHARGER: USB_porttype_detected = USB_ACA_A_DETECTED; + break; + case USB_ACA_B_CHARGER: USB_porttype_detected = USB_ACA_B_DETECTED; + break; + case USB_ACA_C_CHARGER: USB_porttype_detected = USB_ACA_C_DETECTED; + break; + case USB_ACA_DOCK_CHARGER: USB_porttype_detected = USB_ACA_DOCK_DETECTED; + break; + default: USB_porttype_detected = USB_INVALID_DETECTED; + break; + } +#endif schedule_work(&motg->sm_work); queue_work(motg->usb_wq, &motg->notifier_work); diff --git a/include/linux/fastchg.h b/include/linux/fastchg.h new file mode 100644 index 00000000..ba8e68c1 --- /dev/null +++ b/include/linux/fastchg.h @@ -0,0 +1,54 @@ +/* + * Author: Chad Froebel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + + +#ifndef _LINUX_FASTCHG_H +#define _LINUX_FASTCHG_H + +extern int force_fast_charge; + +#define FAST_CHARGE_DISABLED 0 /* default */ +#define FAST_CHARGE_FORCE_AC 1 +#define FAST_CHARGE_FORCE_AC_IF_NO_USB 2 + +extern int USB_peripheral_detected; + +#define USB_ACC_NOT_DETECTED 0 /* default */ +#define USB_ACC_DETECTED 1 + +#define USB_INVALID_DETECTED 0 +#define USB_SDP_DETECTED 1 +#define USB_DCP_DETECTED 2 +#define USB_CDP_DETECTED 3 +#define USB_ACA_A_DETECTED 4 +#define USB_ACA_B_DETECTED 5 +#define USB_ACA_C_DETECTED 6 +#define USB_ACA_DOCK_DETECTED 7 +#define NO_USB_DETECTED 10 /* default */ + +extern int USB_porttype_detected; + +extern int is_fast_charge_forced; + +#define FAST_CHARGE_NOT_FORCED 0 +#define FAST_CHARGE_FORCED 1 + +extern int current_charge_mode; + +#define CURRENT_CHARGE_MODE_DISCHARGING 0 +#define CURRENT_CHARGE_MODE_AC 1 +#define CURRENT_CHARGE_MODE_USB 2 + +#endif + From b71d4b402a12a5d76ebf5f099eba472512cc006b Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Sat, 12 Jan 2013 20:52:10 -0500 Subject: [PATCH 005/117] Enable fast charge --- arch/arm/configs/vigor_aosp_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 7d3d3b16..bec3118a 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -475,6 +475,7 @@ CONFIG_MSM_QDSP6_APR=y CONFIG_MSM_SPM_V1=y # CONFIG_MSM_SPM_V2 is not set # CONFIG_FOOTPRINT_IRQ is not set +CONFIG_FORCE_FAST_CHARGE=y # # TI WLAN driver From 2a0b91e6ebb8f2544c81ee5639d4969a7606b102 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 14 Jan 2013 11:49:29 -0500 Subject: [PATCH 006/117] mpdecision updates --- arch/arm/configs/vigor_aosp_defconfig | 7 ++++- arch/arm/mach-msm/Kconfig | 30 ++++++++++++++++++ arch/arm/mach-msm/acpuclock-8x60.c | 44 ++++++++++++++++++++++++++- include/linux/cpufreq.h | 3 ++ 4 files changed, 82 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index bec3118a..2e73c46b 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -406,7 +406,12 @@ CONFIG_MSM_SDIO_SMEM=y CONFIG_MSM_DALRPC=y # CONFIG_MSM_DALRPC_TEST is not set CONFIG_MSM_MPDEC=y -# CONFIG_MSM_CPU_FREQ_SET_MIN_MAX is not set +CONFIG_CMDLINE_OPTIONS=y +CONFIG_MSM_MPDEC_ENABLED=y +# CONFIG_MSM_MPDEC_DISABLED is not set +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=1512000 +CONFIG_MSM_CPU_FREQ_MIN=192000 CONFIG_MSM_CPU_FREQ_ONDEMAND_MAX=384000 CONFIG_MSM_CPU_FREQ_ONDEMAND_MIN=245760 # CONFIG_MSM_AVS_HW is not set diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 5f5372af..391e017d 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -1529,6 +1529,36 @@ config MSM_MPDEC This enables kernel based multi core control. (up/down hotplug based on load) +config MSM_MPDEC + bool "Enable kernel based mpdecision" + depends on MSM_SMP + default n + help + This enables kernel based multi core control. + (up/down hotplug based on load) + +choice + prompt "Default MPdecision setting" + help + This option sets the MPdecision startup settings. + If in doubt, select 'enabled'. + +config MSM_MPDEC_ENABLED + bool "enabled" + depends on MSM_MPDEC + help + Build MPdecision into kernel with full control over + multi processor decisions by default. 
+ +config MSM_MPDEC_DISABLED + bool "disabled" + depends on MSM_MPDEC + help + Build MPdecision into kernel with only control over + screen on/off multi processor decisions by default. + +endchoice + if CPU_FREQ_MSM config MSM_CPU_FREQ_SET_MIN_MAX diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c index f5855c15..d497774d 100644 --- a/arch/arm/mach-msm/acpuclock-8x60.c +++ b/arch/arm/mach-msm/acpuclock-8x60.c @@ -315,7 +315,7 @@ static struct clkctl_acpu_speed *acpu_freq_tbl; static struct clkctl_l2_speed *l2_freq_tbl = l2_freq_tbl_v2; static unsigned int l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_v2); -static unsigned long acpuclk_8x60_get_rate(int cpu) +unsigned long acpuclk_8x60_get_rate(int cpu) { return drv_state.current_speed[cpu]->acpuclk_khz; } @@ -870,6 +870,48 @@ static struct notifier_block __cpuinitdata acpuclock_cpu_notifier = { .notifier_call = acpuclock_cpu_callback, }; +#ifdef CONFIG_MSM_MPDEC +uint32_t acpu_check_khz_value(unsigned long khz) +{ + struct clkctl_acpu_speed *f; + + if (khz > 1512000) + return CONFIG_MSM_CPU_FREQ_MAX; + + if (khz < 192000) + return CONFIG_MSM_CPU_FREQ_MIN; + + for (f = acpu_freq_tbl_fast; f->acpuclk_khz != 0; f++) { + if (khz < 192000) { + if (f->acpuclk_khz == (khz*1000)) + return f->acpuclk_khz; + if ((khz*1000) > f->acpuclk_khz) { + f++; + if ((khz*1000) < f->acpuclk_khz) { + f--; + return f->acpuclk_khz; + } + f--; + } + } + if (f->acpuclk_khz == khz) { + return 1; + } + if (khz > f->acpuclk_khz) { + f++; + if (khz < f->acpuclk_khz) { + f--; + return f->acpuclk_khz; + } + f--; + } + } + + return 0; +} +EXPORT_SYMBOL(acpu_check_khz_value); +#endif + static unsigned int __init select_freq_plan(void) { uint32_t pte_efuse, speed_bin, pvs, max_khz; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 40cc1759..a2319eca 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -24,6 +24,9 @@ #define CPUFREQ_NAME_LEN 16 +#ifdef CONFIG_MSM_MPDEC +extern uint32_t acpu_check_khz_value(unsigned long khz); +#endif /********************************************************************* * CPUFREQ NOTIFIER INTERFACE * From a366e0cc875020bc3e84fbe0a087af0d1833bd00 Mon Sep 17 00:00:00 2001 From: showp1984 Date: Thu, 28 Jun 2012 05:13:58 +0200 Subject: [PATCH 007/117] mach-msm: mpdecision: don't turn off cpu1 if freq > idle This fixes cpu1 being turned off though it is still working with a high frequency. The idle_freq can be modified through sysfs. 
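
The gate itself is small. Distilled out of msm_mpdec_work_thread() into a standalone sketch (acpuclk_8x60_get_rate() is stubbed here and the 486 MHz figure is the MSM_MPDEC_IDLE_FREQ default added by this patch; real inputs obviously differ):

#include <stdbool.h>
#include <stdio.h>

#define MSM_MPDEC_IDLE_FREQ 486000UL	/* kHz, new default for the tunable */

/* Stub standing in for acpuclk_8x60_get_rate(cpu). */
static unsigned long get_rate_khz(int cpu)
{
	(void)cpu;
	return 1512000UL;	/* pretend cpu1 is still running flat out */
}

/* Mirrors the new check: leave cpu1 online while it runs above idle_freq. */
static bool may_unplug(int cpu, unsigned long idle_freq)
{
	return get_rate_khz(cpu) <= idle_freq;
}

int main(void)
{
	printf("unplug cpu1? %s\n",
	       may_unplug(1, MSM_MPDEC_IDLE_FREQ) ? "yes" : "no");
	return 0;
}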
Signed-off-by: showp1984 Conflicts: arch/arm/mach-msm/msm_mpdecision.c --- arch/arm/mach-msm/msm_mpdecision.c | 37 ++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c index 1a5c3db7..ddfa4609 100644 --- a/arch/arm/mach-msm/msm_mpdecision.c +++ b/arch/arm/mach-msm/msm_mpdecision.c @@ -32,10 +32,11 @@ #include #include -#define MPDEC_TAG "[MPDEC]: " -#define MSM_MPDEC_STARTDELAY 40000 -#define MSM_MPDEC_DELAY 500 -#define MSM_MPDEC_PAUSE 10000 +#define MPDEC_TAG "[MPDEC]: " +#define MSM_MPDEC_STARTDELAY 40000 +#define MSM_MPDEC_DELAY 500 +#define MSM_MPDEC_PAUSE 10000 +#define MSM_MPDEC_IDLE_FREQ 486000 enum { MSM_MPDEC_DISABLED = 0, @@ -60,17 +61,21 @@ static struct msm_mpdec_tuners { unsigned int delay; unsigned int pause; bool scroff_single_core; + unsigned long int idle_freq; } msm_mpdec_tuners_ins = { .startdelay = MSM_MPDEC_STARTDELAY, .delay = MSM_MPDEC_DELAY, .pause = MSM_MPDEC_PAUSE, .scroff_single_core = true, + .idle_freq = MSM_MPDEC_IDLE_FREQ, }; static unsigned int NwNs_Threshold[4] = {20, 0, 0, 5}; static unsigned int TwTs_Threshold[4] = {250, 0, 0, 250}; extern unsigned int get_rq_info(void); +extern unsigned long acpuclk_8x60_get_rate(int); + unsigned int state = MSM_MPDEC_IDLE; bool was_paused = false; @@ -160,6 +165,9 @@ static void msm_mpdec_work_thread(struct work_struct *work) case MSM_MPDEC_DOWN: cpu = (CONFIG_NR_CPUS - 1); if (cpu < nr_cpu_ids) { + if (cpu_online(cpu)) + if (acpuclk_8x60_get_rate(cpu) > msm_mpdec_tuners_ins.idle_freq) + break; if ((per_cpu(msm_mpdec_cpudata, cpu).online == true) && (cpu_online(cpu))) { cpu_down(cpu); per_cpu(msm_mpdec_cpudata, cpu).online = false; @@ -261,6 +269,12 @@ show_one(delay, delay); show_one(pause, pause); show_one(scroff_single_core, scroff_single_core); +static ssize_t show_idle_freq (struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", msm_mpdec_tuners_ins.idle_freq); +} + static ssize_t show_enabled(struct kobject *a, struct attribute *b, char *buf) { @@ -346,6 +360,19 @@ static ssize_t store_pause(struct kobject *a, struct attribute *b, return count; } +static ssize_t store_idle_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + long unsigned int input; + int ret; + ret = sscanf(buf, "%lu", &input); + if (ret != 1) + return -EINVAL; + msm_mpdec_tuners_ins.idle_freq = acpu_check_khz_value(input); + + return count; +} + static ssize_t store_scroff_single_core(struct kobject *a, struct attribute *b, const char *buf, size_t count) { @@ -478,6 +505,7 @@ define_one_global_rw(startdelay); define_one_global_rw(delay); define_one_global_rw(pause); define_one_global_rw(scroff_single_core); +define_one_global_rw(idle_freq); define_one_global_rw(enabled); define_one_global_rw(nwns_threshold_up); define_one_global_rw(nwns_threshold_down); @@ -489,6 +517,7 @@ static struct attribute *msm_mpdec_attributes[] = { &delay.attr, &pause.attr, &scroff_single_core.attr, + &idle_freq.attr, &enabled.attr, &nwns_threshold_up.attr, &nwns_threshold_down.attr, From 3ba5c4007118af31fa5bcd69d6938e99a16c3ffd Mon Sep 17 00:00:00 2001 From: showp1984 Date: Thu, 28 Jun 2012 05:24:38 +0200 Subject: [PATCH 008/117] mach-msm: mpdecision: fixup: move idle block to mpdecision() This belongs in mp_decision() because this influences the decision mpdecision is about to make. 
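
With the block moved, the whole new_state selection reads in one place. A compact standalone sketch of that flow (the threshold tables and index math are copied from msm_mpdecision.c; rq_depth, total_time and the cpu1 rate are made-up inputs):

#include <stdio.h>

enum { MPDEC_IDLE, MPDEC_DOWN, MPDEC_UP };

static unsigned int NwNs_Threshold[4] = {20, 0, 0, 5};
static unsigned int TwTs_Threshold[4] = {250, 0, 0, 250};

static int mp_decision_sketch(int nr_cpu_online, unsigned int rq_depth,
			      unsigned int total_time,
			      unsigned long cpu1_khz, unsigned long idle_freq)
{
	int index = (nr_cpu_online - 1) * 2;	/* 0 with one core up, 2 with two */

	if (nr_cpu_online < 2 && rq_depth >= NwNs_Threshold[index]) {
		if (total_time >= TwTs_Threshold[index])
			return MPDEC_UP;
	} else if (rq_depth <= NwNs_Threshold[index + 1]) {
		if (total_time >= TwTs_Threshold[index + 1]) {
			/* the moved gate: keep cpu1 if it is still busy */
			if (cpu1_khz > idle_freq)
				return MPDEC_IDLE;
			return MPDEC_DOWN;
		}
	}
	return MPDEC_IDLE;
}

int main(void)
{
	static const char *names[] = { "IDLE", "DOWN", "UP" };

	printf("decision: %s\n",
	       names[mp_decision_sketch(2, 0, 300, 1512000UL, 486000UL)]);
	return 0;
}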
Signed-off-by: showp1984 --- arch/arm/mach-msm/msm_mpdecision.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c index ddfa4609..5b238f2d 100644 --- a/arch/arm/mach-msm/msm_mpdecision.c +++ b/arch/arm/mach-msm/msm_mpdecision.c @@ -117,6 +117,10 @@ static int mp_decision(void) } else if (rq_depth <= NwNs_Threshold[index+1]) { if (total_time >= TwTs_Threshold[index+1] ) { new_state = MSM_MPDEC_DOWN; + if (cpu_online((CONFIG_NR_CPUS - 1))) + if (acpuclk_8x60_get_rate((CONFIG_NR_CPUS - 1)) > + msm_mpdec_tuners_ins.idle_freq) + new_state = MSM_MPDEC_IDLE; } } else { new_state = MSM_MPDEC_IDLE; @@ -165,9 +169,6 @@ static void msm_mpdec_work_thread(struct work_struct *work) case MSM_MPDEC_DOWN: cpu = (CONFIG_NR_CPUS - 1); if (cpu < nr_cpu_ids) { - if (cpu_online(cpu)) - if (acpuclk_8x60_get_rate(cpu) > msm_mpdec_tuners_ins.idle_freq) - break; if ((per_cpu(msm_mpdec_cpudata, cpu).online == true) && (cpu_online(cpu))) { cpu_down(cpu); per_cpu(msm_mpdec_cpudata, cpu).online = false; From 4a841d65860122c351cca2f90adec6ad0fc6352a Mon Sep 17 00:00:00 2001 From: showp1984 Date: Mon, 14 Jan 2013 21:05:43 -0500 Subject: [PATCH 009/117] mpdecision: don't turn on cpu1 when cpu0 is below idle_freq --- arch/arm/mach-msm/msm_mpdecision.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c index 5b238f2d..8cd7c5a0 100644 --- a/arch/arm/mach-msm/msm_mpdecision.c +++ b/arch/arm/mach-msm/msm_mpdecision.c @@ -113,6 +113,9 @@ static int mp_decision(void) if ((nr_cpu_online < 2) && (rq_depth >= NwNs_Threshold[index])) { if (total_time >= TwTs_Threshold[index]) { new_state = MSM_MPDEC_UP; + if (acpuclk_8x60_get_rate((CONFIG_NR_CPUS - 2)) <= + msm_mpdec_tuners_ins.idle_freq) + new_state = MSM_MPDEC_IDLE; } } else if (rq_depth <= NwNs_Threshold[index+1]) { if (total_time >= TwTs_Threshold[index+1] ) { From 3ef159f56c5c6efc6bc5c5c4ed0b5235eae24b18 Mon Sep 17 00:00:00 2001 From: Chad Froebel Date: Wed, 18 Jan 2012 23:04:54 -0500 Subject: [PATCH 010/117] SVS sysfs interface Conflicts: arch/arm/mach-msm/Kconfig --- arch/arm/configs/vigor_aosp_defconfig | 1 + arch/arm/configs/vigor_defconfig | 3 +- arch/arm/mach-msm/Kconfig | 4 ++ arch/arm/mach-msm/acpuclock-8x60.c | 48 ++++++++++++++- drivers/cpufreq/cpufreq.c | 85 ++++++++++++++++++++++++++- 5 files changed, 137 insertions(+), 4 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 2e73c46b..337d9da8 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -627,6 +627,7 @@ CONFIG_CMDLINE="" CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TABLE=y CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_VOLTAGE_TABLE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set diff --git a/arch/arm/configs/vigor_defconfig b/arch/arm/configs/vigor_defconfig index 7da7c7f2..9dd9ea8e 100644 --- a/arch/arm/configs/vigor_defconfig +++ b/arch/arm/configs/vigor_defconfig @@ -574,6 +574,8 @@ CONFIG_CMDLINE="" CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TABLE=y # CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_VOLTAGE_TABLE=y # CONFIG_CPU_FREQ_STAT_DETAILS is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set @@ -591,7 +593,6 @@ CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y 
CONFIG_CPU_IDLE_GOV_MENU=y CONFIG_CPU_FREQ_MSM=y - CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE=y CONFIG_PERFLOCK=y diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 391e017d..c120a12c 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -1557,6 +1557,10 @@ config MSM_MPDEC_DISABLED Build MPdecision into kernel with only control over screen on/off multi processor decisions by default. +config CPU_VOLTAGE_TABLE + bool "Enable CPU Voltage Table via sysfs for adjustements" + default n + endchoice if CPU_FREQ_MSM diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c index d497774d..a359e198 100644 --- a/arch/arm/mach-msm/acpuclock-8x60.c +++ b/arch/arm/mach-msm/acpuclock-8x60.c @@ -45,7 +45,8 @@ #define L_VAL_SCPLL_CAL_MIN 0x08 /* = 432 MHz with 27MHz source */ #define L_VAL_SCPLL_CAL_MAX 0x1C /* = 1512 MHz with 27MHz source */ -#define MAX_VDD_SC 1250000 /* uV */ +#define MAX_VDD_SC 1600000 /* uV */ +#define MIN_VDD_SC 800000 /* uV */ #define MAX_VDD_MEM 1250000 /* uV */ #define MAX_VDD_DIG 1200000 /* uV */ #define MAX_AXI 310500 /* KHz */ @@ -668,7 +669,6 @@ static int acpuclk_8x60_set_rate(int cpu, unsigned long rate, mutex_unlock(&drv_state.lock); return rc; } - #ifdef CONFIG_PERFLOCK unsigned int get_max_cpu_freq(void) { @@ -679,6 +679,50 @@ unsigned int get_max_cpu_freq(void) return f->acpuclk_khz;; } #endif +#ifdef CONFIG_CPU_VOLTAGE_TABLE + +ssize_t acpuclk_get_vdd_levels_str(char *buf) { + + int i, len = 0; + + if (buf) { + mutex_lock(&drv_state.lock); + + for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) { + /* updated to use uv required by 8x60 architecture - faux123 */ + len += sprintf(buf + len, "%8u: %8d\n", acpu_freq_tbl[i].acpuclk_khz, acpu_freq_tbl[i].vdd_sc ); + } + + mutex_unlock(&drv_state.lock); + } + return len; +} + +/* updated to use uv required by 8x60 architecture - faux123 */ +void acpuclk_set_vdd(unsigned int khz, int vdd_uv) { + + int i; + unsigned int new_vdd_uv; +// int vdd_uv; + +// vdd_uv = vdd_mv * 1000; + + mutex_lock(&drv_state.lock); + + for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) { + if (khz == 0) + new_vdd_uv = min(max((acpu_freq_tbl[i].vdd_sc + vdd_uv), (unsigned int)MIN_VDD_SC), (unsigned int)MAX_VDD_SC); + else if ( acpu_freq_tbl[i].acpuclk_khz == khz) + new_vdd_uv = min(max((unsigned int)vdd_uv, (unsigned int)MIN_VDD_SC), (unsigned int)MAX_VDD_SC); + else + continue; + + acpu_freq_tbl[i].vdd_sc = new_vdd_uv; + } + + mutex_unlock(&drv_state.lock); +} +#endif /* CONFIG_CPU_VOTALGE_TABLE */ static void __init scpll_init(int sc_pll) { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 124da3bb..856662c7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -575,6 +575,68 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); } +#ifdef CONFIG_CPU_VOLTAGE_TABLE + +extern ssize_t acpuclk_get_vdd_levels_str(char *buf); +extern void acpuclk_set_vdd(unsigned acpu_khz, int vdd); + +static ssize_t show_vdd_levels(struct kobject *a, struct attribute *b, char *buf) { + return acpuclk_get_vdd_levels_str(buf); +} + +static ssize_t store_vdd_levels(struct kobject *a, struct attribute *b, const char *buf, size_t count) { + + int i = 0, j; + int pair[2] = { 0, 0 }; + int sign = 0; + + if (count < 1) + return 0; + + if (buf[0] == '-') { + sign = -1; + i++; + } + else if (buf[0] == '+') { + sign = 1; + i++; + } + + for (j = 0; i < count; i++) { + + char c = buf[i]; + + if ((c >= '0') && (c 
<= '9')) { + pair[j] *= 10; + pair[j] += (c - '0'); + } + else if ((c == ' ') || (c == '\t')) { + if (pair[j] != 0) { + j++; + + if ((sign != 0) || (j > 1)) + break; + } + } + else + break; + } + + if (sign != 0) { + if (pair[0] > 0) + acpuclk_set_vdd(0, sign * pair[0]); + } + else { + if ((pair[0] > 0) && (pair[1] > 0)) + acpuclk_set_vdd((unsigned)pair[0], pair[1]); + else + return -EINVAL; + } + return count; +} + +#endif /* CONFIG_CPU_VOLTAGE_TABLE */ + cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); cpufreq_freq_attr_ro(cpuinfo_min_freq); cpufreq_freq_attr_ro(cpuinfo_max_freq); @@ -590,6 +652,10 @@ cpufreq_freq_attr_rw(scaling_max_freq); cpufreq_freq_attr_rw(scaling_governor); cpufreq_freq_attr_rw(scaling_setspeed); +#ifdef CONFIG_CPU_VOLTAGE_TABLE +define_one_global_rw(vdd_levels); +#endif + static struct attribute *default_attrs[] = { &cpuinfo_min_freq.attr, &cpuinfo_max_freq.attr, @@ -605,6 +671,18 @@ static struct attribute *default_attrs[] = { NULL }; +#ifdef CONFIG_CPU_VOLTAGE_TABLE +static struct attribute *vddtbl_attrs[] = { + &vdd_levels.attr, + NULL +}; + +static struct attribute_group vddtbl_attr_group = { + .attrs = vddtbl_attrs, + .name = "vdd_table", +}; +#endif /* CONFIG_CPU_VOLTAGE_TABLE */ + struct kobject *cpufreq_global_kobject; EXPORT_SYMBOL(cpufreq_global_kobject); @@ -1908,7 +1986,7 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); static int __init cpufreq_core_init(void) { - int cpu; + int cpu, rc; for_each_possible_cpu(cpu) { per_cpu(cpufreq_policy_cpu, cpu) = -1; @@ -1920,6 +1998,11 @@ static int __init cpufreq_core_init(void) BUG_ON(!cpufreq_global_kobject); register_syscore_ops(&cpufreq_syscore_ops); +#ifdef CONFIG_CPU_VOLTAGE_TABLE + rc = sysfs_create_group(cpufreq_global_kobject, &vddtbl_attr_group); +#endif /* CONFIG_CPU_VOLTAGE_TABLE */ + return 0; } core_initcall(cpufreq_core_init); + From 7549db7206d12f1076792ee86e8746833e0cf8ee Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 14 Jan 2013 12:33:31 -0500 Subject: [PATCH 011/117] Lower minimum and maximum voltages for undervolting --- arch/arm/mach-msm/acpuclock-8x60.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c index a359e198..11d4916e 100644 --- a/arch/arm/mach-msm/acpuclock-8x60.c +++ b/arch/arm/mach-msm/acpuclock-8x60.c @@ -45,8 +45,8 @@ #define L_VAL_SCPLL_CAL_MIN 0x08 /* = 432 MHz with 27MHz source */ #define L_VAL_SCPLL_CAL_MAX 0x1C /* = 1512 MHz with 27MHz source */ -#define MAX_VDD_SC 1600000 /* uV */ -#define MIN_VDD_SC 800000 /* uV */ +#define MAX_VDD_SC 1300000 /* uV */ +#define MIN_VDD_SC 600000 /* uV */ #define MAX_VDD_MEM 1250000 /* uV */ #define MAX_VDD_DIG 1200000 /* uV */ #define MAX_AXI 310500 /* KHz */ From c8e92f9c4aad128ff97fecc07ac774df53b9709e Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 14 Jan 2013 14:29:35 -0500 Subject: [PATCH 012/117] Update version --- scripts/mkcompile_h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 1579ceaa..6e308e6d 100644 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -74,7 +74,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" echo \#define LINUX_COMPILE_BY \"`echo shrike1978`\" - echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_KERNEL`\" + echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_13.01.14`\" echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\" ) > .tmpcompile From 
dda39c04ded7c315fb2d454071100393da1af948 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 14 Jan 2013 14:54:21 -0500 Subject: [PATCH 013/117] Fix config oops --- arch/arm/mach-msm/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index c120a12c..38f3102e 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -1557,12 +1557,12 @@ config MSM_MPDEC_DISABLED Build MPdecision into kernel with only control over screen on/off multi processor decisions by default. +endchoice + config CPU_VOLTAGE_TABLE bool "Enable CPU Voltage Table via sysfs for adjustements" default n -endchoice - if CPU_FREQ_MSM config MSM_CPU_FREQ_SET_MIN_MAX From 8243ce42b3153403df6c8054eeaf08db2594846d Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 15 Jan 2013 06:58:32 -0500 Subject: [PATCH 014/117] mpdecision: modify idle_freq decision points to improve efficiency In the idle case, the new_state assignment would happen twice. This moves both assignments inside the decision so the assignment only happens once. --- arch/arm/mach-msm/msm_mpdecision.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c index 8cd7c5a0..e761de0f 100644 --- a/arch/arm/mach-msm/msm_mpdecision.c +++ b/arch/arm/mach-msm/msm_mpdecision.c @@ -112,18 +112,25 @@ static int mp_decision(void) index = (nr_cpu_online - 1) * 2; if ((nr_cpu_online < 2) && (rq_depth >= NwNs_Threshold[index])) { if (total_time >= TwTs_Threshold[index]) { - new_state = MSM_MPDEC_UP; if (acpuclk_8x60_get_rate((CONFIG_NR_CPUS - 2)) <= - msm_mpdec_tuners_ins.idle_freq) + msm_mpdec_tuners_ins.idle_freq) { new_state = MSM_MPDEC_IDLE; + } + else { + new_state = MSM_MPDEC_UP; + } } } else if (rq_depth <= NwNs_Threshold[index+1]) { if (total_time >= TwTs_Threshold[index+1] ) { - new_state = MSM_MPDEC_DOWN; - if (cpu_online((CONFIG_NR_CPUS - 1))) - if (acpuclk_8x60_get_rate((CONFIG_NR_CPUS - 1)) > - msm_mpdec_tuners_ins.idle_freq) + if (cpu_online((CONFIG_NR_CPUS - 1))) { + if (acpuclk_8x60_get_rate((CONFIG_NR_CPUS - 1)) > + msm_mpdec_tuners_ins.idle_freq) { new_state = MSM_MPDEC_IDLE; + } + else { + new_state = MSM_MPDEC_DOWN; + } + } } } else { new_state = MSM_MPDEC_IDLE; From b0c630aa835e7790411c62cd43e4fbe995c9eb64 Mon Sep 17 00:00:00 2001 From: Tanya Brokhman Date: Tue, 11 Dec 2012 15:41:19 +0200 Subject: [PATCH 015/117] block: Adding ROW scheduling algorithm Signed-off-by: Tatyana Brokhman Conflicts: block/Makefile Conflicts: block/Makefile --- Documentation/block/row-iosched.txt | 134 ++++++ block/Kconfig.iosched | 21 + block/Makefile | 2 + block/row-iosched.c | 694 ++++++++++++++++++++++++++++ 4 files changed, 851 insertions(+) create mode 100644 Documentation/block/row-iosched.txt create mode 100644 block/row-iosched.c diff --git a/Documentation/block/row-iosched.txt b/Documentation/block/row-iosched.txt new file mode 100644 index 00000000..0d794eee --- /dev/null +++ b/Documentation/block/row-iosched.txt @@ -0,0 +1,134 @@ +Introduction +============ + +The ROW scheduling algorithm will be used in mobile devices as default +block layer IO scheduling algorithm. ROW stands for "READ Over WRITE" +which is the main requests dispatch policy of this algorithm. + +The ROW IO scheduler was developed with the mobile devices needs in +mind. 
In mobile devices we favor user experience upon everything else, +thus we want to give READ IO requests as much priority as possible. +The main idea of the ROW scheduling policy is just that: +- If there are READ requests in pipe - dispatch them, while write +starvation is considered. + +Software description +==================== +The elevator defines a registering mechanism for different IO scheduler +to implement. This makes implementing a new algorithm quite straight +forward and requires almost no changes to block/elevator framework. A +new IO scheduler just has to implement a set of callback functions +defined by the elevator. +These callbacks cover all the required IO operations such as +adding/removing request to/from the scheduler, merging two requests, +dispatching a request etc. + +Design +====== + +The requests are kept in queues according to their priority. The +dispatching of requests is done in a Round Robin manner with a +different slice for each queue. The dispatch quantum for a specific +queue is set according to the queues priority. READ queues are +given bigger dispatch quantum than the WRITE queues, within a dispatch +cycle. + +At the moment there are 6 types of queues the requests are +distributed to: +- High priority READ queue +- High priority Synchronous WRITE queue +- Regular priority READ queue +- Regular priority Synchronous WRITE queue +- Regular priority WRITE queue +- Low priority READ queue + +The marking of request as high/low priority will be done by the +application adding the request and not the scheduler. See TODO section. +If the request is not marked in any way (high/low) the scheduler +assigns it to one of the regular priority queues: +read/write/sync write. + +If in a certain dispatch cycle one of the queues was empty and didn't +use its quantum that queue will be marked as "un-served". If we're in +a middle of a dispatch cycle dispatching from queue Y and a request +arrives for queue X that was un-served in the previous cycle, if X's +priority is higher than Y's, queue X will be preempted in the favor of +queue Y. + +For READ request queues ROW IO scheduler allows idling within a +dispatch quantum in order to give the application a chance to insert +more requests. Idling means adding some extra time for serving a +certain queue even if the queue is empty. The idling is enabled if +the ROW IO scheduler identifies the application is inserting requests +in a high frequency. +Not all queues can idle. ROW scheduler exposes an enablement struct +for idling. +For idling on READ queues, the ROW IO scheduler uses timer mechanism. +When the timer expires we schedule a delayed work that will signal the +device driver to fetch another request for dispatch. + +ROW scheduler will support additional services for block devices that +supports Urgent Requests. That is, the scheduler may inform the +device driver upon urgent requests using a newly defined callback. +In addition it will support rescheduling of requests that were +interrupted. For example if the device driver issues a long write +request and a sudden urgent request is received by the scheduler. +The scheduler will inform the device driver about the urgent request, +so the device driver can stop the current write request and serve the +urgent request. In such a case the device driver may also insert back +to the scheduler the remainder of the interrupted write request, such +that the scheduler may continue sending urgent requests without the +need to interrupt the ongoing write again and again. 
The write +remainder will be sent later on according to the scheduler policy. + +SMP/multi-core +============== +At the moment the code is accessed from 2 contexts: +- Application context (from block/elevator layer): adding the requests. +- device driver thread: dispatching the requests and notifying on + completion. + +One lock is used to synchronize between the two. This lock is provided +by the block device driver along with the dispatch queue. + +Config options +============== +1. hp_read_quantum: dispatch quantum for the high priority READ queue + (default is 100 requests) +2. rp_read_quantum: dispatch quantum for the regular priority READ + queue (default is 100 requests) +3. hp_swrite_quantum: dispatch quantum for the high priority + Synchronous WRITE queue (default is 2 requests) +4. rp_swrite_quantum: dispatch quantum for the regular priority + Synchronous WRITE queue (default is 1 requests) +5. rp_write_quantum: dispatch quantum for the regular priority WRITE + queue (default is 1 requests) +6. lp_read_quantum: dispatch quantum for the low priority READ queue + (default is 1 requests) +7. lp_swrite_quantum: dispatch quantum for the low priority Synchronous + WRITE queue (default is 1 requests) +8. read_idle: how long to idle on read queue in Msec (in case idling + is enabled on that queue). (default is 5 Msec) +9. read_idle_freq: frequency of inserting READ requests that will + trigger idling. This is the time in Msec between inserting two READ + requests. (default is 8 Msec) + +Note: Dispatch quantum is number of requests that will be dispatched +from a certain queue in a dispatch cycle. + +To do +===== +The ROW algorithm takes the scheduling policy one step further, making +it a bit more "user-needs oriented", by allowing the application to +hint on the urgency of its requests. For example: even among the READ +requests several requests may be more urgent for completion than other. +The former will go to the High priority READ queue, that is given the +bigger dispatch quantum than any other queue. + +Still need to design the way applications will "hint" on the urgency of +their requests. May be done by ioctl(). We need to look into concrete +use-cases in order to determine the best solution for this. +This will be implemented as a second phase. + +Design and implement additional services for block devices that +supports High Priority Requests. diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 3199b76f..19c873ea 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -21,6 +21,16 @@ config IOSCHED_DEADLINE a new point in the service tree and doing a batch of IO from there in case of expiry. +config IOSCHED_ROW + tristate "ROW I/O scheduler" + ---help--- + The ROW I/O scheduler gives priority to READ requests over the + WRITE requests when dispatching, without starving WRITE requests. + Requests are kept in priority queues. Dispatching is done in a RR + manner when the dispatch quantum for each queue is calculated + according to queue priority. + Most suitable for mobile devices. + config IOSCHED_CFQ tristate "CFQ I/O scheduler" # If BLK_CGROUP is a module, CFQ has to be built as module. @@ -53,6 +63,16 @@ choice config DEFAULT_DEADLINE bool "Deadline" if IOSCHED_DEADLINE=y + config DEFAULT_ROW + bool "ROW" if IOSCHED_ROW=y + help + The ROW I/O scheduler gives priority to READ requests + over the WRITE requests when dispatching, without starving + WRITE requests. Requests are kept in priority queues. 
+ Dispatching is done in a RR manner when the dispatch quantum + for each queue is defined according to queue priority. + Most suitable for mobile devices. + config DEFAULT_CFQ bool "CFQ" if IOSCHED_CFQ=y @@ -64,6 +84,7 @@ endchoice config DEFAULT_IOSCHED string default "deadline" if DEFAULT_DEADLINE + default "row" if DEFAULT_ROW default "cfq" if DEFAULT_CFQ default "noop" if DEFAULT_NOOP diff --git a/block/Makefile b/block/Makefile index 0fec4b3f..cdf7125b 100644 --- a/block/Makefile +++ b/block/Makefile @@ -12,6 +12,8 @@ obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o +obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o +obj-$(CONFIG_IOSCHED_ROW) += row-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o diff --git a/block/row-iosched.c b/block/row-iosched.c new file mode 100644 index 00000000..1f50180f --- /dev/null +++ b/block/row-iosched.c @@ -0,0 +1,694 @@ +/* + * ROW (Read Over Write) I/O scheduler. + * + * Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* See Documentation/block/row-iosched.txt */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * enum row_queue_prio - Priorities of the ROW queues + * + * This enum defines the priorities (and the number of queues) + * the requests will be disptributed to. The higher priority - + * the bigger is the dispatch quantum given to that queue. + * ROWQ_PRIO_HIGH_READ - is the higher priority queue. 
+ * + */ +enum row_queue_prio { + ROWQ_PRIO_HIGH_READ = 0, + ROWQ_PRIO_REG_READ, + ROWQ_PRIO_HIGH_SWRITE, + ROWQ_PRIO_REG_SWRITE, + ROWQ_PRIO_REG_WRITE, + ROWQ_PRIO_LOW_READ, + ROWQ_PRIO_LOW_SWRITE, + ROWQ_MAX_PRIO, +}; + +/* Flags indicating whether idling is enabled on the queue */ +static const bool queue_idling_enabled[] = { + true, /* ROWQ_PRIO_HIGH_READ */ + true, /* ROWQ_PRIO_REG_READ */ + false, /* ROWQ_PRIO_HIGH_SWRITE */ + false, /* ROWQ_PRIO_REG_SWRITE */ + false, /* ROWQ_PRIO_REG_WRITE */ + false, /* ROWQ_PRIO_LOW_READ */ + false, /* ROWQ_PRIO_LOW_SWRITE */ +}; + +/* Default values for row queues quantums in each dispatch cycle */ +static const int queue_quantum[] = { + 100, /* ROWQ_PRIO_HIGH_READ */ + 100, /* ROWQ_PRIO_REG_READ */ + 2, /* ROWQ_PRIO_HIGH_SWRITE */ + 1, /* ROWQ_PRIO_REG_SWRITE */ + 1, /* ROWQ_PRIO_REG_WRITE */ + 1, /* ROWQ_PRIO_LOW_READ */ + 1 /* ROWQ_PRIO_LOW_SWRITE */ +}; + +/* Default values for idling on read queues */ +#define ROW_IDLE_TIME_MSEC 5 /* msec */ +#define ROW_READ_FREQ_MSEC 20 /* msec */ + +/** + * struct rowq_idling_data - parameters for idling on the queue + * @last_insert_time: time the last request was inserted + * to the queue + * @begin_idling: flag indicating wether we should idle + * + */ +struct rowq_idling_data { + ktime_t last_insert_time; + bool begin_idling; +}; + +/** + * struct row_queue - requests grouping structure + * @rdata: parent row_data structure + * @fifo: fifo of requests + * @prio: queue priority (enum row_queue_prio) + * @nr_dispatched: number of requests already dispatched in + * the current dispatch cycle + * @slice: number of requests to dispatch in a cycle + * @idle_data: data for idling on queues + * + */ +struct row_queue { + struct row_data *rdata; + struct list_head fifo; + enum row_queue_prio prio; + + unsigned int nr_dispatched; + unsigned int slice; + + /* used only for READ queues */ + struct rowq_idling_data idle_data; +}; + +/** + * struct idling_data - data for idling on empty rqueue + * @idle_time: idling duration (jiffies) + * @freq: min time between two requests that + * triger idling (msec) + * @idle_work: pointer to struct delayed_work + * + */ +struct idling_data { + unsigned long idle_time; + u32 freq; + + struct workqueue_struct *idle_workqueue; + struct delayed_work idle_work; +}; + +/** + * struct row_queue - Per block device rqueue structure + * @dispatch_queue: dispatch rqueue + * @row_queues: array of priority request queues with + * dispatch quantum per rqueue + * @curr_queue: index in the row_queues array of the + * currently serviced rqueue + * @read_idle: data for idling after READ request + * @nr_reqs: nr_reqs[0] holds the number of all READ requests in + * scheduler, nr_reqs[1] holds the number of all WRITE + * requests in scheduler + * @cycle_flags: used for marking unserved queueus + * + */ +struct row_data { + struct request_queue *dispatch_queue; + + struct { + struct row_queue rqueue; + int disp_quantum; + } row_queues[ROWQ_MAX_PRIO]; + + enum row_queue_prio curr_queue; + + struct idling_data read_idle; + unsigned int nr_reqs[2]; + + unsigned int cycle_flags; +}; + +#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0])) + +#define row_log(q, fmt, args...) \ + blk_add_trace_msg(q, "%s():" fmt , __func__, ##args) +#define row_log_rowq(rdata, rowq_id, fmt, args...) 
\ + blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \ + rowq_id, ##args) + +static inline void row_mark_rowq_unserved(struct row_data *rd, + enum row_queue_prio qnum) +{ + rd->cycle_flags |= (1 << qnum); +} + +static inline void row_clear_rowq_unserved(struct row_data *rd, + enum row_queue_prio qnum) +{ + rd->cycle_flags &= ~(1 << qnum); +} + +static inline int row_rowq_unserved(struct row_data *rd, + enum row_queue_prio qnum) +{ + return rd->cycle_flags & (1 << qnum); +} + +/******************** Static helper functions ***********************/ +/* + * kick_queue() - Wake up device driver queue thread + * @work: pointer to struct work_struct + * + * This is a idling delayed work function. It's purpose is to wake up the + * device driver in order for it to start fetching requests. + * + */ +static void kick_queue(struct work_struct *work) +{ + struct delayed_work *idle_work = to_delayed_work(work); + struct idling_data *read_data = + container_of(idle_work, struct idling_data, idle_work); + struct row_data *rd = + container_of(read_data, struct row_data, read_idle); + + row_log_rowq(rd, rd->curr_queue, "Performing delayed work"); + /* Mark idling process as done */ + rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false; + + if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) + row_log(rd->dispatch_queue, "No requests in scheduler"); + else { + spin_lock_irq(rd->dispatch_queue->queue_lock); + __blk_run_queue(rd->dispatch_queue); + spin_unlock_irq(rd->dispatch_queue->queue_lock); + } +} + +/* + * row_restart_disp_cycle() - Restart the dispatch cycle + * @rd: pointer to struct row_data + * + * This function restarts the dispatch cycle by: + * - Setting current queue to ROWQ_PRIO_HIGH_READ + * - For each queue: reset the number of requests dispatched in + * the cycle + */ +static inline void row_restart_disp_cycle(struct row_data *rd) +{ + int i; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) + rd->row_queues[i].rqueue.nr_dispatched = 0; + + rd->curr_queue = ROWQ_PRIO_HIGH_READ; + row_log(rd->dispatch_queue, "Restarting cycle"); +} + +static inline void row_get_next_queue(struct row_data *rd) +{ + rd->curr_queue++; + if (rd->curr_queue == ROWQ_MAX_PRIO) + row_restart_disp_cycle(rd); +} + +/******************* Elevator callback functions *********************/ + +/* + * row_add_request() - Add request to the scheduler + * @q: requests queue + * @rq: request to add + * + */ +static void row_add_request(struct request_queue *q, + struct request *rq) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + struct row_queue *rqueue = RQ_ROWQ(rq); + + list_add_tail(&rq->queuelist, &rqueue->fifo); + rd->nr_reqs[rq_data_dir(rq)]++; + rq_set_fifo_time(rq, jiffies); /* for statistics*/ + + if (queue_idling_enabled[rqueue->prio]) { + if (delayed_work_pending(&rd->read_idle.idle_work)) + (void)cancel_delayed_work( + &rd->read_idle.idle_work); + if (ktime_to_ms(ktime_sub(ktime_get(), + rqueue->idle_data.last_insert_time)) < + rd->read_idle.freq) { + rqueue->idle_data.begin_idling = true; + row_log_rowq(rd, rqueue->prio, "Enable idling"); + } else { + rqueue->idle_data.begin_idling = false; + row_log_rowq(rd, rqueue->prio, "Disable idling"); + } + + rqueue->idle_data.last_insert_time = ktime_get(); + } + row_log_rowq(rd, rqueue->prio, "added request"); +} + +/* + * row_remove_request() - Remove given request from scheduler + * @q: requests queue + * @rq: request to remove + * + */ +static void row_remove_request(struct request_queue *q, + struct request *rq) +{ + struct row_data *rd = 
(struct row_data *)q->elevator->elevator_data; + + rq_fifo_clear(rq); + rd->nr_reqs[rq_data_dir(rq)]--; +} + +/* + * row_dispatch_insert() - move request to dispatch queue + * @rd: pointer to struct row_data + * + * This function moves the next request to dispatch from + * rd->curr_queue to the dispatch queue + * + */ +static void row_dispatch_insert(struct row_data *rd) +{ + struct request *rq; + + rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next); + row_remove_request(rd->dispatch_queue, rq); + elv_dispatch_add_tail(rd->dispatch_queue, rq); + rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++; + row_clear_rowq_unserved(rd, rd->curr_queue); + row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d", + rd->row_queues[rd->curr_queue].rqueue.nr_dispatched); +} + +/* + * row_choose_queue() - choose the next queue to dispatch from + * @rd: pointer to struct row_data + * + * Updates rd->curr_queue. Returns 1 if there are requests to + * dispatch, 0 if there are no requests in scheduler + * + */ +static int row_choose_queue(struct row_data *rd) +{ + int prev_curr_queue = rd->curr_queue; + + if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) { + row_log(rd->dispatch_queue, "No more requests in scheduler"); + return 0; + } + + row_get_next_queue(rd); + + /* + * Loop over all queues to find the next queue that is not empty. + * Stop when you get back to curr_queue + */ + while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo) + && rd->curr_queue != prev_curr_queue) { + /* Mark rqueue as unserved */ + row_mark_rowq_unserved(rd, rd->curr_queue); + row_get_next_queue(rd); + } + + return 1; +} + +/* + * row_dispatch_requests() - selects the next request to dispatch + * @q: requests queue + * @force: ignored + * + * Return 0 if no requests were moved to the dispatch queue. + * 1 otherwise + * + */ +static int row_dispatch_requests(struct request_queue *q, int force) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + int ret = 0, currq, i; + + currq = rd->curr_queue; + + /* + * Find the first unserved queue (with higher priority then currq) + * that is not empty + */ + for (i = 0; i < currq; i++) { + if (row_rowq_unserved(rd, i) && + !list_empty(&rd->row_queues[i].rqueue.fifo)) { + row_log_rowq(rd, currq, + " Preemting for unserved rowq%d", i); + rd->curr_queue = i; + row_dispatch_insert(rd); + ret = 1; + goto done; + } + } + + if (rd->row_queues[currq].rqueue.nr_dispatched >= + rd->row_queues[currq].disp_quantum) { + rd->row_queues[currq].rqueue.nr_dispatched = 0; + row_log_rowq(rd, currq, "Expiring rqueue"); + ret = row_choose_queue(rd); + if (ret) + row_dispatch_insert(rd); + goto done; + } + + /* Dispatch from curr_queue */ + if (list_empty(&rd->row_queues[currq].rqueue.fifo)) { + /* check idling */ + if (delayed_work_pending(&rd->read_idle.idle_work)) { + if (force) { + (void)cancel_delayed_work( + &rd->read_idle.idle_work); + row_log_rowq(rd, currq, + "Canceled delayed work - forced dispatch"); + } else { + row_log_rowq(rd, currq, + "Delayed work pending. Exiting"); + goto done; + } + } + + if (!force && queue_idling_enabled[currq] && + rd->row_queues[currq].rqueue.idle_data.begin_idling) { + if (!queue_delayed_work(rd->read_idle.idle_workqueue, + &rd->read_idle.idle_work, + rd->read_idle.idle_time)) { + row_log_rowq(rd, currq, + "Work already on queue!"); + pr_err("ROW_BUG: Work already on queue!"); + } else + row_log_rowq(rd, currq, + "Scheduled delayed work. exiting"); + goto done; + } else { + row_log_rowq(rd, currq, + "Currq empty. 
Choose next queue"); + ret = row_choose_queue(rd); + if (!ret) + goto done; + } + } + + ret = 1; + row_dispatch_insert(rd); + +done: + return ret; +} + +/* + * row_init_queue() - Init scheduler data structures + * @q: requests queue + * + * Return pointer to struct row_data to be saved in elevator for + * this dispatch queue + * + */ +static void *row_init_queue(struct request_queue *q) +{ + + struct row_data *rdata; + int i; + + rdata = kmalloc_node(sizeof(*rdata), + GFP_KERNEL | __GFP_ZERO, q->node); + if (!rdata) + return NULL; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) { + INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo); + rdata->row_queues[i].disp_quantum = queue_quantum[i]; + rdata->row_queues[i].rqueue.rdata = rdata; + rdata->row_queues[i].rqueue.prio = i; + rdata->row_queues[i].rqueue.idle_data.begin_idling = false; + rdata->row_queues[i].rqueue.idle_data.last_insert_time = + ktime_set(0, 0); + } + + /* + * Currently idling is enabled only for READ queues. If we want to + * enable it for write queues also, note that idling frequency will + * be the same in both cases + */ + rdata->read_idle.idle_time = msecs_to_jiffies(ROW_IDLE_TIME_MSEC); + /* Maybe 0 on some platforms */ + if (!rdata->read_idle.idle_time) + rdata->read_idle.idle_time = 1; + rdata->read_idle.freq = ROW_READ_FREQ_MSEC; + rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!rdata->read_idle.idle_workqueue) + panic("Failed to create idle workqueue\n"); + INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue); + + rdata->curr_queue = ROWQ_PRIO_HIGH_READ; + rdata->dispatch_queue = q; + + rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0; + + return rdata; +} + +/* + * row_exit_queue() - called on unloading the RAW scheduler + * @e: poiner to struct elevator_queue + * + */ +static void row_exit_queue(struct elevator_queue *e) +{ + struct row_data *rd = (struct row_data *)e->elevator_data; + int i; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) + BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo)); + (void)cancel_delayed_work_sync(&rd->read_idle.idle_work); + BUG_ON(delayed_work_pending(&rd->read_idle.idle_work)); + destroy_workqueue(rd->read_idle.idle_workqueue); + kfree(rd); +} + +/* + * row_merged_requests() - Called when 2 requests are merged + * @q: requests queue + * @rq: request the two requests were merged into + * @next: request that was merged + */ +static void row_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct row_queue *rqueue = RQ_ROWQ(next); + + list_del_init(&next->queuelist); + + rqueue->rdata->nr_reqs[rq_data_dir(rq)]--; +} + +/* + * get_queue_type() - Get queue type for a given request + * + * This is a helping function which purpose is to determine what + * ROW queue the given request should be added to (and + * dispatched from leter on) + * + * TODO: Right now only 3 queues are used REG_READ, REG_WRITE + * and REG_SWRITE + */ +static enum row_queue_prio get_queue_type(struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + const bool is_sync = rq_is_sync(rq); + + if (data_dir == READ) + return ROWQ_PRIO_REG_READ; + else if (is_sync) + return ROWQ_PRIO_REG_SWRITE; + else + return ROWQ_PRIO_REG_WRITE; +} + +/* + * row_set_request() - Set ROW data structures associated with this request. 
+ * @q: requests queue + * @rq: pointer to the request + * @gfp_mask: ignored + * + */ +static int +row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + rq->elv.priv[0] = + (void *)(&rd->row_queues[get_queue_type(rq)]); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 0; +} + +/********** Helping sysfs functions/defenitions for ROW attributes ******/ +static ssize_t row_var_show(int var, char *page) +{ + return snprintf(page, 100, "%d\n", var); +} + +static ssize_t row_var_store(int *var, const char *page, size_t count) +{ + int err; + err = kstrtoul(page, 10, (unsigned long *)var); + + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct row_data *rowd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return row_var_show(__data, (page)); \ +} +SHOW_FUNCTION(row_hp_read_quantum_show, + rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0); +SHOW_FUNCTION(row_rp_read_quantum_show, + rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0); +SHOW_FUNCTION(row_hp_swrite_quantum_show, + rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0); +SHOW_FUNCTION(row_rp_swrite_quantum_show, + rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0); +SHOW_FUNCTION(row_rp_write_quantum_show, + rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0); +SHOW_FUNCTION(row_lp_read_quantum_show, + rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0); +SHOW_FUNCTION(row_lp_swrite_quantum_show, + rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0); +SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 1); +SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, \ + const char *page, size_t count) \ +{ \ + struct row_data *rowd = e->elevator_data; \ + int __data; \ + int ret = row_var_store(&__data, (page), count); \ + if (__CONV) \ + __data = (int)msecs_to_jiffies(__data); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(row_hp_read_quantum_store, +&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX, 0); +STORE_FUNCTION(row_rp_read_quantum_store, + &rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_hp_swrite_quantum_store, + &rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_rp_swrite_quantum_store, + &rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_rp_write_quantum_store, + &rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_lp_read_quantum_store, + &rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_lp_swrite_quantum_store, + &rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, + 1, INT_MAX, 1); +STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 1); +STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq, 1, INT_MAX, 0); + +#undef STORE_FUNCTION + +#define ROW_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \ + row_##name##_store) + +static struct elv_fs_entry row_attrs[] = { + 
ROW_ATTR(hp_read_quantum), + ROW_ATTR(rp_read_quantum), + ROW_ATTR(hp_swrite_quantum), + ROW_ATTR(rp_swrite_quantum), + ROW_ATTR(rp_write_quantum), + ROW_ATTR(lp_read_quantum), + ROW_ATTR(lp_swrite_quantum), + ROW_ATTR(read_idle), + ROW_ATTR(read_idle_freq), + __ATTR_NULL +}; + +static struct elevator_type iosched_row = { + .ops = { + .elevator_merge_req_fn = row_merged_requests, + .elevator_dispatch_fn = row_dispatch_requests, + .elevator_add_req_fn = row_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_set_req_fn = row_set_request, + .elevator_init_fn = row_init_queue, + .elevator_exit_fn = row_exit_queue, + }, + + .elevator_attrs = row_attrs, + .elevator_name = "row", + .elevator_owner = THIS_MODULE, +}; + +static int __init row_init(void) +{ + elv_register(&iosched_row); + return 0; +} + +static void __exit row_exit(void) +{ + elv_unregister(&iosched_row); +} + +module_init(row_init); +module_exit(row_exit); + +MODULE_LICENSE("GPLv2"); +MODULE_DESCRIPTION("Read Over Write IO scheduler"); From 50291df4a915873c3cbe41ca5cdb29c09d95c55c Mon Sep 17 00:00:00 2001 From: edoko Date: Tue, 25 Dec 2012 13:01:12 +0900 Subject: [PATCH 016/117] =?UTF-8?q?row-iosched.c:=20Fix=20build=20errors?= =?UTF-8?q?=20Error=20log=20:=20block/row-iosched.c:=20In=20function=20?= =?UTF-8?q?=E2=80=98row=5Fadd=5Frequest=E2=80=99:=20block/row-iosched.c:25?= =?UTF-8?q?2:29:=20error:=20=E2=80=98struct=20request=E2=80=99=20has=20no?= =?UTF-8?q?=20member=20named=20=E2=80=98elv=E2=80=99=20block/row-iosched.c?= =?UTF-8?q?:=20In=20function=20=E2=80=98row=5Fmerged=5Frequests=E2=80=99:?= =?UTF-8?q?=20block/row-iosched.c:512:31:=20error:=20=E2=80=98struct=20req?= =?UTF-8?q?uest=E2=80=99=20has=20no=20member=20named=20=E2=80=98elv?= =?UTF-8?q?=E2=80=99=20block/row-iosched.c:=20In=20function=20=E2=80=98row?= =?UTF-8?q?=5Fset=5Frequest=E2=80=99:=20block/row-iosched.c:556:4:=20error?= =?UTF-8?q?:=20=E2=80=98struct=20request=E2=80=99=20has=20no=20member=20na?= =?UTF-8?q?med=20=E2=80=98elv=E2=80=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- block/row-iosched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/row-iosched.c b/block/row-iosched.c index 1f50180f..e60ff4ef 100644 --- a/block/row-iosched.c +++ b/block/row-iosched.c @@ -154,7 +154,7 @@ struct row_data { unsigned int cycle_flags; }; -#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0])) +#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elevator_private[0])) #define row_log(q, fmt, args...) 
\ blk_add_trace_msg(q, "%s():" fmt , __func__, ##args) @@ -553,7 +553,7 @@ row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - rq->elv.priv[0] = + rq->elevator_private[0] = (void *)(&rd->row_queues[get_queue_type(rq)]); spin_unlock_irqrestore(q->queue_lock, flags); From f5cd07b3ee752080866fc6736cb5c88b6bcc9e4b Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 15 Jan 2013 10:03:28 -0500 Subject: [PATCH 017/117] Enable ROW scheduler --- arch/arm/configs/vigor_aosp_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 337d9da8..b1edf68b 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -167,6 +167,8 @@ CONFIG_LBDAF=y CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_ROW=y +# CONFIG_DEFAULT_ROW is not set CONFIG_DEFAULT_DEADLINE=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set From d5cc63b3b3dc08f4b23b07168e3f0d25ecc5b50c Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 15 Jan 2013 13:53:14 -0500 Subject: [PATCH 018/117] Make ROW scheduler default --- arch/arm/configs/vigor_aosp_defconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index b1edf68b..ef466aa0 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -168,8 +168,8 @@ CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y CONFIG_IOSCHED_ROW=y -# CONFIG_DEFAULT_ROW is not set -CONFIG_DEFAULT_DEADLINE=y +CONFIG_DEFAULT_ROW=y +# CONFIG_DEFAULT_DEADLINE is not set # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="deadline" From 7679c2e4bebe4e2ade95b4c2d41666e94c568938 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Wed, 16 Jan 2013 13:26:24 -0500 Subject: [PATCH 019/117] Remove interactiveX2 governor This reverts commit 6f6de16037888aa174f7856839b13c28ba2085b0. 
Conflicts: arch/arm/configs/vigor_aosp_defconfig drivers/cpufreq/Kconfig drivers/cpufreq/Makefile include/linux/cpufreq.h --- arch/arm/configs/vigor_aosp_defconfig | 2 - drivers/cpufreq/Kconfig | 15 - drivers/cpufreq/Makefile | 1 - drivers/cpufreq/cpufreq_interactiveX2.c | 772 ------------------------ include/linux/cpufreq.h | 3 - 5 files changed, 793 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_interactiveX2.c diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index ef466aa0..06c8fc3d 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -636,7 +636,6 @@ CONFIG_CPU_VOLTAGE_TABLE=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX2 is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -644,7 +643,6 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE=y CONFIG_CPU_FREQ_GOV_INTERACTIVE=y -CONFIG_CPU_FREQ_GOV_INTERACTIVEX2=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_LIONHEART=y CONFIG_CPU_IDLE=y diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 5d4c7ef0..259b0a3a 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -109,12 +109,6 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE loading your cpufreq low-level hardware driver, using the 'interactive' governor for latency-sensitive workloads. -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX2 - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX2 - help - Use the CPU governor 'interactiveX2' by imoseyon as default. - config CPU_FREQ_DEFAULT_GOV_LIONHEART bool "lionheart" select CPU_FREQ_GOV_LIONHEART @@ -224,15 +218,6 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. -config CPU_FREQ_GOV_INTERACTIVEX2 - tristate "'interactiveX2' cpufreq governor" - depends on CPU_FREQ - help - select CPU_FREQ - Use the CPU governor 'interactiveX2' by imoseyon. - - If in doubt, say N. - config CPU_FREQ_GOV_LIONHEART tristate "'Lionheart' cpufreq governor" depends on CPU_FREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index d104b7d8..eeb3e827 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX2) += cpufreq_interactiveX2.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o # CPUfreq cross-arch helpers diff --git a/drivers/cpufreq/cpufreq_interactiveX2.c b/drivers/cpufreq/cpufreq_interactiveX2.c deleted file mode 100644 index c1f2b768..00000000 --- a/drivers/cpufreq/cpufreq_interactiveX2.c +++ /dev/null @@ -1,772 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_interactivex2.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * Author: Mike Chan (mike@android.com) - * Modified for early suspend support and hotplugging by imoseyon (imoseyon@gmail.com) - * interactiveX V2 - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static atomic_t active_count = ATOMIC_INIT(0); -static unsigned long stored_timer_rate; - -struct cpufreq_interactivex_cpuinfo { - struct timer_list cpu_timer; - int timer_idlecancel; - u64 time_in_idle; - u64 idle_exit_time; - u64 timer_run_time; - int idling; - u64 target_set_time; - u64 target_set_time_in_idle; - struct cpufreq_policy *policy; - struct cpufreq_frequency_table *freq_table; - unsigned int target_freq; - int governor_enabled; -}; - -static DEFINE_PER_CPU(struct cpufreq_interactivex_cpuinfo, cpuinfo); - -/* Workqueues handle frequency scaling */ -static struct task_struct *up_task; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_down_work; -static cpumask_t up_cpumask; -static spinlock_t up_cpumask_lock; -static cpumask_t down_cpumask; -static spinlock_t down_cpumask_lock; -static struct mutex set_speed_lock; - -// used for suspend code -static unsigned int enabled = 0; -static unsigned int registration = 0; -static unsigned int suspendfreq = 700000; - -/* Hi speed to bump to from lo speed when load burst (default max) */ -static u64 hispeed_freq; - -/* Go to hi speed when CPU load at or above this value. */ -#define DEFAULT_GO_HISPEED_LOAD 95 -static unsigned long go_hispeed_load; - -/* - * The minimum amount of time to spend at a frequency before we can ramp down. - */ -#define DEFAULT_MIN_SAMPLE_TIME 20 * USEC_PER_MSEC -static unsigned long min_sample_time; - -/* - * The sample rate of the timer used to increase frequency - */ -#define DEFAULT_TIMER_RATE 20 * USEC_PER_MSEC -static unsigned long timer_rate; - -static int cpufreq_governor_interactivex2(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX2 -static -#endif -struct cpufreq_governor cpufreq_gov_interactivex2 = { - .name = "interactivex2", - .governor = cpufreq_governor_interactivex2, - .max_transition_latency = 10000000, - .owner = THIS_MODULE, -}; - -static void cpufreq_interactivex_timer(unsigned long data) -{ - unsigned int delta_idle; - unsigned int delta_time; - int cpu_load; - int load_since_change; - u64 time_in_idle; - u64 idle_exit_time; - struct cpufreq_interactivex_cpuinfo *pcpu = - &per_cpu(cpuinfo, data); - u64 now_idle; - unsigned int new_freq; - unsigned int index; - unsigned long flags; - - smp_rmb(); - - if (!pcpu->governor_enabled) - goto exit; - - /* - * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, - * this lets idle exit know the current idle time sample has - * been processed, and idle exit can generate a new sample and - * re-arm the timer. This prevents a concurrent idle - * exit on that CPU from writing a new set of info at the same time - * the timer function runs (the timer function can't use that info - * until more time passes). - */ - time_in_idle = pcpu->time_in_idle; - idle_exit_time = pcpu->idle_exit_time; - now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); - smp_wmb(); - - /* If we raced with cancelling a timer, skip. 
*/ - if (!idle_exit_time) - goto exit; - - delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - idle_exit_time); - - /* - * If timer ran less than 1ms after short-term sample started, retry. - */ - if (delta_time < 1000) - goto rearm; - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (delta_time - delta_idle) / delta_time; - - delta_idle = (unsigned int) cputime64_sub(now_idle, - pcpu->target_set_time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - pcpu->target_set_time); - - if ((delta_time == 0) || (delta_idle > delta_time)) - load_since_change = 0; - else - load_since_change = - 100 * (delta_time - delta_idle) / delta_time; - - /* - * Choose greater of short-term load (since last idle timer - * started or timer function re-armed itself) or long-term load - * (since last frequency change). - */ - if (load_since_change > cpu_load) - cpu_load = load_since_change; - - if (cpu_load >= go_hispeed_load) { - if (pcpu->target_freq <= pcpu->policy->min) { - new_freq = hispeed_freq; - } else - new_freq = pcpu->policy->max * cpu_load / 100; - } else { - new_freq = pcpu->policy->cur * cpu_load / 100; - } - - if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, - new_freq, CPUFREQ_RELATION_H, - &index)) { - pr_warn_once("timer %d: cpufreq_frequency_table_target error\n", - (int) data); - goto rearm; - } - - new_freq = pcpu->freq_table[index].frequency; - - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. - */ - if (new_freq < pcpu->target_freq) { - if (cputime64_sub(pcpu->timer_run_time, pcpu->target_set_time) - < min_sample_time) - goto rearm; - } - - if (pcpu->target_freq == new_freq) { - goto rearm_if_notmax; - } - - pcpu->target_set_time_in_idle = now_idle; - pcpu->target_set_time = pcpu->timer_run_time; - - if (new_freq < pcpu->target_freq) { - pcpu->target_freq = new_freq; - spin_lock_irqsave(&down_cpumask_lock, flags); - cpumask_set_cpu(data, &down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - queue_work(down_wq, &freq_scale_down_work); - } else { - pcpu->target_freq = new_freq; - spin_lock_irqsave(&up_cpumask_lock, flags); - cpumask_set_cpu(data, &up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - wake_up_process(up_task); - } - -rearm_if_notmax: - /* - * Already set max speed and don't see a need to change that, - * wait until next idle to re-evaluate, don't need timer. - */ - if (pcpu->target_freq == pcpu->policy->max) - goto exit; - -rearm: - if (!timer_pending(&pcpu->cpu_timer)) { - /* - * If already at min: if that CPU is idle, don't set timer. - * Else cancel the timer if that CPU goes idle. We don't - * need to re-evaluate speed until the next idle exit. - */ - if (pcpu->target_freq == pcpu->policy->min) { - smp_rmb(); - - if (pcpu->idling) - goto exit; - - pcpu->timer_idlecancel = 1; - } - - pcpu->time_in_idle = get_cpu_idle_time_us( - data, &pcpu->idle_exit_time); - mod_timer(&pcpu->cpu_timer, - jiffies + usecs_to_jiffies(timer_rate)); - } - -exit: - return; -} - -static void cpufreq_interactivex_idle_start(void) -{ - struct cpufreq_interactivex_cpuinfo *pcpu = - &per_cpu(cpuinfo, smp_processor_id()); - int pending; - - if (!pcpu->governor_enabled) - return; - - pcpu->idling = 1; - smp_wmb(); - pending = timer_pending(&pcpu->cpu_timer); - - if (pcpu->target_freq != pcpu->policy->min) { -#ifdef CONFIG_SMP - /* - * Entering idle while not at lowest speed. 
On some - * platforms this can hold the other CPU(s) at that speed - * even though the CPU is idle. Set a timer to re-evaluate - * speed so this idle CPU doesn't hold the other CPUs above - * min indefinitely. This should probably be a quirk of - * the CPUFreq driver. - */ - if (!pending) { - pcpu->time_in_idle = get_cpu_idle_time_us( - smp_processor_id(), &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, - jiffies + usecs_to_jiffies(timer_rate)); - } -#endif - } else { - /* - * If at min speed and entering idle after load has - * already been evaluated, and a timer has been set just in - * case the CPU suddenly goes busy, cancel that timer. The - * CPU didn't go busy; we'll recheck things upon idle exit. - */ - if (pending && pcpu->timer_idlecancel) { - del_timer(&pcpu->cpu_timer); - /* - * Ensure last timer run time is after current idle - * sample start time, so next idle exit will always - * start a new idle sampling period. - */ - pcpu->idle_exit_time = 0; - pcpu->timer_idlecancel = 0; - } - } - -} - -static void cpufreq_interactivex_idle_end(void) -{ - struct cpufreq_interactivex_cpuinfo *pcpu = - &per_cpu(cpuinfo, smp_processor_id()); - - pcpu->idling = 0; - smp_wmb(); - - /* - * Arm the timer for 1-2 ticks later if not already, and if the timer - * function has already processed the previous load sampling - * interval. (If the timer is not pending but has not processed - * the previous interval, it is probably racing with us on another - * CPU. Let it compute load based on the previous sample and then - * re-arm the timer for another interval when it's done, rather - * than updating the interval start time to be "now", which doesn't - * give the timer function enough time to make a decision on this - * run.) 
- */ - if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time >= pcpu->idle_exit_time && - pcpu->governor_enabled) { - pcpu->time_in_idle = - get_cpu_idle_time_us(smp_processor_id(), - &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, - jiffies + usecs_to_jiffies(timer_rate)); - } - -} - -static int cpufreq_interactivex_up_task(void *data) -{ - unsigned int cpu; - cpumask_t tmp_mask; - unsigned long flags; - struct cpufreq_interactivex_cpuinfo *pcpu; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - spin_lock_irqsave(&up_cpumask_lock, flags); - - if (cpumask_empty(&up_cpumask)) { - spin_unlock_irqrestore(&up_cpumask_lock, flags); - schedule(); - - if (kthread_should_stop()) - break; - - spin_lock_irqsave(&up_cpumask_lock, flags); - } - - set_current_state(TASK_RUNNING); - tmp_mask = up_cpumask; - cpumask_clear(&up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - - for_each_cpu(cpu, &tmp_mask) { - unsigned int j; - unsigned int max_freq = 0; - - pcpu = &per_cpu(cpuinfo, cpu); - smp_rmb(); - - if (!pcpu->governor_enabled) - continue; - - mutex_lock(&set_speed_lock); - - for_each_cpu(j, pcpu->policy->cpus) { - struct cpufreq_interactivex_cpuinfo *pjcpu = - &per_cpu(cpuinfo, j); - - if (pjcpu->target_freq > max_freq) - max_freq = pjcpu->target_freq; - } - - if (max_freq != pcpu->policy->cur) - __cpufreq_driver_target(pcpu->policy, - max_freq, - CPUFREQ_RELATION_H); - mutex_unlock(&set_speed_lock); - } - } - - return 0; -} - -static void cpufreq_interactivex_freq_down(struct work_struct *work) -{ - unsigned int cpu; - cpumask_t tmp_mask; - unsigned long flags; - struct cpufreq_interactivex_cpuinfo *pcpu; - - spin_lock_irqsave(&down_cpumask_lock, flags); - tmp_mask = down_cpumask; - cpumask_clear(&down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - - for_each_cpu(cpu, &tmp_mask) { - unsigned int j; - unsigned int max_freq = 0; - - pcpu = &per_cpu(cpuinfo, cpu); - smp_rmb(); - - if (!pcpu->governor_enabled) - continue; - - mutex_lock(&set_speed_lock); - - for_each_cpu(j, pcpu->policy->cpus) { - struct cpufreq_interactivex_cpuinfo *pjcpu = - &per_cpu(cpuinfo, j); - - if (pjcpu->target_freq > max_freq) - max_freq = pjcpu->target_freq; - } - - if (max_freq != pcpu->policy->cur) - __cpufreq_driver_target(pcpu->policy, max_freq, - CPUFREQ_RELATION_H); - - mutex_unlock(&set_speed_lock); - } -} - -static ssize_t show_hispeed_freq(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%llu\n", hispeed_freq); -} - -static ssize_t store_hispeed_freq(struct kobject *kobj, - struct attribute *attr, const char *buf, - size_t count) -{ - int ret; - u64 val; - - ret = strict_strtoull(buf, 0, &val); - if (ret < 0) - return ret; - hispeed_freq = val; - return count; -} - -static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, - show_hispeed_freq, store_hispeed_freq); - - -static ssize_t show_go_hispeed_load(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", go_hispeed_load); -} - -static ssize_t store_go_hispeed_load(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - go_hispeed_load = val; - return count; -} - -static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, - show_go_hispeed_load, store_go_hispeed_load); - -static ssize_t show_min_sample_time(struct kobject *kobj, - struct 
attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", min_sample_time); -} - -static ssize_t store_min_sample_time(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - min_sample_time = val; - return count; -} - -static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, - show_min_sample_time, store_min_sample_time); - -static ssize_t show_timer_rate(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", timer_rate); -} - -static ssize_t store_timer_rate(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - timer_rate = val; - return count; -} - -static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, - show_timer_rate, store_timer_rate); - -static struct attribute *interactivex_attributes[] = { - &hispeed_freq_attr.attr, - &go_hispeed_load_attr.attr, - &min_sample_time_attr.attr, - &timer_rate_attr.attr, - NULL, -}; - -static struct attribute_group interactivex_attr_group = { - .attrs = interactivex_attributes, - .name = "interactivex", -}; - -static void interactivex_suspend(int suspend) -{ - unsigned int cpu; - cpumask_t tmp_mask; - struct cpufreq_interactivex_cpuinfo *pcpu; - - if (!enabled) return; - if (!suspend) { - mutex_lock(&set_speed_lock); - if (num_online_cpus() < 2) cpu_up(1); - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); - smp_rmb(); - if (!pcpu->governor_enabled) - continue; - __cpufreq_driver_target(pcpu->policy, hispeed_freq, CPUFREQ_RELATION_L); - } - mutex_unlock(&set_speed_lock); - pr_info("[imoseyon] interactivex awake cpu1 up\n"); - } else { - mutex_lock(&set_speed_lock); - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); - smp_rmb(); - if (!pcpu->governor_enabled) - continue; - __cpufreq_driver_target(pcpu->policy, suspendfreq, CPUFREQ_RELATION_H); - } - if (num_online_cpus() > 1) cpu_down(1); - mutex_unlock(&set_speed_lock); - pr_info("[imoseyon] interactivex suspended cpu1 down\n"); - } -} - -static void interactivex_early_suspend(struct early_suspend *handler) { - stored_timer_rate = timer_rate; - timer_rate = DEFAULT_TIMER_RATE * 10; - if (!registration) interactivex_suspend(1); -} - -static void interactivex_late_resume(struct early_suspend *handler) { - interactivex_suspend(0); - timer_rate = stored_timer_rate; -} - -static struct early_suspend interactivex_power_suspend = { - .suspend = interactivex_early_suspend, - .resume = interactivex_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int cpufreq_governor_interactivex2(struct cpufreq_policy *policy, - unsigned int event) -{ - int rc; - unsigned int j; - struct cpufreq_interactivex_cpuinfo *pcpu; - struct cpufreq_frequency_table *freq_table; - - switch (event) { - case CPUFREQ_GOV_START: - if (!cpu_online(policy->cpu)) - return -EINVAL; - - freq_table = - cpufreq_frequency_get_table(policy->cpu); - - for_each_cpu(j, policy->cpus) { - pcpu = &per_cpu(cpuinfo, j); - pcpu->policy = policy; - pcpu->target_freq = policy->cur; - pcpu->freq_table = freq_table; - pcpu->target_set_time_in_idle = - get_cpu_idle_time_us(j, - &pcpu->target_set_time); - pcpu->governor_enabled = 1; - smp_wmb(); - } - - if (!hispeed_freq) - hispeed_freq = policy->max; - - /* - * Do not register the idle hook and create sysfs - * entries if we 
have already done so. - */ - if (atomic_inc_return(&active_count) > 1) - return 0; - - rc = sysfs_create_group(cpufreq_global_kobject, - &interactivex_attr_group); - if (rc) - return rc; - - enabled = 1; - registration = 1; - register_early_suspend(&interactivex_power_suspend); - registration = 0; - pr_info("[imoseyon] interactivex start\n"); - break; - - case CPUFREQ_GOV_STOP: - for_each_cpu(j, policy->cpus) { - pcpu = &per_cpu(cpuinfo, j); - pcpu->governor_enabled = 0; - smp_wmb(); - del_timer_sync(&pcpu->cpu_timer); - - /* - * Reset idle exit time since we may cancel the timer - * before it can run after the last idle exit time, - * to avoid tripping the check in idle exit for a timer - * that is trying to run. - */ - pcpu->idle_exit_time = 0; - } - - flush_work(&freq_scale_down_work); - if (atomic_dec_return(&active_count) > 0) - return 0; - - sysfs_remove_group(cpufreq_global_kobject, - &interactivex_attr_group); - - enabled = 0; - unregister_early_suspend(&interactivex_power_suspend); - pr_info("[imoseyon] interactivex inactive\n"); - break; - - case CPUFREQ_GOV_LIMITS: - if (policy->max < policy->cur) - __cpufreq_driver_target(policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > policy->cur) - __cpufreq_driver_target(policy, - policy->min, CPUFREQ_RELATION_L); - break; - } - return 0; -} - -static int cpufreq_interactivex_idle_notifier(struct notifier_block *nb, - unsigned long val, - void *data) -{ - switch (val) { - case IDLE_START: - cpufreq_interactivex_idle_start(); - break; - case IDLE_END: - cpufreq_interactivex_idle_end(); - break; - } - - return 0; -} - -static struct notifier_block cpufreq_interactivex_idle_nb = { - .notifier_call = cpufreq_interactivex_idle_notifier, -}; - -static int __init cpufreq_interactivex_init(void) -{ - unsigned int i; - struct cpufreq_interactivex_cpuinfo *pcpu; - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - - go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; - min_sample_time = DEFAULT_MIN_SAMPLE_TIME; - timer_rate = DEFAULT_TIMER_RATE; - - /* Initalize per-cpu timers */ - for_each_possible_cpu(i) { - pcpu = &per_cpu(cpuinfo, i); - init_timer(&pcpu->cpu_timer); - pcpu->cpu_timer.function = cpufreq_interactivex_timer; - pcpu->cpu_timer.data = i; - } - - up_task = kthread_create(cpufreq_interactivex_up_task, NULL, - "kinteractivexup"); - if (IS_ERR(up_task)) - return PTR_ERR(up_task); - - sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); - get_task_struct(up_task); - - /* No rescuer thread, bind to CPU queuing the work for possibly - warm cache (probably doesn't matter much). 
*/ - down_wq = alloc_workqueue("knteractive_down", 0, 1); - - if (!down_wq) - goto err_freeuptask; - - INIT_WORK(&freq_scale_down_work, - cpufreq_interactivex_freq_down); - - spin_lock_init(&up_cpumask_lock); - spin_lock_init(&down_cpumask_lock); - mutex_init(&set_speed_lock); - - idle_notifier_register(&cpufreq_interactivex_idle_nb); - - return cpufreq_register_governor(&cpufreq_gov_interactivex2); - -err_freeuptask: - put_task_struct(up_task); - return -ENOMEM; -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX2 -fs_initcall(cpufreq_interactivex_init); -#else -module_init(cpufreq_interactivex_init); -#endif - -static void __exit cpufreq_interactivex_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_interactivex2); - kthread_stop(up_task); - put_task_struct(up_task); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_interactivex_exit); - -MODULE_AUTHOR("Mike Chan "); -MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for " - "Latency sensitive workloads"); -MODULE_LICENSE("GPL"); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index a2319eca..d8d5dcfd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -366,9 +366,6 @@ extern struct cpufreq_governor cpufreq_gov_conservative; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX2) -extern struct cpufreq_governor cpufreq_gov_interactivex2; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactivex2) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART) extern struct cpufreq_governor cpufreq_gov_lionheart; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lionheart) From e9a16e0f6a752d8a072c90300b0774f66299f999 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Wed, 16 Jan 2013 14:36:21 -0500 Subject: [PATCH 020/117] Set lionheart governor as default --- arch/arm/configs/vigor_aosp_defconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 06c8fc3d..163605d6 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -633,10 +633,10 @@ CONFIG_CPU_VOLTAGE_TABLE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART=y CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y From 2a9e8d093ed48d9620892b8f1421f8609171239f Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Wed, 16 Jan 2013 14:54:47 -0500 Subject: [PATCH 021/117] Update version --- scripts/mkcompile_h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 6e308e6d..bbb89e8c 100644 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -74,7 +74,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" echo \#define LINUX_COMPILE_BY \"`echo shrike1978`\" - echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_13.01.14`\" + echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_13.01.16`\" echo 
\#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\" ) > .tmpcompile From 3555677abbcc46a8a97b8029a06d7e16715fe598 Mon Sep 17 00:00:00 2001 From: showp1984 Date: Mon, 19 Mar 2012 04:57:19 +0100 Subject: [PATCH 022/117] drivers: cpufreq: address issue where cpus forget their clock speeds --- drivers/cpufreq/cpufreq.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 856662c7..3f3996a3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -969,6 +969,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) unsigned long flags; unsigned int j; #ifdef CONFIG_HOTPLUG_CPU + struct cpufreq_policy *cp; int sibling; #endif @@ -1017,10 +1018,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) /* Set governor before ->init, so that driver could check it */ #ifdef CONFIG_HOTPLUG_CPU for_each_online_cpu(sibling) { - struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); + cp = per_cpu(cpufreq_cpu_data, sibling); if (cp && cp->governor && (cpumask_test_cpu(cpu, cp->related_cpus))) { policy->governor = cp->governor; + policy->min = cp->min; + policy->max = cp->max; + policy->user_policy.min = cp->user_policy.min; + policy->user_policy.max = cp->user_policy.max; found = 1; break; } @@ -1039,6 +1044,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) policy->user_policy.min = policy->min; policy->user_policy.max = policy->max; + if (found) { + /* Calling the driver can overwrite policy frequencies again */ + policy->min = cp->min; + policy->max = cp->max; + policy->user_policy.min = cp->user_policy.min; + policy->user_policy.max = cp->user_policy.max; + } + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); From bab5511dce53dc97762b1976f4d9db12cde2a5dd Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Sun, 9 Dec 2012 11:01:47 -0800 Subject: [PATCH 023/117] Increase bandwith limit at lower cpu/l2 clocks --- arch/arm/mach-msm/acpuclock-8x60.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c index 11d4916e..9a15688f 100644 --- a/arch/arm/mach-msm/acpuclock-8x60.c +++ b/arch/arm/mach-msm/acpuclock-8x60.c @@ -172,20 +172,20 @@ static uint32_t bus_perf_client; /* L2 frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_l2_speed l2_freq_tbl_v2[] = { [0] = { MAX_AXI, 0, 0, 1000000, 1100000, 0}, - [1] = { 432000, 1, 0x08, 1000000, 1100000, 0}, - [2] = { 486000, 1, 0x09, 1000000, 1100000, 0}, - [3] = { 540000, 1, 0x0A, 1000000, 1100000, 0}, - [4] = { 594000, 1, 0x0B, 1000000, 1100000, 0}, + [1] = { 432000, 1, 0x08, 1000000, 1100000, 1}, + [2] = { 486000, 1, 0x09, 1000000, 1100000, 1}, + [3] = { 540000, 1, 0x0A, 1000000, 1100000, 1}, + [4] = { 594000, 1, 0x0B, 1000000, 1100000, 1}, [5] = { 648000, 1, 0x0C, 1000000, 1100000, 1}, [6] = { 702000, 1, 0x0D, 1100000, 1100000, 1}, [7] = { 756000, 1, 0x0E, 1100000, 1100000, 1}, - [8] = { 810000, 1, 0x0F, 1100000, 1100000, 1}, - [9] = { 864000, 1, 0x10, 1100000, 1100000, 1}, + [8] = { 810000, 1, 0x0F, 1100000, 1100000, 2}, + [9] = { 864000, 1, 0x10, 1100000, 1100000, 2}, [10] = { 918000, 1, 0x11, 1100000, 1100000, 2}, [11] = { 972000, 1, 0x12, 1100000, 1100000, 2}, [12] = {1026000, 1, 0x13, 1100000, 1100000, 2}, - [13] = {1080000, 1, 0x14, 1100000, 1200000, 2}, - [14] = {1134000, 1, 0x15, 1100000, 1200000, 2}, + [13] = {1080000, 1, 0x14, 1100000, 1200000, 3}, + [14] = {1134000, 1, 0x15, 1100000, 
1200000, 3}, [15] = {1188000, 1, 0x16, 1200000, 1200000, 3}, [16] = {1242000, 1, 0x17, 1200000, 1212500, 3}, [17] = {1296000, 1, 0x18, 1200000, 1225000, 3}, From 0a059bb10705607ed3587e14ae81e51ee76a6181 Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Mon, 14 Jan 2013 06:23:33 -0800 Subject: [PATCH 024/117] fs/dyn_sync_cntrl: dynamic sync control The dynamic sync control interface uses Android kernel's unique early suspend / lat resume interface. While screen is on, file sync is disabled when screen is off, a file sync is called to flush all outstanding writes and restore file sync operation as normal. Signed-off-by: Paul Reioux --- fs/Kconfig | 6 ++++++ fs/Makefile | 2 ++ fs/sync.c | 37 ++++++++++++++++++++++++++++++++++++- include/linux/fs.h | 1 + 4 files changed, 45 insertions(+), 1 deletion(-) diff --git a/fs/Kconfig b/fs/Kconfig index 88701cc0..83c09ba9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -274,4 +274,10 @@ endif source "fs/nls/Kconfig" source "fs/dlm/Kconfig" +config DYNAMIC_FSYNC + bool "dynamic file sync control" + default n + help + An experimental file sync control using Android's early suspend / late resume drivers + endmenu diff --git a/fs/Makefile b/fs/Makefile index 2999b4d4..f4ff069b 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -127,3 +127,5 @@ obj-$(CONFIG_PSTORE) += pstore/ # Patched by YAFFS obj-$(CONFIG_YAFFS_FS) += yaffs2/ + +obj-$(CONFIG_DYNAMIC_FSYNC) += dyn_sync_cntrl.o diff --git a/fs/sync.c b/fs/sync.c index c38ec163..d615cc66 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -21,6 +21,10 @@ #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) +#ifdef CONFIG_DYNAMIC_FSYNC +extern bool early_suspend_active; +#endif + /* * Do the filesystem syncing work. For simple filesystems * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to @@ -87,7 +91,7 @@ static void sync_one_sb(struct super_block *sb, void *arg) * Sync all the data for all the filesystems (called by sys_sync() and * emergency sync) */ -static void sync_filesystems(int wait) +void sync_filesystems(int wait) { iterate_supers(sync_one_sb, &wait); } @@ -165,6 +169,11 @@ SYSCALL_DEFINE1(syncfs, int, fd) */ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (!early_suspend_active) + return 0; + else { +#endif struct address_space *mapping = file->f_mapping; int err, ret; @@ -187,6 +196,9 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) out: return ret; +#ifdef CONFIG_DYNAMIC_FSYNC + } +#endif } EXPORT_SYMBOL(vfs_fsync_range); @@ -219,11 +231,21 @@ static int do_fsync(unsigned int fd, int datasync) SYSCALL_DEFINE1(fsync, unsigned int, fd) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (!early_suspend_active) + return 0; + else +#endif return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (!early_suspend_active) + return 0; + else +#endif return do_fsync(fd, 1); } @@ -294,6 +316,11 @@ EXPORT_SYMBOL(generic_write_sync); SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, unsigned int flags) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (!early_suspend_active) + return 0; + else { +#endif int ret; struct file *file; struct address_space *mapping; @@ -373,6 +400,9 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, fput_light(file, fput_needed); out: return ret; +#ifdef CONFIG_DYNAMIC_FSYNC + } +#endif } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_sync_file_range(long fd, loff_t 
offset, loff_t nbytes, @@ -389,6 +419,11 @@ SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range); SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags, loff_t offset, loff_t nbytes) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (!early_suspend_active) + return 0; + else +#endif return sys_sync_file_range(fd, offset, nbytes, flags); } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS diff --git a/include/linux/fs.h b/include/linux/fs.h index 212ea7ba..8c82c570 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2058,6 +2058,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) } #endif extern int sync_filesystem(struct super_block *); +extern void sync_filesystems(int wait); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; From 016fd749f075f0258a4f007d9fdbf95c17b5a35f Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Mon, 14 Jan 2013 06:26:35 -0800 Subject: [PATCH 025/117] Set Dynamic Fsync:Added missing driver to git --- arch/arm/configs/vigor_defconfig | 1 + fs/dyn_sync_cntrl.c | 155 +++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) create mode 100644 fs/dyn_sync_cntrl.c diff --git a/arch/arm/configs/vigor_defconfig b/arch/arm/configs/vigor_defconfig index 9dd9ea8e..00eecf7d 100644 --- a/arch/arm/configs/vigor_defconfig +++ b/arch/arm/configs/vigor_defconfig @@ -1122,6 +1122,7 @@ CONFIG_TZCOM=y # CONFIG_SENSORS_LIS3_I2C is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set +CONFIG_DYNAMIC_FSYNC=y # # SCSI device support diff --git a/fs/dyn_sync_cntrl.c b/fs/dyn_sync_cntrl.c new file mode 100644 index 00000000..21befb74 --- /dev/null +++ b/fs/dyn_sync_cntrl.c @@ -0,0 +1,155 @@ +/* + * Author: Paul Reioux aka Faux123 + * + * Copyright 2012 Paul Reioux + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include + +#define DYN_FSYNC_VERSION 1 + +/* + * fsync_mutex protects dyn_fsync_active during early suspend / lat resume transitions + */ +static DEFINE_MUTEX(fsync_mutex); + +bool early_suspend_active = false; +static bool dyn_fsync_active = true; + +static ssize_t dyn_fsync_active_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", (dyn_fsync_active ? 
1 : 0)); +} + +static ssize_t dyn_fsync_active_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int data; + + if(sscanf(buf, "%u\n", &data) == 1) { + if (data == 1) { + pr_info("%s: dynamic fsync enabled\n", __FUNCTION__); + dyn_fsync_active = true; + } + else if (data == 0) { + pr_info("%s: dyanamic fsync disabled\n", __FUNCTION__); + dyn_fsync_active = false; + } + else + pr_info("%s: bad value: %u\n", __FUNCTION__, data); + } else + pr_info("%s: unknown input!\n", __FUNCTION__); + + return count; +} + +static ssize_t dyn_fsync_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "version: %u\n", DYN_FSYNC_VERSION); +} + +static ssize_t dyn_fsync_earlysuspend_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "early suspend active: %u\n", early_suspend_active); +} + +static struct kobj_attribute dyn_fsync_active_attribute = + __ATTR(Dyn_fsync_active, 0666, dyn_fsync_active_show, dyn_fsync_active_store); + +static struct kobj_attribute dyn_fsync_version_attribute = + __ATTR(Dyn_fsync_version, 0444 , dyn_fsync_version_show, NULL); + +static struct kobj_attribute dyn_fsync_earlysuspend_attribute = + __ATTR(Dyn_fsync_earlysuspend, 0444 , dyn_fsync_earlysuspend_show, NULL); + +static struct attribute *dyn_fsync_active_attrs[] = + { + &dyn_fsync_active_attribute.attr, + &dyn_fsync_version_attribute.attr, + &dyn_fsync_earlysuspend_attribute.attr, + NULL, + }; + +static struct attribute_group dyn_fsync_active_attr_group = + { + .attrs = dyn_fsync_active_attrs, + }; + +static struct kobject *dyn_fsync_kobj; + +static void dyn_fsync_early_suspend(struct early_suspend *h) +{ + mutex_lock(&fsync_mutex); + if (dyn_fsync_active) { + early_suspend_active = true; +#if 1 + /* flush all outstanding buffers */ + wakeup_flusher_threads(0); + sync_filesystems(0); + sync_filesystems(1); +#endif + } + mutex_unlock(&fsync_mutex); +} + +static void dyn_fsync_late_resume(struct early_suspend *h) +{ + mutex_lock(&fsync_mutex); + early_suspend_active = false; + mutex_unlock(&fsync_mutex); +} + +static struct early_suspend dyn_fsync_early_suspend_handler = + { + .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN, + .suspend = dyn_fsync_early_suspend, + .resume = dyn_fsync_late_resume, + }; + +static int dyn_fsync_init(void) +{ + int sysfs_result; + + register_early_suspend(&dyn_fsync_early_suspend_handler); + + dyn_fsync_kobj = kobject_create_and_add("dyn_fsync", kernel_kobj); + if (!dyn_fsync_kobj) { + pr_err("%s dyn_fsync kobject create failed!\n", __FUNCTION__); + return -ENOMEM; + } + + sysfs_result = sysfs_create_group(dyn_fsync_kobj, &dyn_fsync_active_attr_group); + + if (sysfs_result) { + pr_info("%s dyn_fsync sysfs create failed!\n", __FUNCTION__); + kobject_put(dyn_fsync_kobj); + } + return sysfs_result; +} + +static void dyn_fsync_exit(void) +{ + unregister_early_suspend(&dyn_fsync_early_suspend_handler); + + if (dyn_fsync_kobj != NULL) + kobject_put(dyn_fsync_kobj); +} + +module_init(dyn_fsync_init); +module_exit(dyn_fsync_exit); + From 462b2d0576fef6826aa152f97ba81a17e67ba660 Mon Sep 17 00:00:00 2001 From: Mike Date: Wed, 26 Sep 2012 12:41:56 -0700 Subject: [PATCH 026/117] =?UTF-8?q?O3=5FFIX:=20git=20rid=20of=20a=20unneed?= =?UTF-8?q?ed=20warning=20(TODO:=20return=5Faddress=20should=20us=E2=80=A6?= =?UTF-8?q?=20=E2=80=A6e=20unwind=20tables)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- arch/arm/kernel/return_address.c | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 0b13a72f..af430c0b 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -58,7 +58,7 @@ void *return_address(unsigned int level) #else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ -#if defined(CONFIG_ARM_UNWIND) +#if 0 //defined(CONFIG_ARM_UNWIND) #warning "TODO: return_address should use unwind tables" #endif From 8c28bee0469a6735e543a6fb82ee79c7f90eef27 Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Tue, 11 Dec 2012 04:23:52 -0800 Subject: [PATCH 027/117] staging;android;lowmemorykiller.c from faux123 --- drivers/staging/android/lowmemorykiller.c | 156 +++++++--------------- 1 file changed, 47 insertions(+), 109 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index d483c15c..1d771dbe 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -1,16 +1,20 @@ /* drivers/misc/lowmemorykiller.c * * The lowmemorykiller driver lets user-space specify a set of memory thresholds - * where processes with a range of oom_adj values will get killed. Specify the - * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the - * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both - * files take a comma separated list of numbers in ascending order. + * where processes with a range of oom_score_adj values will get killed. Specify + * the minimum oom_score_adj values in + * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in + * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma + * separated list of numbers in ascending order. * * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and - * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes - * with a oom_adj value of 8 or higher when the free memory drops below 4096 pages - * and kill processes with a oom_adj value of 0 or higher when the free memory - * drops below 1024 pages. + * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill + * processes with a oom_adj value of 8 or higher when the free memory drops + * below 4096 pages and kill processes with a oom_adj value of 0 or higher + * when the free memory drops below 1024 pages. + * processes with a oom_score_adj value of 8 or higher when the free memory + * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or + * higher when the free memory drops below 1024 pages. 
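+ * For instance (reusing the illustrative values above), from a root shell
+ * one would write:
+ *   echo "0,8" > /sys/module/lowmemorykiller/parameters/adj
+ *   echo "1024,4096" > /sys/module/lowmemorykiller/parameters/minfree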
* * The driver considers memory used for caches to be free, but if a large * percentage of the cached memory is locked this can be very inaccurate @@ -34,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -48,7 +53,7 @@ static int lowmem_adj[6] = { 12, }; static int lowmem_adj_size = 4; -static size_t lowmem_minfree[6] = { +static int lowmem_minfree[6] = { 3 * 512, /* 6MB */ 2 * 1024, /* 8MB */ 4 * 1024, /* 16MB */ @@ -68,7 +73,6 @@ static size_t lowmem_minfile[6] = { }; static int lowmem_minfile_size = 6; -static struct task_struct *lowmem_deathpending; static unsigned long lowmem_deathpending_timeout; static uint32_t lowmem_check_filepages = 0; @@ -80,63 +84,6 @@ static uint32_t lowmem_check_filepages = 0; } \ } while (0) -static int -task_notify_func(struct notifier_block *self, unsigned long val, void *data); - -static struct notifier_block task_nb = { - .notifier_call = task_notify_func, -}; - -static int -task_notify_func(struct notifier_block *self, unsigned long val, void *data) -{ - struct task_struct *task = data; - - if (task == lowmem_deathpending) { - lowmem_deathpending = NULL; - lowmem_print(2, "deathpending end %d (%s)\n", - task->pid, task->comm); - } - - return NOTIFY_OK; -} - -static void dump_deathpending(struct task_struct *t_deathpending) -{ - struct task_struct *p; - - if (lowmem_debug_level < DEBUG_LEVEL_DEATHPENDING) - return; - - BUG_ON(!t_deathpending); - lowmem_print(DEBUG_LEVEL_DEATHPENDING, "deathpending %d (%s)\n", - t_deathpending->pid, t_deathpending->comm); - - read_lock(&tasklist_lock); - for_each_process(p) { - struct mm_struct *mm; - struct signal_struct *sig; - int oom_adj; - int tasksize; - - task_lock(p); - mm = p->mm; - sig = p->signal; - if (!mm || !sig) { - task_unlock(p); - continue; - } - oom_adj = sig->oom_adj; - tasksize = get_mm_rss(mm); - task_unlock(p); - lowmem_print(DEBUG_LEVEL_DEATHPENDING, - " %d (%s), adj %d, size %d\n", - p->pid, p->comm, - oom_adj, tasksize); - } - read_unlock(&tasklist_lock); -} - #ifdef CONFIG_MEMORY_HOTPLUG static int lmk_hotplug_callback(struct notifier_block *self, unsigned long cmd, void *data) @@ -165,14 +112,14 @@ static int lmk_hotplug_callback(struct notifier_block *self, static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) { - struct task_struct *p; + struct task_struct *tsk; struct task_struct *selected = NULL; int rem = 0; int tasksize; int i; - int min_adj = OOM_ADJUST_MAX + 1; + int min_score_adj = OOM_SCORE_ADJ_MAX + 1; int selected_tasksize = 0; - int selected_oom_adj; + int selected_oom_score_adj; int array_size = ARRAY_SIZE(lowmem_adj); int other_free = global_page_state(NR_FREE_PAGES); int other_file = global_page_state(NR_FILE_PAGES) - @@ -193,18 +140,6 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) } } } - /* - * If we already have a death outstanding, then - * bail out right away; indicating to vmscan - * that we have nothing further to offer on - * this pass. 
- * - */ - if (lowmem_deathpending && - time_before_eq(jiffies, lowmem_deathpending_timeout)) { - dump_deathpending(lowmem_deathpending); - return 0; - } if (lowmem_adj_size < array_size) array_size = lowmem_adj_size; @@ -216,7 +151,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) (lowmem_check_filepages && (lru_file < lowmem_minfile[i]))) { - min_adj = lowmem_adj[i]; + min_score_adj = lowmem_adj[i]; break; } } @@ -224,65 +159,70 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) if (sc->nr_to_scan > 0) lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n", sc->nr_to_scan, sc->gfp_mask, other_free, other_file, - min_adj); + min_score_adj); rem = global_page_state(NR_ACTIVE_ANON) + global_page_state(NR_ACTIVE_FILE) + global_page_state(NR_INACTIVE_ANON) + global_page_state(NR_INACTIVE_FILE); - if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { + if (sc->nr_to_scan <= 0 || min_score_adj == OOM_ADJUST_MAX + 1) { lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n", sc->nr_to_scan, sc->gfp_mask, rem); return rem; } - selected_oom_adj = min_adj; + selected_oom_score_adj = min_score_adj; - read_lock(&tasklist_lock); - for_each_process(p) { - struct mm_struct *mm; - struct signal_struct *sig; - int oom_adj; + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *p; + int oom_score_adj; - task_lock(p); - mm = p->mm; - sig = p->signal; - if (!mm || !sig) { - task_unlock(p); + if (tsk->flags & PF_KTHREAD) + continue; + + p = find_lock_task_mm(tsk); + if (!p) continue; + + if (test_tsk_thread_flag(p, TIF_MEMDIE) && + time_before_eq(jiffies, lowmem_deathpending_timeout)) { + task_unlock(p); + rcu_read_unlock(); + return 0; } - oom_adj = sig->oom_adj; - if (oom_adj < min_adj) { + oom_score_adj = p->signal->oom_score_adj; + if (oom_score_adj < min_score_adj) { task_unlock(p); continue; } - tasksize = get_mm_rss(mm); + tasksize = get_mm_rss(p->mm); task_unlock(p); if (tasksize <= 0) continue; if (selected) { - if (oom_adj < selected_oom_adj) + if (oom_score_adj < selected_oom_score_adj) continue; - if (oom_adj == selected_oom_adj && + if (oom_score_adj == selected_oom_score_adj && tasksize <= selected_tasksize) continue; } selected = p; selected_tasksize = tasksize; - selected_oom_adj = oom_adj; + selected_oom_score_adj = oom_score_adj; lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", - p->pid, p->comm, oom_adj, tasksize); + p->pid, p->comm, oom_score_adj, tasksize); } if (selected) { lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", selected->pid, selected->comm, - selected_oom_adj, selected_tasksize); - lowmem_deathpending = selected; + selected_oom_score_adj, selected_tasksize); lowmem_deathpending_timeout = jiffies + HZ; - force_sig(SIGKILL, selected); + send_sig(SIGKILL, selected, 0); + set_tsk_thread_flag(selected, TIF_MEMDIE); rem -= selected_tasksize; } lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n", sc->nr_to_scan, sc->gfp_mask, rem); - read_unlock(&tasklist_lock); + rcu_read_unlock(); return rem; } @@ -293,7 +233,6 @@ static struct shrinker lowmem_shrinker = { static int __init lowmem_init(void) { - task_free_register(&task_nb); register_shrinker(&lowmem_shrinker); #ifdef CONFIG_MEMORY_HOTPLUG hotplug_memory_notifier(lmk_hotplug_callback, 0); @@ -304,7 +243,6 @@ static int __init lowmem_init(void) static void __exit lowmem_exit(void) { unregister_shrinker(&lowmem_shrinker); - task_free_unregister(&task_nb); } module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); From 
2d054fb8757d2e3bdc93d45deebd1d72217c50e3 Mon Sep 17 00:00:00 2001 From: Mike Date: Sat, 1 Sep 2012 03:21:26 -0700 Subject: [PATCH 028/117] netfilter: xt_log.h: fix warning: value computed is not used --- include/net/netfilter/xt_log.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h index 0dfb34a5..595bddb7 100644 --- a/include/net/netfilter/xt_log.h +++ b/include/net/netfilter/xt_log.h @@ -47,7 +47,7 @@ static void sb_close(struct sbuff *m) if (likely(m != &emergency)) kfree(m); else { - xchg(&emergency_ptr, m); + (void) xchg(&emergency_ptr, m); local_bh_enable(); } } From 37be7389600b2a3f72143c607b50576edd769bd2 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 22 Jan 2013 09:11:56 -0500 Subject: [PATCH 029/117] Add intellidemand governor by faux123 --- arch/arm/configs/vigor_aosp_defconfig | 2 + drivers/cpufreq/Kconfig | 14 + drivers/cpufreq/Makefile | 1 + drivers/cpufreq/cpufreq_intellidemand.c | 1682 +++++++++++++++++++++++ include/linux/cpufreq.h | 3 + 5 files changed, 1702 insertions(+) create mode 100644 drivers/cpufreq/cpufreq_intellidemand.c diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 163605d6..a98052eb 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -637,6 +637,7 @@ CONFIG_CPU_VOLTAGE_TABLE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -645,6 +646,7 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE=y CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_LIONHEART=y +CONFIG_CPU_FREQ_GOV_INTELLIDEMAND=y CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPU_IDLE_GOV_MENU=y diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 259b0a3a..1ec02f6d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -115,6 +115,12 @@ config CPU_FREQ_DEFAULT_GOV_LIONHEART help Use the CPU governor 'lionheart' as default. +config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND + bool "intellidemand" + select CPU_FREQ_GOV_INTELLIDEMAND + help + Use the CPU governor 'intellidemand' as default. + endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -226,6 +232,14 @@ config CPU_FREQ_GOV_LIONHEART If in doubt, say N. +config CPU_FREQ_GOV_INTELLIDEMAND + tristate "'intellidemand' cpufreq governor" + depends on CPU_FREQ + help + Use the CPU governor 'intellidemand'. + + If in doubt, say N. 
+ menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index eeb3e827..f1235382 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o +obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) +=cpufreq_intellidemand.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_intellidemand.c b/drivers/cpufreq/cpufreq_intellidemand.c new file mode 100644 index 00000000..dae3262c --- /dev/null +++ b/drivers/cpufreq/cpufreq_intellidemand.c @@ -0,0 +1,1682 @@ +/* + * drivers/cpufreq/cpufreq_intellidemand.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * Modified by faux123 as intellidemand + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_EARLYSUSPEND +#include +#endif + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (15000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) +#define DBS_INPUT_EVENT_MIN_FREQ (810000) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
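+ * For example (illustrative figures): a CPU reporting a 50 uS transition
+ * latency gets a default sampling rate of 50 * 1000 = 50000 uS (50 ms),
+ * subject to the min_sampling_rate floor computed at governor start.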
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; +#ifdef CONFIG_EARLYSUSPEND +static unsigned long stored_sampling_rate; +#endif + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +#define POWERSAVE_BIAS_MAXLEVEL (1000) +#define POWERSAVE_BIAS_MINLEVEL (-1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND +static +#endif +struct cpufreq_governor cpufreq_gov_intellidemand = { + .name = "intellidemand", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info); +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. 
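+ * dbs_enable counts the CPUs currently using this governor; the sysfs
+ * group is created on the first CPUFREQ_GOV_START and removed again when
+ * the count drops back to zero.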
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *input_wq; + +static DEFINE_PER_CPU(struct work_struct, dbs_refresh_work); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + int powersave_bias; + unsigned int io_is_busy; +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE + unsigned int two_phase_freq; +#endif +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE + .two_phase_freq = 0, +#endif +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
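+ *
+ * Illustrative example (hypothetical table values): with powersave_bias =
+ * 100 (10%) and freq_req = 1512000 kHz, freq_avg = 1512000 - 151200 =
+ * 1360800 kHz. If the table brackets this between freq_lo = 1296000 and
+ * freq_hi = 1512000, roughly 30% of each sampling period is spent at
+ * freq_hi and 70% at freq_lo, averaging out to about 1360800 kHz.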
+ */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + int freq_reduc; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static int intellidemand_powersave_bias_setspeed(struct cpufreq_policy *policy, + struct cpufreq_policy *altpolicy, + int level) +{ + if (level == POWERSAVE_BIAS_MAXLEVEL) { + /* maximum powersave; set to lowest frequency */ + __cpufreq_driver_target(policy, + (altpolicy) ? altpolicy->min : policy->min, + CPUFREQ_RELATION_L); + return 1; + } else if (level == POWERSAVE_BIAS_MINLEVEL) { + /* minimum powersave; set to highest frequency */ + __cpufreq_driver_target(policy, + (altpolicy) ? 
altpolicy->max : policy->max, + CPUFREQ_RELATION_H); + return 1; + } + return 0; +} + +static void intellidemand_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void intellidemand_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + intellidemand_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_intellidemand Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +//show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(down_differential, down_differential); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); + +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE +show_one(two_phase_freq, two_phase_freq); +#endif + +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ +void set_lmf_browser_state(bool onOff); +void set_lmf_active_max_freq(unsigned long freq); +void set_lmf_inactive_max_freq(unsigned long freq); +void set_lmf_active_load(unsigned long freq); +void set_lmf_inactive_load(unsigned long freq); +bool get_lmf_browser_state(void); +unsigned long get_lmf_active_max_freq(void); +unsigned long get_lmf_inactive_max_freq(void); +unsigned long get_lmf_active_load(void); +unsigned long get_lmf_inactive_load(void); + +static ssize_t show_lmf_browser(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", get_lmf_browser_state()); +} + +static ssize_t show_lmf_active_max_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%ld\n", get_lmf_active_max_freq()); +} + +static ssize_t show_lmf_inactive_max_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%ld\n", get_lmf_inactive_max_freq()); +} + +static ssize_t show_lmf_active_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%ld\n", get_lmf_active_load()); +} + +static ssize_t show_lmf_inactive_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%ld\n", get_lmf_inactive_load()); +} +#endif + +static ssize_t show_powersave_bias +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias); +} + +#if 0 +/** + * update_sampling_rate - update sampling rate effective immediately if needed. + * @new_rate: new sampling rate + * + * If new rate is smaller than the old, simply updaing + * dbs_tuners_int.sampling_rate might not be appropriate. For example, + * if the original sampling_rate was 1 second and the requested new sampling + * rate is 10 ms because the user needs immediate reaction from ondemand + * governor, but not sure if higher frequency will be required or not, + * then, the governor may change the sampling rate too late; up to 1 second + * later. Thus, if we are reducing the sampling rate, we need to make the + * new value effective immediately. 
+ */ +static void update_sampling_rate(unsigned int new_rate) +{ + int cpu; + + dbs_tuners_ins.sampling_rate = new_rate + = max(new_rate, min_sampling_rate); + + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy; + struct cpu_dbs_info_s *dbs_info; + unsigned long next_sampling, appointed_at; + + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); + cpufreq_cpu_put(policy); + + mutex_lock(&dbs_info->timer_mutex); + + if (!delayed_work_pending(&dbs_info->work)) { + mutex_unlock(&dbs_info->timer_mutex); + continue; + } + + next_sampling = jiffies + usecs_to_jiffies(new_rate); + appointed_at = dbs_info->work.timer.expires; + + + if (time_before(next_sampling, appointed_at)) { + + mutex_unlock(&dbs_info->timer_mutex); + cancel_delayed_work_sync(&dbs_info->work); + mutex_lock(&dbs_info->timer_mutex); + + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, + usecs_to_jiffies(new_rate)); + + } + mutex_unlock(&dbs_info->timer_mutex); + } +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} +#endif + +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE +static ssize_t store_two_phase_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.two_phase_freq = input; + + return count; +} +#endif + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold || + input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { + return -EINVAL; + } + + dbs_tuners_ins.down_differential = input; + + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ 
+ return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + int input = 0; + int bypass = 0; + int ret, cpu, reenable_timer, j; + struct cpu_dbs_info_s *dbs_info; + + struct cpumask cpus_timer_done; + cpumask_clear(&cpus_timer_done); + + ret = sscanf(buf, "%d", &input); + + if (ret != 1) + return -EINVAL; + + if (input >= POWERSAVE_BIAS_MAXLEVEL) { + input = POWERSAVE_BIAS_MAXLEVEL; + bypass = 1; + } else if (input <= POWERSAVE_BIAS_MINLEVEL) { + input = POWERSAVE_BIAS_MINLEVEL; + bypass = 1; + } + + if (input == dbs_tuners_ins.powersave_bias) { + /* no change */ + return count; + } + + reenable_timer = ((dbs_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MAXLEVEL) || + (dbs_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MINLEVEL)); + + dbs_tuners_ins.powersave_bias = input; + if (!bypass) { + if (reenable_timer) { + /* reinstate dbs timer */ + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!dbs_info->cur_policy) { + printk(KERN_ERR + "%s Dbs policy is NULL\n", + __func__); + goto skip_this_cpu; + } + if (cpumask_test_cpu(j, dbs_info-> + cur_policy->cpus)) + goto skip_this_cpu; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + if (dbs_info->cur_policy) { + /* restart dbs timer */ + dbs_timer_init(dbs_info); + } +skip_this_cpu: + unlock_policy_rwsem_write(cpu); + } + } + intellidemand_powersave_bias_init(); + } else { + /* running at maximum or minimum frequencies; cancel + dbs timer as periodic load sampling is not necessary */ + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!dbs_info->cur_policy) { + printk(KERN_ERR + "%s Dbs policy is NULL\n", + __func__); + goto skip_this_cpu_bypass; + } + if (cpumask_test_cpu(j, dbs_info-> + cur_policy->cpus)) + goto skip_this_cpu_bypass; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + + if (dbs_info->cur_policy) { + /* cpu using intellidemand, cancel dbs timer */ + mutex_lock(&dbs_info->timer_mutex); + dbs_timer_exit(dbs_info); + + intellidemand_powersave_bias_setspeed( + dbs_info->cur_policy, + NULL, + input); + + mutex_unlock(&dbs_info->timer_mutex); + } +skip_this_cpu_bypass: + unlock_policy_rwsem_write(cpu); + } + } + + return count; +} + +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ +static ssize_t store_lmf_browser(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + set_lmf_browser_state(input); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_lmf_active_max_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned long input; + int ret; + + ret = sscanf(buf, "%ld", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + set_lmf_active_max_freq(input); + mutex_unlock(&dbs_mutex); + + return count; +} +static ssize_t 
store_lmf_inactive_max_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned long input; + int ret; + + ret = sscanf(buf, "%ld", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + set_lmf_inactive_max_freq(input); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_lmf_active_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned long input; + int ret; + + ret = sscanf(buf, "%ld", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + set_lmf_active_load(input); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_lmf_inactive_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned long input; + int ret; + + ret = sscanf(buf, "%ld", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + set_lmf_inactive_load(input); + mutex_unlock(&dbs_mutex); + + return count; +} +#endif + +//define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE +define_one_global_rw(two_phase_freq); +#endif + +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ +define_one_global_rw(lmf_browser); +define_one_global_rw(lmf_active_max_freq); +define_one_global_rw(lmf_inactive_max_freq); +define_one_global_rw(lmf_active_load); +define_one_global_rw(lmf_inactive_load); +#endif + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, +// &sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE + &two_phase_freq.attr, +#endif +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ + &lmf_browser.attr, + &lmf_active_max_freq.attr, + &lmf_inactive_max_freq.attr, + &lmf_active_load.attr, + &lmf_inactive_load.attr, +#endif + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "intellidemand", +}; + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE +int id_set_two_phase_freq(int cpufreq) +{ + dbs_tuners_ins.two_phase_freq = cpufreq; + return 0; +} +#endif + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE + static unsigned int phase = 0; + static unsigned int counter = 0; +#endif + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of intellidemand, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. + */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + /* If switching to max speed, apply sampling_down_factor */ +#ifndef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); +#else + if (counter < 5) { + counter++; + if (counter > 2) { + /* change to busy phase */ + phase = 1; + } + } + if (dbs_tuners_ins.two_phase_freq != 0 && phase == 0) { + /* idle phase */ + dbs_freq_increase(policy, + (((dbs_tuners_ins.two_phase_freq)> (int)(policy->max*80/100)) + ?(dbs_tuners_ins.two_phase_freq) : (int)(policy->max*80/100)) ); + } else { + /* busy phase */ + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + } +#endif + return; + } +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE + if (counter > 0) { + counter--; + if (counter == 0) { + /* change to idle phase */ + phase = 0; + } + } +#endif + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
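+ *
+ * Worked example (hypothetical load figures): with the default
+ * up_threshold of 80 and down_differential of 10, a 50% load with the CPU
+ * at 1512000 kHz gives max_load_freq = 50 * 1512000 = 75600000, which is
+ * below 70 * policy->cur, so freq_next = 75600000 / 70 = 1080000 kHz is
+ * requested with CPUFREQ_RELATION_L.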
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ + +enum { + SET_MIN = 0, + SET_MAX +}; + +enum { + BOOT_CPU = 0, + NON_BOOT_CPU +}; + +#define SAMPLE_DURATION_MSEC (10*1000) // 10 secs >= 10000 msec +#define ACTIVE_DURATION_MSEC (3*60*1000) // 3 mins +#define INACTIVE_DURATION_MSEC (1*60*1000) // 1 mins +#define MAX_ACTIVE_FREQ_LIMIT 35 // % +#define MAX_INACTIVE_FREQ_LIMIT 25 // % +#define ACTIVE_MAX_FREQ CONFIG_INTELLI_MAX_ACTIVE_FREQ // 1.512GHZ +#define INACTIVE_MAX_FREQ CONFIG_INTELLI_MAX_INACTIVE_FREQ // 1.134GHZ + +#define NUM_ACTIVE_LOAD_ARRAY (ACTIVE_DURATION_MSEC/SAMPLE_DURATION_MSEC) +#define NUM_INACTIVE_LOAD_ARRAY (INACTIVE_DURATION_MSEC/SAMPLE_DURATION_MSEC) + +bool lmf_browser_state = false; +bool lmf_screen_state = true; + +static unsigned long lmf_active_max_limit = ACTIVE_MAX_FREQ; +static unsigned long lmf_inactive_max_limit = INACTIVE_MAX_FREQ; +static unsigned long lmf_active_load_limit = MAX_ACTIVE_FREQ_LIMIT; +static unsigned long lmf_inactive_load_limit = MAX_INACTIVE_FREQ_LIMIT; + +static unsigned long jiffies_old = 0; +static unsigned long time_int = 0; +static unsigned long time_int1 = 0; +static unsigned long load_state_total0 = 0; +static unsigned long load_state_total1 = 0; +static unsigned long load_limit_index = 0; +static unsigned long load_limit_total[NUM_ACTIVE_LOAD_ARRAY]; +static unsigned long msecs_limit_total = 0; +static bool active_state = true; +static bool lmf_old_state = false; + +extern int cpufreq_set_limits(int cpu, unsigned int limit, unsigned int value); +extern int cpufreq_set_limits_off(int cpu, unsigned int limit, unsigned int value); + +void set_lmf_browser_state(bool onOff) +{ + if (onOff) + lmf_browser_state = true; + else + lmf_browser_state = false; +} + +void set_lmf_active_max_freq(unsigned long freq) +{ + lmf_active_max_limit = freq; +} + +void set_lmf_inactive_max_freq(unsigned long freq) +{ + lmf_inactive_max_limit = freq; +} + +void set_lmf_active_load(unsigned long freq) +{ + lmf_active_load_limit = freq; +} + +void set_lmf_inactive_load(unsigned long freq) +{ + lmf_inactive_load_limit = freq; +} + +bool get_lmf_browser_state(void) +{ + return lmf_browser_state; +} + +unsigned long get_lmf_active_max_freq(void) +{ + return lmf_active_max_limit; +} + +unsigned long get_lmf_inactive_max_freq(void) +{ + return lmf_inactive_max_limit; +} + +unsigned long get_lmf_active_load(void) +{ + return lmf_active_load_limit; +} + +unsigned long get_lmf_inactive_load(void) +{ + return lmf_inactive_load_limit; +} +#endif + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + int delay; + +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ + + if (!lmf_browser_state && lmf_screen_state) + { + if (cpu == BOOT_CPU) + { + if (lmf_old_state == true) + { + 
pr_warn("LMF: disabled!\n"); + lmf_old_state = false; + + if (lmf_screen_state == true) { + /* wake up the 2nd core */ + if (num_online_cpus() < 2) + cpu_up(1); + } + + } + + if (!active_state) + { + /* set freq to 1.5GHz */ + pr_info("LMF: CPU0 set max freq to: %lu\n", lmf_active_max_limit); + cpufreq_set_limits(BOOT_CPU, SET_MAX, lmf_active_max_limit); + + pr_info("LMF: CPU1 set max freq to: %lu\n", lmf_active_max_limit); + if (cpu_online(NON_BOOT_CPU)) + cpufreq_set_limits(NON_BOOT_CPU, SET_MAX, lmf_active_max_limit); + else + cpufreq_set_limits_off(NON_BOOT_CPU, SET_MAX, lmf_active_max_limit); + } + + jiffies_old = 0; + time_int = 0; + time_int1 = 0; + load_state_total0 = 0; + load_state_total1 = 0; + msecs_limit_total = 0; + load_limit_index = 0; + active_state = true; + } + } + else if (lmf_browser_state && lmf_screen_state) // lmf_browser_state -> TRUE + { + struct cpufreq_policy *policy; + unsigned long load_state_cpu = 0; + unsigned int delay_msec = 0; + unsigned long load_total = 0; + unsigned long jiffies_cur = jiffies; + + if (cpu == NON_BOOT_CPU) + { + delay_msec = (dbs_tuners_ins.sampling_rate * dbs_info->rate_mult) / 1000; + policy = dbs_info->cur_policy; + load_state_cpu = ((policy->cur) * delay_msec)/10000; + + time_int1 += delay_msec; + load_state_total1 += load_state_cpu; + } + else + { + if (lmf_old_state == false) + { + pr_warn("LMF: enabled!\n"); + lmf_old_state = true; + } + + if (jiffies_old == 0) + { + jiffies_old = jiffies_cur; + } + else + { + delay_msec = jiffies_to_msecs(jiffies_cur - jiffies_old); + jiffies_old = jiffies_cur; + policy = dbs_info->cur_policy; + load_state_cpu = ((policy->cur) * delay_msec)/10000; + + time_int += delay_msec; + load_state_total0 += load_state_cpu; + + /* average */ + if (time_int >= SAMPLE_DURATION_MSEC) + { + int i = 0; + unsigned long ave_max = 0; + unsigned long average = 0; + unsigned long average_dec = 0; + unsigned long total_load = 0; + + load_total = load_state_total0 + load_state_total1; + ave_max = (time_int / 10) * ((lmf_active_max_limit/1000) * 2); + average = (load_total * 100) / ave_max; + average_dec = (load_total * 100) % ave_max; + + msecs_limit_total += time_int; + load_limit_total[load_limit_index++] = average; + + //pr_warn("LMF: average = %ld.%ld, (%ld:%ld) (%ld:%ld) (%ld:%ld)\n", + // average, average_dec, time_int, time_int1, load_state_total0, load_state_total1, load_limit_index-1, msecs_limit_total); + + time_int = 0; + time_int1 = 0; + load_state_total0 = 0; + load_state_total1 = 0; + + /* active */ + if (active_state) + { + if (load_limit_index >= NUM_ACTIVE_LOAD_ARRAY) + { + load_limit_index = 0; + } + + if (msecs_limit_total > ACTIVE_DURATION_MSEC) + { + for (i=0; i lmf_active_load_limit) + { + msecs_limit_total = 0; + load_limit_index = 0; + active_state = false; + + if (lmf_screen_state == true) { + /* wake up the 2nd core */ + if (num_online_cpus() < 2) + cpu_up(1); + } + + /* set freq to 1.0GHz */ + pr_info("LMF: CPU0 set max freq to: %lu\n", lmf_inactive_max_limit); + cpufreq_set_limits(BOOT_CPU, SET_MAX, lmf_inactive_max_limit); + + pr_info("LMF: CPU1 set max freq to: %lu\n", lmf_inactive_max_limit); + if (cpu_online(NON_BOOT_CPU)) + cpufreq_set_limits(NON_BOOT_CPU, SET_MAX, lmf_inactive_max_limit); + else + cpufreq_set_limits_off(NON_BOOT_CPU, SET_MAX, lmf_inactive_max_limit); + } + else + { + msecs_limit_total = ACTIVE_DURATION_MSEC; // to prevent overflow + if (lmf_screen_state == true) { + /* take 2nd core offline */ + if (num_online_cpus() > 1) + cpu_down(1); + } + + } + } + } + else /* 
inactive */ + { + if (load_limit_index >= NUM_INACTIVE_LOAD_ARRAY) + { + load_limit_index = 0; + } + + if (msecs_limit_total > INACTIVE_DURATION_MSEC) + { + for (i=0; itimer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static void dbs_refresh_callback(struct work_struct *unused) +{ + struct cpufreq_policy *policy; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int cpu = smp_processor_id(); + + if (lock_policy_rwsem_write(cpu) < 0) + return; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + policy = this_dbs_info->cur_policy; + if (!policy) { + /* CPU not using intellidemand governor */ + unlock_policy_rwsem_write(cpu); + return; + } + + if (policy->cur < DBS_INPUT_EVENT_MIN_FREQ) { + /* + pr_info("%s: set cpufreq to DBS_INPUT_EVENT_MIN_FREQ(%d) directly due to input events!\n", __func__, DBS_INPUT_EVENT_MIN_FREQ); + */ + __cpufreq_driver_target(policy, DBS_INPUT_EVENT_MIN_FREQ, + CPUFREQ_RELATION_L); + this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu, + &this_dbs_info->prev_cpu_wall); + } + unlock_policy_rwsem_write(cpu); +} + +static unsigned int enable_dbs_input_event = 1; +static void dbs_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + int i; + + if (enable_dbs_input_event) { + + if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || + (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) { + /* nothing to do */ + return; + } + + for_each_online_cpu(i) { + queue_work_on(i, input_wq, &per_cpu(dbs_refresh_work, i)); + } + } +} + +static int input_dev_filter(const char *input_dev_name) +{ + if (strstr(input_dev_name, "touchscreen") || strstr(input_dev_name, "-keypad") || + strstr(input_dev_name, "-nav") || strstr(input_dev_name, "-oj")) { + return 0; + } else { + return 1; + } +} + +static int dbs_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + /* filter out those input_dev that we don't care */ + if (input_dev_filter(dev->name)) + return -ENODEV; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "cpufreq"; + + error = input_register_handle(handle); + if (error) + goto err2; + + error = input_open_device(handle); + if (error) + goto err1; + + return 0; +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return error; +} + +static void dbs_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id dbs_ids[] = { + { .driver_info = 1 }, + { }, +}; + +static struct input_handler dbs_input_handler = { + .event = dbs_input_event, + .connect = dbs_input_connect, + .disconnect = dbs_input_disconnect, + .name = "cpufreq_ond", + .id_table = dbs_ids, +}; + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + 
j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + intellidemand_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + if (!cpu) + rc = input_register_handler(&dbs_input_handler); + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + + if (!intellidemand_powersave_bias_setspeed( + this_dbs_info->cur_policy, + NULL, + dbs_tuners_ins.powersave_bias)) + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + /* If device is being removed, policy is no longer + * valid. */ + this_dbs_info->cur_policy = NULL; + if (!cpu) + input_unregister_handler(&dbs_input_handler); + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + else if (dbs_tuners_ins.powersave_bias != 0) + intellidemand_powersave_bias_setspeed( + this_dbs_info->cur_policy, + policy, + dbs_tuners_ins.powersave_bias); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +#ifdef CONFIG_EARLYSUSPEND +static void cpufreq_intellidemand_early_suspend(struct early_suspend *h) +{ + mutex_lock(&dbs_mutex); + stored_sampling_rate = min_sampling_rate; + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE * 2; + mutex_unlock(&dbs_mutex); +} + +static void cpufreq_intellidemand_late_resume(struct early_suspend *h) +{ + mutex_lock(&dbs_mutex); + min_sampling_rate = stored_sampling_rate; + mutex_unlock(&dbs_mutex); +} + +static struct early_suspend cpufreq_intellidemand_early_suspend_info = { + .suspend = cpufreq_intellidemand_early_suspend, + .resume = cpufreq_intellidemand_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB+1, +}; +#endif + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + unsigned int i; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
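+ * With the constants above this yields a 15000 uS floor here, versus
+ * MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10) in the fallback branch
+ * below (200000 uS at HZ=100; figures are illustrative).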
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + input_wq = create_workqueue("iewq"); + if (!input_wq) { + printk(KERN_ERR "Failed to create iewq workqueue\n"); + return -EFAULT; + } + for_each_possible_cpu(i) { + INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback); + } + +#ifdef CONFIG_EARLYSUSPEND + register_early_suspend(&cpufreq_intellidemand_early_suspend_info); +#endif + return cpufreq_register_governor(&cpufreq_gov_intellidemand); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_intellidemand); +#ifdef CONFIG_EARLYSUSPEND + unregister_early_suspend(&cpufreq_intellidemand_early_suspend_info); +#endif + destroy_workqueue(input_wq); +} + +static int set_enable_dbs_input_event_param(const char *val, struct kernel_param *kp) +{ + int ret = 0; + + ret = param_set_uint(val, kp); + if (ret) + pr_err("%s: error setting value %d\n", __func__, ret); + + return ret; +} +module_param_call(enable_dbs_input_event, set_enable_dbs_input_event_param, param_get_uint, + &enable_dbs_input_event, S_IWUSR | S_IRUGO); + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_DESCRIPTION("'cpufreq_intellidemand' - An intelligent dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index d8d5dcfd..f1e5b710 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -369,6 +369,9 @@ extern struct cpufreq_governor cpufreq_gov_interactive; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART) extern struct cpufreq_governor cpufreq_gov_lionheart; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lionheart) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND) +extern struct cpufreq_governor cpufreq_gov_intellidemand; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_intellidemand) #endif From 50b7b781c099c9ce990b4806146a566435843f67 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 22 Jan 2013 09:29:47 -0500 Subject: [PATCH 030/117] Add wheatley governor by Ezekeel --- arch/arm/configs/vigor_aosp_defconfig | 2 + drivers/cpufreq/Kconfig | 14 + drivers/cpufreq/Makefile | 1 + drivers/cpufreq/cpufreq_wheatley.c | 839 ++++++++++++++++++++++++++ include/linux/cpufreq.h | 3 + 5 files changed, 859 insertions(+) create mode 100644 drivers/cpufreq/cpufreq_wheatley.c diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index a98052eb..1789c52a 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -638,6 +638,7 @@ CONFIG_CPU_VOLTAGE_TABLE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART=y # CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -647,6 +648,7 @@ CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_LIONHEART=y CONFIG_CPU_FREQ_GOV_INTELLIDEMAND=y +CONFIG_CPU_FREQ_GOV_WHEATLEY=y CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y 
CONFIG_CPU_IDLE_GOV_MENU=y diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 1ec02f6d..028ad408 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -121,6 +121,12 @@ config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND help Use the CPU governor 'intellidemand' as default. +config CPU_FREQ_DEFAULT_GOV_WHEATLEY + bool "wheatley" + select CPU_FREQ_GOV_WHEATLEY + help + Use the CPU governor 'wheatley' as default. + endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -240,6 +246,14 @@ config CPU_FREQ_GOV_INTELLIDEMAND If in doubt, say N. +config CPU_FREQ_GOV_WHEATLEY + tristate "'wheatley' cpufreq governor" + depends on CPU_FREQ + help + Use the CPU governor 'wheatley'. + + If in doubt, say N. + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index f1235382..27f421d7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) +=cpufreq_intellidemand.o +obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_wheatley.c b/drivers/cpufreq/cpufreq_wheatley.c new file mode 100644 index 00000000..8107ae06 --- /dev/null +++ b/drivers/cpufreq/cpufreq_wheatley.c @@ -0,0 +1,839 @@ +/* + * drivers/cpufreq/cpufreq_wheatley.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2012 Ezekeel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (88) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (25) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_TARGET_RESIDENCY (10000) +#define DEF_ALLOWED_MISSES (5) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate, num_misses; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY +static +#endif +struct cpufreq_governor cpufreq_gov_wheatley = { + .name = "wheatley", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + unsigned long long prev_idletime; + unsigned long long prev_idleusage; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int target_residency; + unsigned int allowed_misses; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, + .target_residency = DEF_TARGET_RESIDENCY, + .allowed_misses = DEF_ALLOWED_MISSES, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * 
Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. + */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void wheatley_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void wheatley_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + wheatley_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_wheatley Governor Tunables */ +#define show_one(file_name, object) \ + static ssize_t show_##file_name \ + (struct kobject *kobj, struct attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ + } +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); +show_one(target_residency, target_residency); +show_one(allowed_misses, allowed_misses); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, 
"%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + dbs_tuners_ins.powersave_bias = input; + wheatley_powersave_bias_init(); + return count; +} + +static ssize_t store_target_residency(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.target_residency = input; + return count; +} + +static ssize_t store_allowed_misses(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.allowed_misses = input; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(target_residency); +define_one_global_rw(allowed_misses); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &target_residency.attr, + &allowed_misses.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "wheatley", +}; + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if 
(dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + unsigned long total_idletime, total_usage; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + total_idletime = 0; + total_usage = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + struct cpuidle_device * j_cpuidle_dev = NULL; + struct cpuidle_state * deepidle_state = NULL; + unsigned long long deepidle_time, deepidle_usage; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of wheatley, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
+ */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + + j_cpuidle_dev = per_cpu(cpuidle_devices, j); + + if (j_cpuidle_dev) + deepidle_state = &j_cpuidle_dev->states[j_cpuidle_dev->state_count - 1]; + + if (deepidle_state) { + deepidle_time = deepidle_state->time; + deepidle_usage = deepidle_state->usage; + + total_idletime += (unsigned long)(deepidle_time - j_dbs_info->prev_idletime); + total_usage += (unsigned long)(deepidle_usage - j_dbs_info->prev_idleusage); + + j_dbs_info->prev_idletime = deepidle_time; + j_dbs_info->prev_idleusage = deepidle_usage; + } + } + + if (total_usage > 0 && total_idletime / total_usage >= dbs_tuners_ins.target_residency) { + if (num_misses > 0) + num_misses--; + } else { + if (num_misses <= dbs_tuners_ins.allowed_misses) + num_misses++; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur + || num_misses <= dbs_tuners_ins.allowed_misses) { + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + wheatley_powersave_bias_init_cpu(cpu); + num_misses = 0; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + return cpufreq_register_governor(&cpufreq_gov_wheatley); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_wheatley); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Ezekeel "); +MODULE_DESCRIPTION("'cpufreq_wheatley' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f1e5b710..fd01eb57 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -372,6 +372,9 @@ extern struct cpufreq_governor cpufreq_gov_lionheart; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND) extern struct cpufreq_governor cpufreq_gov_intellidemand; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_intellidemand) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY) +extern struct cpufreq_governor cpufreq_gov_wheatley; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_wheatley) #endif From f05e78f956ad6c8414c5b294b2cb323ff208f712 Mon Sep 17 00:00:00 2001 From: Coolexe Date: Sun, 30 Sep 2012 13:48:21 +0530 Subject: [PATCH 031/117] Linaro & -02 Fix --- arch/arm/mach-msm/include/mach/restart.h | 4 ++-- drivers/usb/gadget/f_projector.c | 2 +- lib/bitmap.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-msm/include/mach/restart.h b/arch/arm/mach-msm/include/mach/restart.h index 1f9d91ff..4bd2b010 100644 --- a/arch/arm/mach-msm/include/mach/restart.h +++ b/arch/arm/mach-msm/include/mach/restart.h @@ -46,8 +46,8 @@ enum RESTART_MODE { }; void set_ramdump_reason(const char *msg); -void soc_restart(char mode, const char *msg); -inline void notify_modem_cache_flush_done(void); +/*inline*/ void soc_restart(char mode, const char *msg); +/*inline*/ void notify_modem_cache_flush_done(void); int check_in_panic(void); extern void send_q6_nmi(void); void msm_set_restart_mode(int mode); diff --git a/drivers/usb/gadget/f_projector.c b/drivers/usb/gadget/f_projector.c index 557f9f5a..5d8feda9 100644 --- a/drivers/usb/gadget/f_projector.c +++ b/drivers/usb/gadget/f_projector.c @@ -414,7 +414,7 @@ static void projector_complete_out(struct usb_ep *ep, struct usb_request *req) { struct projector_dev *ctxt = &_projector_dev; unsigned char *data = req->buf; - int mouse_data[3]; + int mouse_data[3] = {0, 0, 0}; int i; DBG("%s: status %d, %d bytes\n", __func__, req->status, req->actual); diff --git a/lib/bitmap.c b/lib/bitmap.c index cf12bb86..62f304f2 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -388,7 +388,7 @@ int bitmap_scnprintf(char *buf, unsigned int buflen, i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ; for (; i >= 0; i -= CHUNKSZ) { - chunkmask = ((1ULL << chunksz) - 1); + chunkmask = ((1 << chunksz) - 1); word = i / BITS_PER_LONG; bit = i % BITS_PER_LONG; val = (maskp[word] >> bit) & chunkmask; From a2ef59c0fc62bb8326e8da14e639b8077940d737 Mon Sep 17 00:00:00 2001 From: Mircea Gherzan Date: Tue, 20 Mar 2012 12:27:46 -0700 Subject: [PATCH 032/117] ARM: net: JIT compiler for packet filters based of Matt Evans's PPC64 implementation. 
The compiler generates ARM instructions but interworking is supported for Thumb2 kernels. Supports both little and big endian. Unaligned loads are emitted for ARMv6+. Not all the BPF opcodes that deal with ancillary data are supported. The scratch memory of the filter lives on the stack. Hardware integer division is used if it is available. Enabled in the same way as for x86-64 and PPC64: echo 1 > /proc/sys/net/core/bpf_jit_enable A value greater than 1 enables opcode output. Signed-off-by: Mircea Gherzan --- Changes in v7: * fix the intruction generation for LDX_MSH, OR_X, LSH_K, RSH_K and JMP_JA * fix the condition for saving the A register * use fls() instead of the compiler builtin * punt to the interpreter on absolute loads with K < 0 * check for invalid data references * support the NEG opcode * clear X in the prologue based on a context flag * simplify the conditional jumps Changes in v6: * fix the code generation for the ANC_CPU opcode Changes in v5: * replace SEEN_LEN with SEEN_SKB * set ctx->seen when handling some ancillary data opcodes Changes in v4: * first check if the JIT compiler is enabled * fix the code generation for the LDX_MSH opcode Changes in v3: * no longer depend on EABI and !Thumb2 * add BLX "emulation" for ARMv4 without Thumb * use the integer divide instruction on Cortex-A15 * fix the handling of the DIV_K opcode * use a C wrapper for __aeabi_uidiv * fix the generation of the epilogue (non-FP case) Changes in v2: * enable the compiler only for ARMv5+ because of the BLX instruction * use the same comparison for the ARM version checks * use misaligned accesses on ARMv6 * fix the SEEN_MEM * fix the mem_words_used() --- arch/arm/Kconfig | 1 + arch/arm/Makefile | 1 + arch/arm/net/Makefile | 3 + arch/arm/net/bpf_jit_32.c | 913 ++++++++++++++++++++++++++++++++++++++ arch/arm/net/bpf_jit_32.h | 190 ++++++++ 5 files changed, 1108 insertions(+) create mode 100644 arch/arm/net/Makefile create mode 100644 arch/arm/net/bpf_jit_32.c create mode 100644 arch/arm/net/bpf_jit_32.h diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 679a96d5..d97e48d7 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -22,6 +22,7 @@ config ARM select HAVE_GENERIC_HARDIRQS select HAVE_SPARSE_IRQ select GENERIC_IRQ_SHOW + select HAVE_BPF_JIT help The ARM series is a line of low-power-consumption RISC chip designs licensed by ARM Ltd and targeted at embedded applications and diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 96981603..89480796 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -252,6 +252,7 @@ core-$(CONFIG_VFP) += arch/arm/vfp/ # If we have a machine-specific directory, then include it in the build. 
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ +core-y += arch/arm/net/ core-y += $(machdirs) $(platdirs) drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ diff --git a/arch/arm/net/Makefile b/arch/arm/net/Makefile new file mode 100644 index 00000000..c2c10841 --- /dev/null +++ b/arch/arm/net/Makefile @@ -0,0 +1,3 @@ +# ARM-specific networking code + +obj-$(CONFIG_BPF_JIT) += bpf_jit_32.o diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c new file mode 100644 index 00000000..c0b88f49 --- /dev/null +++ b/arch/arm/net/bpf_jit_32.c @@ -0,0 +1,913 @@ +/* + * Just-In-Time compiler for BPF filters on 32bit ARM + * + * Copyright (c) 2011 Mircea Gherzan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_jit_32.h" + +/* + * ABI: + * + * r0 scratch register + * r4 BPF register A + * r5 BPF register X + * r6 pointer to the skb + * r7 skb->data + * r8 skb_headlen(skb) + */ + +#define r_scratch ARM_R0 +/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ +#define r_off ARM_R1 +#define r_A ARM_R4 +#define r_X ARM_R5 +#define r_skb ARM_R6 +#define r_skb_data ARM_R7 +#define r_skb_hl ARM_R8 + +#define SCRATCH_SP_OFFSET 0 +#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + (k)) + +#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) +#define SEEN_MEM_WORD(k) (1 << (k)) +#define SEEN_X (1 << BPF_MEMWORDS) +#define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) +#define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) +#define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) + +#define FLAG_NEED_X_RESET (1 << 0) + +struct jit_ctx { + const struct sk_filter *skf; + unsigned idx; + unsigned prologue_bytes; + int ret0_fp_idx; + u32 seen; + u32 flags; + u32 *offsets; + u32 *target; +#if __LINUX_ARM_ARCH__ < 7 + u16 epilogue_bytes; + u16 imm_count; + u32 *imms; +#endif +}; + +//changed to be enabled by default - show-p1984 +int bpf_jit_enable __read_mostly = 1; + +static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) +{ + u8 ret; + int err; + + err = skb_copy_bits(skb, offset, &ret, 1); + + return (u64)err << 32 | ret; +} + +static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) +{ + u16 ret; + int err; + + err = skb_copy_bits(skb, offset, &ret, 2); + + return (u64)err << 32 | ntohs(ret); +} + +static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) +{ + u32 ret; + int err; + + err = skb_copy_bits(skb, offset, &ret, 4); + + return (u64)err << 32 | ntohl(ret); +} + +/* + * Wrapper that handles both OABI and EABI and assures Thumb2 interworking + * (where the assembly routines like __aeabi_uidiv could cause problems). + */ +static u32 jit_udiv(u32 dividend, u32 divisor) +{ + return dividend / divisor; +} + +static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) +{ + if (ctx->target != NULL) + ctx->target[ctx->idx] = inst | (cond << 28); + + ctx->idx++; +} + +/* + * Emit an instruction that will be executed unconditionally. 
+ */ +static inline void emit(u32 inst, struct jit_ctx *ctx) +{ + _emit(ARM_COND_AL, inst, ctx); +} + +static u16 saved_regs(struct jit_ctx *ctx) +{ + u16 ret = 0; + + if ((ctx->skf->len > 1) || + (ctx->skf->insns[0].code == BPF_S_RET_A)) + ret |= 1 << r_A; + +#ifdef CONFIG_FRAME_POINTER + ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); +#else + if (ctx->seen & SEEN_CALL) + ret |= 1 << ARM_LR; +#endif + if (ctx->seen & (SEEN_DATA | SEEN_SKB)) + ret |= 1 << r_skb; + if (ctx->seen & SEEN_DATA) + ret |= (1 << r_skb_data) | (1 << r_skb_hl); + if (ctx->seen & SEEN_X) + ret |= 1 << r_X; + + return ret; +} + +static inline int mem_words_used(struct jit_ctx *ctx) +{ + /* yes, we do waste some stack space IF there are "holes" in the set" */ + return fls(ctx->seen & SEEN_MEM); +} + +static inline bool is_load_to_a(u16 inst) +{ + switch (inst) { + case BPF_S_LD_W_LEN: + case BPF_S_LD_W_ABS: + case BPF_S_LD_H_ABS: + case BPF_S_LD_B_ABS: + case BPF_S_ANC_CPU: + case BPF_S_ANC_IFINDEX: + case BPF_S_ANC_MARK: + case BPF_S_ANC_PROTOCOL: + case BPF_S_ANC_RXHASH: + case BPF_S_ANC_QUEUE: + return true; + default: + return false; + } +} + +static void build_prologue(struct jit_ctx *ctx) +{ + u16 reg_set = saved_regs(ctx); + u16 first_inst = ctx->skf->insns[0].code; + u16 off; + +#ifdef CONFIG_FRAME_POINTER + emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); + emit(ARM_PUSH(reg_set), ctx); + emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); +#else + if (reg_set) + emit(ARM_PUSH(reg_set), ctx); +#endif + + if (ctx->seen & (SEEN_DATA | SEEN_SKB)) + emit(ARM_MOV_R(r_skb, ARM_R0), ctx); + + if (ctx->seen & SEEN_DATA) { + off = offsetof(struct sk_buff, data); + emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); + /* headlen = len - data_len */ + off = offsetof(struct sk_buff, len); + emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); + off = offsetof(struct sk_buff, data_len); + emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); + emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); + } + + if (ctx->flags & FLAG_NEED_X_RESET) + emit(ARM_MOV_I(r_X, 0), ctx); + + /* do not leak kernel data to userspace */ + if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) + emit(ARM_MOV_I(r_A, 0), ctx); + + /* stack space for the BPF_MEM words */ + if (ctx->seen & SEEN_MEM) + emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + u16 reg_set = saved_regs(ctx); + + if (ctx->seen & SEEN_MEM) + emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); + + reg_set &= ~(1 << ARM_LR); + +#ifdef CONFIG_FRAME_POINTER + /* the first instruction of the prologue was: mov ip, sp */ + reg_set &= ~(1 << ARM_IP); + reg_set |= (1 << ARM_SP); + emit(ARM_LDM(ARM_SP, reg_set), ctx); +#else + if (reg_set) { + if (ctx->seen & SEEN_CALL) + reg_set |= 1 << ARM_PC; + emit(ARM_POP(reg_set), ctx); + } + + if (!(ctx->seen & SEEN_CALL)) + emit(ARM_BX(ARM_LR), ctx); +#endif +} + +static int16_t imm8m(u32 x) +{ + u32 rot; + + for (rot = 0; rot < 16; rot++) + if ((x & ~ror32(0xff, 2 * rot)) == 0) + return rol32(x, 2 * rot) | (rot << 8); + + return -1; +} + +#if __LINUX_ARM_ARCH__ < 7 + +static u16 imm_offset(u32 k, struct jit_ctx *ctx) +{ + unsigned i = 0, offset; + u16 imm; + + /* on the "fake" run we just count them (duplicates included) */ + if (ctx->target == NULL) { + ctx->imm_count++; + return 0; + } + + while ((i < ctx->imm_count) && ctx->imms[i]) { + if (ctx->imms[i] == k) + break; + i++; + } + + if (ctx->imms[i] == 0) + ctx->imms[i] = k; + + /* constants go just after the epilogue */ + 
offset = ctx->offsets[ctx->skf->len]; + offset += ctx->prologue_bytes; + offset += ctx->epilogue_bytes; + offset += i * 4; + + ctx->target[offset / 4] = k; + + /* PC in ARM mode == address of the instruction + 8 */ + imm = offset - (8 + ctx->idx * 4); + + return imm; +} + +#endif /* __LINUX_ARM_ARCH__ */ + +/* + * Move an immediate that's not an imm8m to a core register. + */ +static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 7 + emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); +#else + emit(ARM_MOVW(rd, val & 0xffff), ctx); + if (val > 0xffff) + emit(ARM_MOVT(rd, val >> 16), ctx); +#endif +} + +static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) +{ + int imm12 = imm8m(val); + + if (imm12 >= 0) + emit(ARM_MOV_I(rd, imm12), ctx); + else + emit_mov_i_no8m(rd, val, ctx); +} + +#if __LINUX_ARM_ARCH__ < 6 + +static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +{ + _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); + _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); + _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); + _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); + _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); + _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); + _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); + _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); +} + +static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +{ + _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); + _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); + _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); +} + +static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) +{ + emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx); + emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx); + emit(ARM_LSL_I(r_dst, r_dst, 8), ctx); + emit(ARM_LSL_R(r_dst, r_dst, 8), ctx); +} + +#else /* ARMv6+ */ + +static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +{ + _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); +#ifdef __LITTLE_ENDIAN + _emit(cond, ARM_REV(r_res, r_res), ctx); +#endif +} + +static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +{ + _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); +#ifdef __LITTLE_ENDIAN + _emit(cond, ARM_REV16(r_res, r_res), ctx); +#endif +} + +static inline void emit_swap16(u8 r_dst __maybe_unused, + u8 r_src __maybe_unused, + struct jit_ctx *ctx __maybe_unused) +{ +#ifdef __LITTLE_ENDIAN + emit(ARM_REV16(r_dst, r_src), ctx); +#endif +} + +#endif /* __LINUX_ARM_ARCH__ < 6 */ + + +/* Compute the immediate value for a PC-relative branch. */ +static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) +{ + u32 imm; + + if (ctx->target == NULL) + return 0; + /* + * BPF allows only forward jumps and the offset of the target is + * still the one computed during the first pass. 
+ */ + imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); + + return imm >> 2; +} + +#define OP_IMM3(op, r1, r2, imm_val, ctx) \ + do { \ + imm12 = imm8m(imm_val); \ + if (imm12 < 0) { \ + emit_mov_i_no8m(r_scratch, imm_val, ctx); \ + emit(op ## _R((r1), (r2), r_scratch), ctx); \ + } else { \ + emit(op ## _I((r1), (r2), imm12), ctx); \ + } \ + } while (0) + +static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) +{ + if (ctx->ret0_fp_idx >= 0) { + _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); + /* NOP to keep the size constant between passes */ + emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); + } else { + _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); + _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); + } +} + +static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 5 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); + + if (elf_hwcap & HWCAP_THUMB) + emit(ARM_BX(tgt_reg), ctx); + else + emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); +#else + emit(ARM_BLX_R(tgt_reg), ctx); +#endif +} + +static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ == 7 + if (elf_hwcap & HWCAP_IDIVA) { + emit(ARM_UDIV(rd, rm, rn), ctx); + return; + } +#endif + if (rm != ARM_R0) + emit(ARM_MOV_R(ARM_R0, rm), ctx); + if (rn != ARM_R1) + emit(ARM_MOV_R(ARM_R1, rn), ctx); + + ctx->seen |= SEEN_CALL; + emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); + emit_blx_r(ARM_R3, ctx); + + if (rd != ARM_R0) + emit(ARM_MOV_R(rd, ARM_R0), ctx); +} + +static inline void update_on_xread(struct jit_ctx *ctx) +{ + if (!(ctx->seen & SEEN_X)) + ctx->flags |= FLAG_NEED_X_RESET; + + ctx->seen |= SEEN_X; +} + +static int build_body(struct jit_ctx *ctx) +{ + void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; + const struct sk_filter *prog = ctx->skf; + const struct sock_filter *inst; + unsigned i, load_order, off, condt; + int imm12; + u32 k; + + for (i = 0; i < prog->len; i++) { + inst = &(prog->insns[i]); + /* K as an immediate value operand */ + k = inst->k; + + /* compute offsets only in the fake pass */ + if (ctx->target == NULL) + ctx->offsets[i] = ctx->idx * 4; + + switch (inst->code) { + case BPF_S_LD_IMM: + emit_mov_i(r_A, k, ctx); + break; + case BPF_S_LD_W_LEN: + ctx->seen |= SEEN_SKB; + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); + emit(ARM_LDR_I(r_A, r_skb, + offsetof(struct sk_buff, len)), ctx); + break; + case BPF_S_LD_MEM: + /* A = scratch[k] */ + ctx->seen |= SEEN_MEM_WORD(k); + emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); + break; + case BPF_S_LD_W_ABS: + load_order = 2; + goto load; + case BPF_S_LD_H_ABS: + load_order = 1; + goto load; + case BPF_S_LD_B_ABS: + load_order = 0; +load: + /* the interpreter will deal with the negative K */ + if (k < 0) + return -1; + emit_mov_i(r_off, k, ctx); +load_common: + ctx->seen |= SEEN_DATA | SEEN_CALL; + + if (load_order > 0) { + emit(ARM_SUB_I(r_scratch, r_skb_hl, + 1 << load_order), ctx); + emit(ARM_CMP_R(r_scratch, r_off), ctx); + condt = ARM_COND_HS; + } else { + emit(ARM_CMP_R(r_skb_hl, r_off), ctx); + condt = ARM_COND_HI; + } + + _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), + ctx); + + if (load_order == 0) + _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), + ctx); + else if (load_order == 1) + emit_load_be16(condt, r_A, r_scratch, ctx); + else if (load_order == 2) + emit_load_be32(condt, r_A, r_scratch, ctx); + + _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); + + /* the slowpath */ + emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); + emit(ARM_MOV_R(ARM_R0, r_skb), 
ctx); + /* the offset is already in R1 */ + emit_blx_r(ARM_R3, ctx); + /* check the result of skb_copy_bits */ + emit(ARM_CMP_I(ARM_R1, 0), ctx); + emit_err_ret(ARM_COND_NE, ctx); + emit(ARM_MOV_R(r_A, ARM_R0), ctx); + break; + case BPF_S_LD_W_IND: + load_order = 2; + goto load_ind; + case BPF_S_LD_H_IND: + load_order = 1; + goto load_ind; + case BPF_S_LD_B_IND: + load_order = 0; +load_ind: + OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); + goto load_common; + case BPF_S_LDX_IMM: + ctx->seen |= SEEN_X; + emit_mov_i(r_X, k, ctx); + break; + case BPF_S_LDX_W_LEN: + ctx->seen |= SEEN_X | SEEN_SKB; + emit(ARM_LDR_I(r_X, r_skb, + offsetof(struct sk_buff, len)), ctx); + break; + case BPF_S_LDX_MEM: + ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); + emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); + break; + case BPF_S_LDX_B_MSH: + /* x = ((*(frame + k)) & 0xf) << 2; */ + ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; + /* the interpreter should deal with the negative K */ + if (k < 0) + return -1; + /* offset in r1: we might have to take the slow path */ + emit_mov_i(r_off, k, ctx); + emit(ARM_CMP_R(r_skb_hl, r_off), ctx); + + /* load in r0: common with the slowpath */ + _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, + ARM_R1), ctx); + /* + * emit_mov_i() might generate one or two instructions, + * the same holds for emit_blx_r() + */ + _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); + + emit(ARM_MOV_R(ARM_R0, r_skb), ctx); + /* r_off is r1 */ + emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); + emit_blx_r(ARM_R3, ctx); + + emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); + emit(ARM_LSL_I(r_X, r_X, 2), ctx); + break; + case BPF_S_ST: + ctx->seen |= SEEN_MEM_WORD(k); + emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); + break; + case BPF_S_STX: + update_on_xread(ctx); + ctx->seen |= SEEN_MEM_WORD(k); + emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); + break; + case BPF_S_ALU_ADD_K: + /* A += K */ + OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); + break; + case BPF_S_ALU_ADD_X: + update_on_xread(ctx); + emit(ARM_ADD_R(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_SUB_K: + /* A -= K */ + OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); + break; + case BPF_S_ALU_SUB_X: + update_on_xread(ctx); + emit(ARM_SUB_R(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_MUL_K: + /* A *= K */ + emit_mov_i(r_scratch, k, ctx); + emit(ARM_MUL(r_A, r_A, r_scratch), ctx); + break; + case BPF_S_ALU_MUL_X: + update_on_xread(ctx); + emit(ARM_MUL(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_DIV_K: + /* current k == reciprocal_value(userspace k) */ + emit_mov_i(r_scratch, k, ctx); + /* A = top 32 bits of the product */ + emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); + break; + case BPF_S_ALU_DIV_X: + update_on_xread(ctx); + emit(ARM_CMP_I(r_X, 0), ctx); + emit_err_ret(ARM_COND_EQ, ctx); + emit_udiv(r_A, r_A, r_X, ctx); + break; + case BPF_S_ALU_OR_K: + /* A |= K */ + OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); + break; + case BPF_S_ALU_OR_X: + update_on_xread(ctx); + emit(ARM_ORR_R(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_AND_K: + /* A &= K */ + OP_IMM3(ARM_AND, r_A, r_A, k, ctx); + break; + case BPF_S_ALU_AND_X: + update_on_xread(ctx); + emit(ARM_AND_R(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_LSH_K: + if (unlikely(k > 31)) + return -1; + emit(ARM_LSL_I(r_A, r_A, k), ctx); + break; + case BPF_S_ALU_LSH_X: + update_on_xread(ctx); + emit(ARM_LSL_R(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_RSH_K: + if (unlikely(k > 31)) + return -1; + emit(ARM_LSR_I(r_A, r_A, k), ctx); + break; + case BPF_S_ALU_RSH_X: + update_on_xread(ctx); + 
emit(ARM_LSR_R(r_A, r_A, r_X), ctx); + break; + case BPF_S_ALU_NEG: + /* A = -A */ + emit(ARM_RSB_I(r_A, r_A, 0), ctx); + break; + case BPF_S_JMP_JA: + /* pc += K */ + emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); + break; + case BPF_S_JMP_JEQ_K: + /* pc += (A == K) ? pc->jt : pc->jf */ + condt = ARM_COND_EQ; + goto cmp_imm; + case BPF_S_JMP_JGT_K: + /* pc += (A > K) ? pc->jt : pc->jf */ + condt = ARM_COND_HI; + goto cmp_imm; + case BPF_S_JMP_JGE_K: + /* pc += (A >= K) ? pc->jt : pc->jf */ + condt = ARM_COND_HS; +cmp_imm: + imm12 = imm8m(k); + if (imm12 < 0) { + emit_mov_i_no8m(r_scratch, k, ctx); + emit(ARM_CMP_R(r_A, r_scratch), ctx); + } else { + emit(ARM_CMP_I(r_A, imm12), ctx); + } +cond_jump: + if (inst->jt) + _emit(condt, ARM_B(b_imm(i + inst->jt + 1, + ctx)), ctx); + if (inst->jf) + _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, + ctx)), ctx); + break; + case BPF_S_JMP_JEQ_X: + /* pc += (A == X) ? pc->jt : pc->jf */ + condt = ARM_COND_EQ; + goto cmp_x; + case BPF_S_JMP_JGT_X: + /* pc += (A > X) ? pc->jt : pc->jf */ + condt = ARM_COND_HI; + goto cmp_x; + case BPF_S_JMP_JGE_X: + /* pc += (A >= X) ? pc->jt : pc->jf */ + condt = ARM_COND_CS; +cmp_x: + update_on_xread(ctx); + emit(ARM_CMP_R(r_A, r_X), ctx); + goto cond_jump; + case BPF_S_JMP_JSET_K: + /* pc += (A & K) ? pc->jt : pc->jf */ + condt = ARM_COND_NE; + /* not set iff all zeroes iff Z==1 iff EQ */ + + imm12 = imm8m(k); + if (imm12 < 0) { + emit_mov_i_no8m(r_scratch, k, ctx); + emit(ARM_TST_R(r_A, r_scratch), ctx); + } else { + emit(ARM_TST_I(r_A, imm12), ctx); + } + goto cond_jump; + case BPF_S_JMP_JSET_X: + /* pc += (A & X) ? pc->jt : pc->jf */ + update_on_xread(ctx); + condt = ARM_COND_NE; + emit(ARM_TST_R(r_A, r_X), ctx); + goto cond_jump; + case BPF_S_RET_A: + emit(ARM_MOV_R(ARM_R0, r_A), ctx); + goto b_epilogue; + case BPF_S_RET_K: + if ((k == 0) && (ctx->ret0_fp_idx < 0)) + ctx->ret0_fp_idx = i; + emit_mov_i(ARM_R0, k, ctx); +b_epilogue: + if (i != ctx->skf->len - 1) + emit(ARM_B(b_imm(prog->len, ctx)), ctx); + break; + case BPF_S_MISC_TAX: + /* X = A */ + ctx->seen |= SEEN_X; + emit(ARM_MOV_R(r_X, r_A), ctx); + break; + case BPF_S_MISC_TXA: + /* A = X */ + update_on_xread(ctx); + emit(ARM_MOV_R(r_A, r_X), ctx); + break; + case BPF_S_ANC_PROTOCOL: + /* A = ntohs(skb->protocol) */ + ctx->seen |= SEEN_SKB; + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + protocol) != 2); + off = offsetof(struct sk_buff, protocol); + emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); + emit_swap16(r_A, r_scratch, ctx); + break; + case BPF_S_ANC_CPU: + /* r_scratch = current_thread_info() */ + OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); + /* A = current_thread_info()->cpu */ + BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); + off = offsetof(struct thread_info, cpu); + emit(ARM_LDR_I(r_A, r_scratch, off), ctx); + break; + case BPF_S_ANC_IFINDEX: + /* A = skb->dev->ifindex */ + ctx->seen |= SEEN_SKB; + off = offsetof(struct sk_buff, dev); + emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); + + emit(ARM_CMP_I(r_scratch, 0), ctx); + emit_err_ret(ARM_COND_EQ, ctx); + + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + ifindex) != 4); + off = offsetof(struct net_device, ifindex); + emit(ARM_LDR_I(r_A, r_scratch, off), ctx); + break; + case BPF_S_ANC_MARK: + ctx->seen |= SEEN_SKB; + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + off = offsetof(struct sk_buff, mark); + emit(ARM_LDR_I(r_A, r_skb, off), ctx); + break; + case BPF_S_ANC_RXHASH: + ctx->seen |= SEEN_SKB; + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); + off = 
offsetof(struct sk_buff, rxhash); + emit(ARM_LDR_I(r_A, r_skb, off), ctx); + break; + case BPF_S_ANC_QUEUE: + ctx->seen |= SEEN_SKB; + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + queue_mapping) != 2); + BUILD_BUG_ON(offsetof(struct sk_buff, + queue_mapping) > 0xff); + off = offsetof(struct sk_buff, queue_mapping); + emit(ARM_LDRH_I(r_A, r_skb, off), ctx); + break; + default: + return -1; + } + } + + /* compute offsets only during the first pass */ + if (ctx->target == NULL) + ctx->offsets[i] = ctx->idx * 4; + + return 0; +} + + +void bpf_jit_compile(struct sk_filter *fp) +{ + struct jit_ctx ctx; + unsigned tmp_idx; + unsigned alloc_size; + + if (!bpf_jit_enable) + return; + + memset(&ctx, 0, sizeof(ctx)); + ctx.skf = fp; + ctx.ret0_fp_idx = -1; + + ctx.offsets = kzalloc(GFP_KERNEL, 4 * (ctx.skf->len + 1)); + if (ctx.offsets == NULL) + return; + + /* fake pass to fill in the ctx->seen */ + if (unlikely(build_body(&ctx))) + goto out; + + tmp_idx = ctx.idx; + build_prologue(&ctx); + ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; + +#if __LINUX_ARM_ARCH__ < 7 + tmp_idx = ctx.idx; + build_epilogue(&ctx); + ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4; + + ctx.idx += ctx.imm_count; + if (ctx.imm_count) { + ctx.imms = kzalloc(GFP_KERNEL, 4 * ctx.imm_count); + if (ctx.imms == NULL) + goto out; + } +#else + /* there's nothing after the epilogue on ARMv7 */ + build_epilogue(&ctx); +#endif + + alloc_size = 4 * ctx.idx; + ctx.target = module_alloc(max(sizeof(struct work_struct), + alloc_size)); + if (unlikely(ctx.target == NULL)) + goto out; + + ctx.idx = 0; + build_prologue(&ctx); + build_body(&ctx); + build_epilogue(&ctx); + + flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); + +#if __LINUX_ARM_ARCH__ < 7 + if (ctx.imm_count) + kfree(ctx.imms); +#endif + + if (bpf_jit_enable > 1) + print_hex_dump(KERN_INFO, "BPF JIT code: ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx.target, + alloc_size, false); + + fp->bpf_func = (void *)ctx.target; +out: + kfree(ctx.offsets); + return; +} + +static void bpf_jit_free_worker(struct work_struct *work) +{ + module_free(NULL, work); +} + +void bpf_jit_free(struct sk_filter *fp) +{ + struct work_struct *work; + + if (fp->bpf_func != sk_run_filter) { + work = (struct work_struct *)fp->bpf_func; + + INIT_WORK(work, bpf_jit_free_worker); + schedule_work(work); + } +} + diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h new file mode 100644 index 00000000..6c87fbe3 --- /dev/null +++ b/arch/arm/net/bpf_jit_32.h @@ -0,0 +1,190 @@ +/* + * Just-In-Time compiler for BPF filters on 32bit ARM + * + * Copyright (c) 2011 Mircea Gherzan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License. 
+ */ + +#ifndef PFILTER_OPCODES_ARM_H +#define PFILTER_OPCODES_ARM_H + +#define ARM_R0 0 +#define ARM_R1 1 +#define ARM_R2 2 +#define ARM_R3 3 +#define ARM_R4 4 +#define ARM_R5 5 +#define ARM_R6 6 +#define ARM_R7 7 +#define ARM_R8 8 +#define ARM_R9 9 +#define ARM_R10 10 +#define ARM_FP 11 +#define ARM_IP 12 +#define ARM_SP 13 +#define ARM_LR 14 +#define ARM_PC 15 + +#define ARM_COND_EQ 0x0 +#define ARM_COND_NE 0x1 +#define ARM_COND_CS 0x2 +#define ARM_COND_HS ARM_COND_CS +#define ARM_COND_CC 0x3 +#define ARM_COND_LO ARM_COND_CC +#define ARM_COND_MI 0x4 +#define ARM_COND_PL 0x5 +#define ARM_COND_VS 0x6 +#define ARM_COND_VC 0x7 +#define ARM_COND_HI 0x8 +#define ARM_COND_LS 0x9 +#define ARM_COND_GE 0xa +#define ARM_COND_LT 0xb +#define ARM_COND_GT 0xc +#define ARM_COND_LE 0xd +#define ARM_COND_AL 0xe + +/* register shift types */ +#define SRTYPE_LSL 0 +#define SRTYPE_LSR 1 +#define SRTYPE_ASR 2 +#define SRTYPE_ROR 3 + +#define ARM_INST_ADD_R 0x00800000 +#define ARM_INST_ADD_I 0x02800000 + +#define ARM_INST_AND_R 0x00000000 +#define ARM_INST_AND_I 0x02000000 + +#define ARM_INST_BIC_R 0x01c00000 +#define ARM_INST_BIC_I 0x03c00000 + +#define ARM_INST_B 0x0a000000 +#define ARM_INST_BX 0x012FFF10 +#define ARM_INST_BLX_R 0x012fff30 + +#define ARM_INST_CMP_R 0x01500000 +#define ARM_INST_CMP_I 0x03500000 + +#define ARM_INST_LDRB_I 0x05d00000 +#define ARM_INST_LDRB_R 0x07d00000 +#define ARM_INST_LDRH_I 0x01d000b0 +#define ARM_INST_LDR_I 0x05900000 + +#define ARM_INST_LDM 0x08900000 + +#define ARM_INST_LSL_I 0x01a00000 +#define ARM_INST_LSL_R 0x01a00010 + +#define ARM_INST_LSR_I 0x01a00020 +#define ARM_INST_LSR_R 0x01a00030 + +#define ARM_INST_MOV_R 0x01a00000 +#define ARM_INST_MOV_I 0x03a00000 +#define ARM_INST_MOVW 0x03000000 +#define ARM_INST_MOVT 0x03400000 + +#define ARM_INST_MUL 0x00000090 + +#define ARM_INST_POP 0x08bd0000 +#define ARM_INST_PUSH 0x092d0000 + +#define ARM_INST_ORR_R 0x01800000 +#define ARM_INST_ORR_I 0x03800000 + +#define ARM_INST_REV 0x06bf0f30 +#define ARM_INST_REV16 0x06bf0fb0 + +#define ARM_INST_RSB_I 0x02600000 + +#define ARM_INST_SUB_R 0x00400000 +#define ARM_INST_SUB_I 0x02400000 + +#define ARM_INST_STR_I 0x05800000 + +#define ARM_INST_TST_R 0x01100000 +#define ARM_INST_TST_I 0x03100000 + +#define ARM_INST_UDIV 0x0730f010 + +#define ARM_INST_UMULL 0x00800090 + +/* register */ +#define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm)) +/* immediate */ +#define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm)) + +#define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm) +#define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm) + +#define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) +#define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) + +#define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm) +#define ARM_BIC_I(rd, rn, imm) _AL3_I(ARM_INST_BIC, rd, rn, imm) + +#define ARM_B(imm24) (ARM_INST_B | ((imm24) & 0xffffff)) +#define ARM_BX(rm) (ARM_INST_BX | (rm)) +#define ARM_BLX_R(rm) (ARM_INST_BLX_R | (rm)) + +#define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm) +#define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm) + +#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ + | (off)) +#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ + | (off)) +#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ + | (rm)) +#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \ + | (((off) & 0xf0) << 4) | ((off) & 
0xf)) + +#define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) + +#define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8) +#define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7) + +#define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8) +#define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7) + +#define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm) +#define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm) + +#define ARM_MOVW(rd, imm) \ + (ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) + +#define ARM_MOVT(rd, imm) \ + (ARM_INST_MOVT | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) + +#define ARM_MUL(rd, rm, rn) (ARM_INST_MUL | (rd) << 16 | (rm) << 8 | (rn)) + +#define ARM_POP(regs) (ARM_INST_POP | (regs)) +#define ARM_PUSH(regs) (ARM_INST_PUSH | (regs)) + +#define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm) +#define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm) +#define ARM_ORR_S(rd, rn, rm, type, rs) \ + (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7) + +#define ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm)) +#define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm)) + +#define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm) + +#define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm) +#define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm) + +#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ + | (off)) + +#define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) +#define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) + +#define ARM_UDIV(rd, rn, rm) (ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8) + +#define ARM_UMULL(rd_lo, rd_hi, rn, rm) (ARM_INST_UMULL | (rd_hi) << 16 \ + | (rd_lo) << 12 | (rm) << 8 | rn) + +#endif /* PFILTER_OPCODES_ARM_H */ From e0449766ef01a99905d4533f579e90787eb49b3b Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Thu, 17 Jan 2013 09:13:31 -0800 Subject: [PATCH 033/117] Make RNG read/write wakeup threshold configurable Conflicts: arch/arm/configs/vigor_defconfig --- drivers/char/Kconfig | 10 ++++++++++ drivers/char/random.c | 7 +++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index fc18d6fb..1fc7f18c 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -6,6 +6,16 @@ menu "Character devices" source "drivers/tty/Kconfig" +config RANDOM_READ_WAKEUP + int "RNG Read Wakeup Threshold" + default 64 + help + +config RANDOM_WRITE_WAKEUP + int "RNG Write Wakeup Threshold" + default 128 + help + config DEVMEM bool "Memory device driver" default y diff --git a/drivers/char/random.c b/drivers/char/random.c index fceac955..2c91ada9 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -283,14 +283,17 @@ * The minimum number of bits of entropy before we wake up a read on * /dev/random. Should be enough to do a significant reseed. */ -static int random_read_wakeup_thresh = 64; + +static int random_read_wakeup_thresh = CONFIG_RANDOM_READ_WAKEUP; + /* * If the entropy count falls under this number of bits, then we * should wake up processes which are selecting or polling on write * access to /dev/random. 
*/ -static int random_write_wakeup_thresh = 128; + +static int random_write_wakeup_thresh = CONFIG_RANDOM_WRITE_WAKEUP; /* * When the input pool goes over trickle_thresh, start dropping most From 17524d6d1f07e797e36f778e0efdf7d9666f4ff7 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 22 Jan 2013 13:27:44 -0500 Subject: [PATCH 034/117] Increase RNG thresholds Double stock RNG thresholds --- arch/arm/configs/vigor_aosp_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 1789c52a..d5a71a4c 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -2871,6 +2871,8 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_QCEDEV is not set # CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set # CONFIG_BINARY_PRINTF is not set +CONFIG_RANDOM_READ_WAKEUP=128 +CONFIG_RANDOM_WRITE_WAKEUP=256 # # Library routines From 05b50dbef3ae86af1557bce204cd4e458176622b Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 22 Jan 2013 13:58:36 -0500 Subject: [PATCH 035/117] Update version --- scripts/mkcompile_h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index bbb89e8c..1ca1adef 100644 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -74,7 +74,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" echo \#define LINUX_COMPILE_BY \"`echo shrike1978`\" - echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_13.01.16`\" + echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_13.01.23`\" echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\" ) > .tmpcompile From dde90552c02187a01f1d4013b58c84752f2015cd Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Mon, 21 Jan 2013 00:05:35 -0500 Subject: [PATCH 036/117] [PATCH] ARM: makefile: work around toolchain bug in recent versions of binutils Recent upstream versions of binutils fail to assembler compressed/head.S when passed the -march=all option: http://lists.gnu.org/archive/html/bug-binutils/2011-04/msg00162.html The recommended workaround from the tools folks is not to pass the option, and instead let the assembler deduce the CPU type based on the features used by the code. Signed-off-by: Will Deacon --- arch/arm/boot/compressed/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 2cd81c8a..36e58a00 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -117,7 +117,6 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif ccflags-y := -fpic -fno-builtin -I$(obj) -asflags-y := -Wa,-march=all # Supply kernel BSS size to the decompressor via a linker symbol. 
SIZEBIN := $(if $(shell which $(CROSS_COMPILE)size),$(CROSS_COMPILE)size,size) From 263e610529024ab7dae37b0979ddd1b6a700f758 Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Mon, 21 Jan 2013 12:50:31 -0500 Subject: [PATCH 037/117] Netfilter Socket fix Courtesy of Coolexe More info found here https://github.com/Coolexe/shooter-ics-stock/blob/31f5bc3d7644a95c61a9b717e885e7f0afcaf332/net/netfilter/xt_socket.c Also merge fixes to correct build --- net/netfilter/xt_socket.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index ddf5e050..848ce106 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -108,9 +108,9 @@ xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par) const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp = NULL; struct sock *sk; - __be32 daddr, saddr; - __be16 dport, sport; - u8 protocol; + __be32 daddr = 0, saddr = 0; + __be16 dport = 0, sport = 0; + u8 protocol = 0; #ifdef XT_SOCKET_HAVE_CONNTRACK struct nf_conn const *ct; enum ip_conntrack_info ctinfo; @@ -271,8 +271,8 @@ xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par) struct ipv6hdr *iph = ipv6_hdr(skb); struct udphdr _hdr, *hp = NULL; struct sock *sk; - struct in6_addr *daddr, *saddr; - __be16 dport, sport; + struct in6_addr *daddr = NULL, *saddr = NULL; + __be16 dport = 0, sport = 0; int thoff, tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); From 373657979ec522b605f8a19743b8118ad67ba0e7 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Thu, 24 Jan 2013 14:21:06 -0500 Subject: [PATCH 038/117] Disable CFQ No one should be using CFQ on a flash device --- arch/arm/configs/vigor_aosp_defconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index d5a71a4c..d663bbf6 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -166,13 +166,13 @@ CONFIG_LBDAF=y # CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y +# CONFIG_IOSCHED_CFQ is not set CONFIG_IOSCHED_ROW=y CONFIG_DEFAULT_ROW=y # CONFIG_DEFAULT_DEADLINE is not set # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_IOSCHED="row" # CONFIG_INLINE_SPIN_TRYLOCK is not set # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set # CONFIG_INLINE_SPIN_LOCK is not set From 6b457295f411d1e3c45cd7fd0980de9e480d92ae Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Fri, 25 Jan 2013 08:11:58 -0500 Subject: [PATCH 039/117] Add SIO scheduler --- arch/arm/configs/vigor_aosp_defconfig | 2 + block/Kconfig.iosched | 14 + block/Makefile | 1 + block/sio-iosched.c | 399 ++++++++++++++++++++++++++ 4 files changed, 416 insertions(+) create mode 100644 block/sio-iosched.c diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index d663bbf6..069fd218 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -168,10 +168,12 @@ CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y # CONFIG_IOSCHED_CFQ is not set CONFIG_IOSCHED_ROW=y +CONFIG_IOSCHED_SIO=y CONFIG_DEFAULT_ROW=y # CONFIG_DEFAULT_DEADLINE is not set # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set CONFIG_DEFAULT_IOSCHED="row" # CONFIG_INLINE_SPIN_TRYLOCK is not set # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set diff --git a/block/Kconfig.iosched 
b/block/Kconfig.iosched index 19c873ea..3f9b9d25 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -46,6 +46,16 @@ config IOSCHED_CFQ Note: If BLK_CGROUP=m, then CFQ can be built only as module. +config IOSCHED_SIO + tristate "Simple I/O scheduler" + default y + ---help--- + The Simple I/O scheduler is an extremely simple scheduler, + based on noop and deadline, that relies on deadlines to + ensure fairness. The algorithm does not do any sorting but + basic merging, trying to keep a minimum overhead. It is aimed + mainly for aleatory access devices (eg: flash devices). + config CFQ_GROUP_IOSCHED bool "CFQ Group Scheduling support" depends on IOSCHED_CFQ && BLK_CGROUP @@ -79,6 +89,9 @@ choice config DEFAULT_NOOP bool "No-op" + config DEFAULT_SIO + bool "SIO" if IOSCHED_SIO=y + endchoice config DEFAULT_IOSCHED @@ -87,6 +100,7 @@ config DEFAULT_IOSCHED default "row" if DEFAULT_ROW default "cfq" if DEFAULT_CFQ default "noop" if DEFAULT_NOOP + default "sio" if DEFAULT_SIO endmenu diff --git a/block/Makefile b/block/Makefile index cdf7125b..3db01b11 100644 --- a/block/Makefile +++ b/block/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_IOSCHED_ROW) += row-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o +obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o diff --git a/block/sio-iosched.c b/block/sio-iosched.c new file mode 100644 index 00000000..c52a67c5 --- /dev/null +++ b/block/sio-iosched.c @@ -0,0 +1,399 @@ +/* + * Simple IO scheduler + * Based on Noop, Deadline and V(R) IO schedulers. + * + * Copyright (C) 2012 Miguel Boton + * + * + * This algorithm does not do any kind of sorting, as it is aimed for + * aleatory access devices, but it does some basic merging. We try to + * keep minimum overhead to achieve low latency. + * + * Asynchronous and synchronous requests are not treated separately, but + * we relay on deadlines to ensure fairness. + * + */ +#include +#include +#include +#include +#include +#include + +enum { ASYNC, SYNC }; + +/* Tunables */ +static const int sync_read_expire = HZ / 2; /* max time before a sync read is submitted. */ +static const int sync_write_expire = 2 * HZ; /* max time before a sync write is submitted. */ + +static const int async_read_expire = 4 * HZ; /* ditto for async, these limits are SOFT! */ +static const int async_write_expire = 16 * HZ; /* ditto for async, these limits are SOFT! */ + +static const int writes_starved = 2; /* max times reads can starve a write */ +static const int fifo_batch = 8; /* # of sequential requests treated as one + by the above parameters. For throughput. */ + +/* Elevator data */ +struct sio_data { + /* Request queues */ + struct list_head fifo_list[2][2]; + + /* Attributes */ + unsigned int batched; + unsigned int starved; + + /* Settings */ + int fifo_expire[2][2]; + int fifo_batch; + int writes_starved; +}; + +static void +sio_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * If next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo. 
+ */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) { + list_move(&rq->queuelist, &next->queuelist); + rq_set_fifo_time(rq, rq_fifo_time(next)); + } + } + + /* Delete next request */ + rq_fifo_clear(next); +} + +static void +sio_add_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + /* + * Add request to the proper fifo list and set its + * expire time. + */ + rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync][data_dir]); + list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) +static int +sio_queue_empty(struct request_queue *q) +{ + struct sio_data *sd = q->elevator->elevator_data; + + /* Check if fifo lists are empty */ + return list_empty(&sd->fifo_list[SYNC][READ]) && list_empty(&sd->fifo_list[SYNC][WRITE]) && + list_empty(&sd->fifo_list[ASYNC][READ]) && list_empty(&sd->fifo_list[ASYNC][WRITE]); +} +#endif + +static struct request * +sio_expired_request(struct sio_data *sd, int sync, int data_dir) +{ + struct list_head *list = &sd->fifo_list[sync][data_dir]; + struct request *rq; + + if (list_empty(list)) + return NULL; + + /* Retrieve request */ + rq = rq_entry_fifo(list->next); + + /* Request has expired */ + if (time_after(jiffies, rq_fifo_time(rq))) + return rq; + + return NULL; +} + +static struct request * +sio_choose_expired_request(struct sio_data *sd) +{ + struct request *rq; + + /* + * Check expired requests. + * Asynchronous requests have priority over synchronous. + * Write requests have priority over read. + */ + rq = sio_expired_request(sd, ASYNC, WRITE); + if (rq) + return rq; + rq = sio_expired_request(sd, ASYNC, READ); + if (rq) + return rq; + + rq = sio_expired_request(sd, SYNC, WRITE); + if (rq) + return rq; + rq = sio_expired_request(sd, SYNC, READ); + if (rq) + return rq; + + return NULL; +} + +static struct request * +sio_choose_request(struct sio_data *sd, int data_dir) +{ + struct list_head *sync = sd->fifo_list[SYNC]; + struct list_head *async = sd->fifo_list[ASYNC]; + + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. + * Read requests have priority over write. + */ + if (!list_empty(&sync[data_dir])) + return rq_entry_fifo(sync[data_dir].next); + if (!list_empty(&async[data_dir])) + return rq_entry_fifo(async[data_dir].next); + + if (!list_empty(&sync[!data_dir])) + return rq_entry_fifo(sync[!data_dir].next); + if (!list_empty(&async[!data_dir])) + return rq_entry_fifo(async[!data_dir].next); + + return NULL; +} + +static inline void +sio_dispatch_request(struct sio_data *sd, struct request *rq) +{ + /* + * Remove the request from the fifo list + * and dispatch it. + */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + sd->batched++; + + if (rq_data_dir(rq)) + sd->starved = 0; + else + sd->starved++; +} + +static int +sio_dispatch_requests(struct request_queue *q, int force) +{ + struct sio_data *sd = q->elevator->elevator_data; + struct request *rq = NULL; + int data_dir = READ; + + /* + * Retrieve any expired request after a batch of + * sequential requests. 
+ */ + if (sd->batched > sd->fifo_batch) { + sd->batched = 0; + rq = sio_choose_expired_request(sd); + } + + /* Retrieve request */ + if (!rq) { + if (sd->starved > sd->writes_starved) + data_dir = WRITE; + + rq = sio_choose_request(sd, data_dir); + if (!rq) + return 0; + } + + /* Dispatch request */ + sio_dispatch_request(sd, rq); + + return 1; +} + +static struct request * +sio_former_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir]) + return NULL; + + /* Return former request */ + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +sio_latter_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.next == &sd->fifo_list[sync][data_dir]) + return NULL; + + /* Return latter request */ + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static void * +sio_init_queue(struct request_queue *q) +{ + struct sio_data *sd; + + /* Allocate structure */ + sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node); + if (!sd) + return NULL; + + /* Initialize fifo lists */ + INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]); + INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]); + + /* Initialize data */ + sd->batched = 0; + sd->fifo_expire[SYNC][READ] = sync_read_expire; + sd->fifo_expire[SYNC][WRITE] = sync_write_expire; + sd->fifo_expire[ASYNC][READ] = async_read_expire; + sd->fifo_expire[ASYNC][WRITE] = async_write_expire; + sd->fifo_batch = fifo_batch; + + return sd; +} + +static void +sio_exit_queue(struct elevator_queue *e) +{ + struct sio_data *sd = e->elevator_data; + + BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ])); + BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE])); + + /* Free structure */ + kfree(sd); +} + +/* + * sysfs code + */ + +static ssize_t +sio_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +sio_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return sio_var_show(__data, (page)); \ +} +SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1); +SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1); +SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1); +SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1); +SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0); +SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data; \ + int ret = sio_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else 
if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0); +STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \ + sio_##name##_store) + +static struct elv_fs_entry sio_attrs[] = { + DD_ATTR(sync_read_expire), + DD_ATTR(sync_write_expire), + DD_ATTR(async_read_expire), + DD_ATTR(async_write_expire), + DD_ATTR(fifo_batch), + DD_ATTR(writes_starved), + __ATTR_NULL +}; + +static struct elevator_type iosched_sio = { + .ops = { + .elevator_merge_req_fn = sio_merged_requests, + .elevator_dispatch_fn = sio_dispatch_requests, + .elevator_add_req_fn = sio_add_request, +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) + .elevator_queue_empty_fn = sio_queue_empty, +#endif + .elevator_former_req_fn = sio_former_request, + .elevator_latter_req_fn = sio_latter_request, + .elevator_init_fn = sio_init_queue, + .elevator_exit_fn = sio_exit_queue, + }, + + .elevator_attrs = sio_attrs, + .elevator_name = "sio", + .elevator_owner = THIS_MODULE, +}; + +static int __init sio_init(void) +{ + /* Register elevator */ + elv_register(&iosched_sio); + + return 0; +} + +static void __exit sio_exit(void) +{ + /* Unregister elevator */ + elv_unregister(&iosched_sio); +} + +module_init(sio_init); +module_exit(sio_exit); + +MODULE_AUTHOR("Miguel Boton"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple IO scheduler"); +MODULE_VERSION("0.2"); From 7489239c6a61c1c32edd4378c5f2aab628ee4565 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Fri, 25 Jan 2013 08:39:42 -0500 Subject: [PATCH 040/117] Update version --- Makefile | 2 +- scripts/mkcompile_h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 14739be2..338d6231 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 0 SUBLEVEL = 53 -EXTRAVERSION = +EXTRAVERSION = Ermahgerd-13.01.25 NAME = Sneaky Weasel # *DOCUMENTATION* diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 1ca1adef..c84352b9 100644 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -74,7 +74,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" echo \#define LINUX_COMPILE_BY \"`echo shrike1978`\" - echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD_13.01.23`\" + echo \#define LINUX_COMPILE_HOST \"`echo ERMAHGERD`\" echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\" ) > .tmpcompile From 8af76d2d4665b667d15086b6e304a63f7c012ce5 Mon Sep 17 00:00:00 2001 From: Brandon Berhent Date: Mon, 28 Jan 2013 08:54:58 -0500 Subject: [PATCH 041/117] Add Zen IO Scheduler --- arch/arm/configs/vigor_aosp_defconfig | 2 + block/Kconfig.iosched | 11 + block/Makefile | 1 + block/zen-iosched.c | 277 ++++++++++++++++++++++++++ 4 files changed, 291 insertions(+) create mode 100644 block/zen-iosched.c diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 069fd218..20638451 100644 --- 
a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -169,11 +169,13 @@ CONFIG_IOSCHED_DEADLINE=y # CONFIG_IOSCHED_CFQ is not set CONFIG_IOSCHED_ROW=y CONFIG_IOSCHED_SIO=y +CONFIG_IOSCHED_ZEN=y CONFIG_DEFAULT_ROW=y # CONFIG_DEFAULT_DEADLINE is not set # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set # CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set CONFIG_DEFAULT_IOSCHED="row" # CONFIG_INLINE_SPIN_TRYLOCK is not set # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 3f9b9d25..ad765f29 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -56,6 +56,13 @@ config IOSCHED_SIO basic merging, trying to keep a minimum overhead. It is aimed mainly for aleatory access devices (eg: flash devices). +config IOSCHED_ZEN + tristate "Zen I/O scheduler" + default y + ---help--- + FCFS, dispatches are back-inserted, deadlines ensure fairness. + Should work best with devices where there is no travel delay. + config CFQ_GROUP_IOSCHED bool "CFQ Group Scheduling support" depends on IOSCHED_CFQ && BLK_CGROUP @@ -92,6 +99,9 @@ choice config DEFAULT_SIO bool "SIO" if IOSCHED_SIO=y + config DEFAULT_ZEN + bool "ZEN" if IOSCHED_ZEN=y + endchoice config DEFAULT_IOSCHED @@ -101,6 +111,7 @@ config DEFAULT_IOSCHED default "cfq" if DEFAULT_CFQ default "noop" if DEFAULT_NOOP default "sio" if DEFAULT_SIO + default "zen" if DEFAULT_ZEN endmenu diff --git a/block/Makefile b/block/Makefile index 3db01b11..10f794c2 100644 --- a/block/Makefile +++ b/block/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_IOSCHED_ROW) += row-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o +obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o diff --git a/block/zen-iosched.c b/block/zen-iosched.c new file mode 100644 index 00000000..77145de8 --- /dev/null +++ b/block/zen-iosched.c @@ -0,0 +1,277 @@ +/* + * Zen IO scheduler + * Primarily based on Noop, deadline, and SIO IO schedulers. + * + * Copyright (C) 2012 Brandon Berhent + * + * FCFS, dispatches are back-inserted, deadlines ensure fairness. + * Should work best with devices where there is no travel delay. + */ +#include +#include +#include +#include +#include +#include + +enum zen_data_dir { ASYNC, SYNC }; + +static const int sync_expire = HZ / 4; /* max time before a sync is submitted. */ +static const int async_expire = 2 * HZ; /* ditto for async, these limits are SOFT! 
*/ +static const int fifo_batch = 1; + +struct zen_data { + /* Runtime Data */ + /* Requests are only present on fifo_list */ + struct list_head fifo_list[2]; + + unsigned int batching; /* number of sequential requests made */ + + /* tunables */ + int fifo_expire[2]; + int fifo_batch; +}; + +static inline struct zen_data * +zen_get_data(struct request_queue *q) { + return q->elevator->elevator_data; +} + +static void zen_dispatch(struct zen_data *, struct request *); + +static void +zen_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * if next expires before rq, assign its expire time to arq + * and move into next position (next will be deleted) in fifo + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) { + list_move(&rq->queuelist, &next->queuelist); + rq_set_fifo_time(rq, rq_fifo_time(next)); + } + } + + /* next request is gone */ + rq_fifo_clear(next); +} + +static void zen_add_request(struct request_queue *q, struct request *rq) +{ + struct zen_data *zdata = zen_get_data(q); + const int dir = rq_data_dir(rq); + + if (zdata->fifo_expire[dir]) { + rq_set_fifo_time(rq, jiffies + zdata->fifo_expire[dir]); + list_add_tail(&rq->queuelist, &zdata->fifo_list[dir]); + } +} + +static void zen_dispatch(struct zen_data *zdata, struct request *rq) +{ + /* Remove request from list and dispatch it */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + /* Increment # of sequential requests */ + zdata->batching++; +} + +/* + * get the first expired request in direction ddir + */ +static struct request * +zen_expired_request(struct zen_data *zdata, int ddir) +{ + struct request *rq; + + if (list_empty(&zdata->fifo_list[ddir])) + return NULL; + + rq = rq_entry_fifo(zdata->fifo_list[ddir].next); + if (time_after(jiffies, rq_fifo_time(rq))) + return rq; + + return NULL; +} + +/* + * zen_check_fifo returns 0 if there are no expired requests on the fifo, + * otherwise it returns the next expired request + */ +static struct request * +zen_check_fifo(struct zen_data *zdata) +{ + struct request *rq_sync = zen_expired_request(zdata, SYNC); + struct request *rq_async = zen_expired_request(zdata, ASYNC); + + if (rq_async && rq_sync) { + if (time_after(rq_fifo_time(rq_async), rq_fifo_time(rq_sync))) + return rq_sync; + } else if (rq_sync) { + return rq_sync; + } else if (rq_async) { + return rq_async; + } + + return 0; +} + +static struct request * +zen_choose_request(struct zen_data *zdata) +{ + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. 
+ */ + if (!list_empty(&zdata->fifo_list[SYNC])) + return rq_entry_fifo(zdata->fifo_list[SYNC].next); + if (!list_empty(&zdata->fifo_list[ASYNC])) + return rq_entry_fifo(zdata->fifo_list[ASYNC].next); + + return NULL; +} + +static int zen_dispatch_requests(struct request_queue *q, int force) +{ + struct zen_data *zdata = zen_get_data(q); + struct request *rq = NULL; + + /* Check for and issue expired requests */ + if (zdata->batching > zdata->fifo_batch) { + zdata->batching = 0; + rq = zen_check_fifo(zdata); + } + + if (!rq) { + rq = zen_choose_request(zdata); + if (!rq) + return 0; + } + + zen_dispatch(zdata, rq); + + return 1; +} + +static void *zen_init_queue(struct request_queue *q) +{ + struct zen_data *zdata; + + zdata = kmalloc_node(sizeof(*zdata), GFP_KERNEL, q->node); + if (!zdata) + return NULL; + INIT_LIST_HEAD(&zdata->fifo_list[SYNC]); + INIT_LIST_HEAD(&zdata->fifo_list[ASYNC]); + zdata->fifo_expire[SYNC] = sync_expire; + zdata->fifo_expire[ASYNC] = async_expire; + zdata->fifo_batch = fifo_batch; + return zdata; +} + +static void zen_exit_queue(struct elevator_queue *e) +{ + struct zen_data *zdata = e->elevator_data; + + BUG_ON(!list_empty(&zdata->fifo_list[SYNC])); + BUG_ON(!list_empty(&zdata->fifo_list[ASYNC])); + kfree(zdata); +} + +/* Sysfs */ +static ssize_t +zen_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +zen_var_store(int *var, const char *page, size_t count) +{ + *var = simple_strtol(page, NULL, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct zen_data *zdata = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return zen_var_show(__data, (page)); \ +} +SHOW_FUNCTION(zen_sync_expire_show, zdata->fifo_expire[SYNC], 1); +SHOW_FUNCTION(zen_async_expire_show, zdata->fifo_expire[ASYNC], 1); +SHOW_FUNCTION(zen_fifo_batch_show, zdata->fifo_batch, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct zen_data *zdata = e->elevator_data; \ + int __data; \ + int ret = zen_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(zen_sync_expire_store, &zdata->fifo_expire[SYNC], 0, INT_MAX, 1); +STORE_FUNCTION(zen_async_expire_store, &zdata->fifo_expire[ASYNC], 0, INT_MAX, 1); +STORE_FUNCTION(zen_fifo_batch_store, &zdata->fifo_batch, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, zen_##name##_show, \ + zen_##name##_store) + +static struct elv_fs_entry zen_attrs[] = { + DD_ATTR(sync_expire), + DD_ATTR(async_expire), + DD_ATTR(fifo_batch), + __ATTR_NULL +}; + +static struct elevator_type iosched_zen = { + .ops = { + .elevator_merge_req_fn = zen_merged_requests, + .elevator_dispatch_fn = zen_dispatch_requests, + .elevator_add_req_fn = zen_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = zen_init_queue, + .elevator_exit_fn = zen_exit_queue, + }, + .elevator_attrs = zen_attrs, + .elevator_name = "zen", + .elevator_owner = THIS_MODULE, +}; + +static int __init zen_init(void) +{ + elv_register(&iosched_zen); + + return 0; +} + +static 
void __exit zen_exit(void) +{ + elv_unregister(&iosched_zen); +} + +module_init(zen_init); +module_exit(zen_exit); + + +MODULE_AUTHOR("Brandon Berhent"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Zen IO scheduler"); +MODULE_VERSION("1.0"); From c57c43f82ebbeb59b0c23f0fe6d9efb11dd8a90e Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 28 Jan 2013 09:23:22 -0500 Subject: [PATCH 042/117] Remove powersave governor --- arch/arm/configs/vigor_aosp_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 20638451..499e5317 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -644,7 +644,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART=y # CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_ONDEMAND_2_PHASE=y From ad17cf8fd057da90668c0be7c7c073235af15c5a Mon Sep 17 00:00:00 2001 From: Ezekeel Date: Thu, 5 Jan 2012 01:20:49 +0100 Subject: [PATCH 043/117] Added optimized CRC32 algorithm. --- Documentation/00-INDEX | 2 + Documentation/crc32.txt | 183 ++++ arch/arm/configs/vigor_aosp_defconfig | 5 + crypto/Kconfig | 1 + crypto/crc32c.c | 94 +- include/linux/crc32.h | 2 + lib/Kconfig | 54 +- lib/crc32.c | 1279 ++++++++++++++++++------- lib/crc32defs.h | 56 +- lib/gen_crc32table.c | 81 +- 10 files changed, 1308 insertions(+), 449 deletions(-) create mode 100644 Documentation/crc32.txt diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index 1f89424c..11a60b34 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -104,6 +104,8 @@ cpuidle/ - info on CPU_IDLE, CPU idle state management subsystem. cputopology.txt - documentation on how CPU topology info is exported via sysfs. +crc32.txt + - brief tutorial on CRC computation cris/ - directory with info about Linux on CRIS architecture. crypto/ diff --git a/Documentation/crc32.txt b/Documentation/crc32.txt new file mode 100644 index 00000000..3d74ba45 --- /dev/null +++ b/Documentation/crc32.txt @@ -0,0 +1,183 @@ +A brief CRC tutorial. + +A CRC is a long-division remainder. You add the CRC to the message, +and the whole thing (message+CRC) is a multiple of the given +CRC polynomial. To check the CRC, you can either check that the +CRC matches the recomputed value, *or* you can check that the +remainder computed on the message+CRC is 0. This latter approach +is used by a lot of hardware implementations, and is why so many +protocols put the end-of-frame flag after the CRC. + +It's actually the same long division you learned in school, except that +- We're working in binary, so the digits are only 0 and 1, and +- When dividing polynomials, there are no carries. Rather than add and + subtract, we just xor. Thus, we tend to get a bit sloppy about + the difference between adding and subtracting. + +Like all division, the remainder is always smaller than the divisor. +To produce a 32-bit CRC, the divisor is actually a 33-bit CRC polynomial. +Since it's 33 bits long, bit 32 is always going to be set, so usually the +CRC is written in hex with the most significant bit omitted. (If you're +familiar with the IEEE 754 floating-point format, it's the same idea.) 
+ +Note that a CRC is computed over a string of *bits*, so you have +to decide on the endianness of the bits within each byte. To get +the best error-detecting properties, this should correspond to the +order they're actually sent. For example, standard RS-232 serial is +little-endian; the most significant bit (sometimes used for parity) +is sent last. And when appending a CRC word to a message, you should +do it in the right order, matching the endianness. + +Just like with ordinary division, you proceed one digit (bit) at a time. +Each step of the division, division, you take one more digit (bit) of the +dividend and append it to the current remainder. Then you figure out the +appropriate multiple of the divisor to subtract to being the remainder +back into range. In binary, this is easy - it has to be either 0 or 1, +and to make the XOR cancel, it's just a copy of bit 32 of the remainder. + +When computing a CRC, we don't care about the quotient, so we can +throw the quotient bit away, but subtract the appropriate multiple of +the polynomial from the remainder and we're back to where we started, +ready to process the next bit. + +A big-endian CRC written this way would be coded like: +for (i = 0; i < input_bits; i++) { + multiple = remainder & 0x80000000 ? CRCPOLY : 0; + remainder = (remainder << 1 | next_input_bit()) ^ multiple; +} + +Notice how, to get at bit 32 of the shifted remainder, we look +at bit 31 of the remainder *before* shifting it. + +But also notice how the next_input_bit() bits we're shifting into +the remainder don't actually affect any decision-making until +32 bits later. Thus, the first 32 cycles of this are pretty boring. +Also, to add the CRC to a message, we need a 32-bit-long hole for it at +the end, so we have to add 32 extra cycles shifting in zeros at the +end of every message, + +These details lead to a standard trick: rearrange merging in the +next_input_bit() until the moment it's needed. Then the first 32 cycles +can be precomputed, and merging in the final 32 zero bits to make room +for the CRC can be skipped entirely. This changes the code to: + +for (i = 0; i < input_bits; i++) { + remainder ^= next_input_bit() << 31; + multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + remainder = (remainder << 1) ^ multiple; +} + +With this optimization, the little-endian code is particularly simple: +for (i = 0; i < input_bits; i++) { + remainder ^= next_input_bit(); + multiple = (remainder & 1) ? CRCPOLY : 0; + remainder = (remainder >> 1) ^ multiple; +} + +The most significant coefficient of the remainder polynomial is stored +in the least significant bit of the binary "remainder" variable. +The other details of endianness have been hidden in CRCPOLY (which must +be bit-reversed) and next_input_bit(). + +As long as next_input_bit is returning the bits in a sensible order, we don't +*have* to wait until the last possible moment to merge in additional bits. +We can do it 8 bits at a time rather than 1 bit at a time: +for (i = 0; i < input_bytes; i++) { + remainder ^= next_input_byte() << 24; + for (j = 0; j < 8; j++) { + multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + remainder = (remainder << 1) ^ multiple; + } +} + +Or in little-endian: +for (i = 0; i < input_bytes; i++) { + remainder ^= next_input_byte(); + for (j = 0; j < 8; j++) { + multiple = (remainder & 1) ? CRCPOLY : 0; + remainder = (remainder >> 1) ^ multiple; + } +} + +If the input is a multiple of 32 bits, you can even XOR in a 32-bit +word at a time and increase the inner loop count to 32. 
+ +You can also mix and match the two loop styles, for example doing the +bulk of a message byte-at-a-time and adding bit-at-a-time processing +for any fractional bytes at the end. + +To reduce the number of conditional branches, software commonly uses +the byte-at-a-time table method, popularized by Dilip V. Sarwate, +"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM +v.31 no.8 (August 1998) p. 1008-1013. + +Here, rather than just shifting one bit of the remainder to decide +in the correct multiple to subtract, we can shift a byte at a time. +This produces a 40-bit (rather than a 33-bit) intermediate remainder, +and the correct multiple of the polynomial to subtract is found using +a 256-entry lookup table indexed by the high 8 bits. + +(The table entries are simply the CRC-32 of the given one-byte messages.) + +When space is more constrained, smaller tables can be used, e.g. two +4-bit shifts followed by a lookup in a 16-entry table. + +It is not practical to process much more than 8 bits at a time using this +technique, because tables larger than 256 entries use too much memory and, +more importantly, too much of the L1 cache. + +To get higher software performance, a "slicing" technique can be used. +See "High Octane CRC Generation with the Intel Slicing-by-8 Algorithm", +ftp://download.intel.com/technology/comms/perfnet/download/slicing-by-8.pdf + +This does not change the number of table lookups, but does increase +the parallelism. With the classic Sarwate algorithm, each table lookup +must be completed before the index of the next can be computed. + +A "slicing by 2" technique would shift the remainder 16 bits at a time, +producing a 48-bit intermediate remainder. Rather than doing a single +lookup in a 65536-entry table, the two high bytes are looked up in +two different 256-entry tables. Each contains the remainder required +to cancel out the corresponding byte. The tables are different because the +polynomials to cancel are different. One has non-zero coefficients from +x^32 to x^39, while the other goes from x^40 to x^47. + +Since modern processors can handle many parallel memory operations, this +takes barely longer than a single table look-up and thus performs almost +twice as fast as the basic Sarwate algorithm. + +This can be extended to "slicing by 4" using 4 256-entry tables. +Each step, 32 bits of data is fetched, XORed with the CRC, and the result +broken into bytes and looked up in the tables. Because the 32-bit shift +leaves the low-order bits of the intermediate remainder zero, the +final CRC is simply the XOR of the 4 table look-ups. + +But this still enforces sequential execution: a second group of table +look-ups cannot begin until the previous groups 4 table look-ups have all +been completed. Thus, the processor's load/store unit is sometimes idle. + +To make maximum use of the processor, "slicing by 8" performs 8 look-ups +in parallel. Each step, the 32-bit CRC is shifted 64 bits and XORed +with 64 bits of input data. What is important to note is that 4 of +those 8 bytes are simply copies of the input data; they do not depend +on the previous CRC at all. Thus, those 4 table look-ups may commence +immediately, without waiting for the previous loop iteration. + +By always having 4 loads in flight, a modern superscalar processor can +be kept busy and make full use of its L1 cache. 
+ +Two more details about CRC implementation in the real world: + +Normally, appending zero bits to a message which is already a multiple +of a polynomial produces a larger multiple of that polynomial. Thus, +a basic CRC will not detect appended zero bits (or bytes). To enable +a CRC to detect this condition, it's common to invert the CRC before +appending it. This makes the remainder of the message+crc come out not +as zero, but some fixed non-zero value. (The CRC of the inversion +pattern, 0xffffffff.) + +The same problem applies to zero bits prepended to the message, and a +similar solution is used. Instead of starting the CRC computation with +a remainder of 0, an initial remainder of all ones is used. As long as +you start the same way on decoding, it doesn't make a difference. + diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 499e5317..fc488b2f 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -2887,6 +2887,11 @@ CONFIG_CRC16=y # CONFIG_CRC_T10DIF is not set # CONFIG_CRC_ITU_T is not set CONFIG_CRC32=y +CONFIG_CRC32_SELFTEST=y +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set # CONFIG_CRC7 is not set CONFIG_LIBCRC32C=y CONFIG_ZLIB_INFLATE=y diff --git a/crypto/Kconfig b/crypto/Kconfig index 87b22ca9..9bedcf36 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -302,6 +302,7 @@ comment "Digest" config CRYPTO_CRC32C tristate "CRC32c CRC algorithm" select CRYPTO_HASH + select CRC32 help Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used by iSCSI for header and data digests and by others. diff --git a/crypto/crc32c.c b/crypto/crc32c.c index de9e55c2..e0a300ce 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -40,6 +40,7 @@ #include #include #include +#include #define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 @@ -52,95 +53,6 @@ struct chksum_desc_ctx { u32 crc; }; -/* - * This is the CRC-32C table - * Generated with: - * width = 32 bits - * poly = 0x1EDC6F41 - * reflect input bytes = true - * reflect output bytes = true - */ - -static const u32 crc32c_table[256] = { - 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, - 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, - 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, - 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L, - 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL, - 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, - 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, - 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL, - 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL, - 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L, - 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, - 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, - 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L, - 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL, - 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL, - 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, - 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, - 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L, - 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L, - 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L, - 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, - 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, - 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L, - 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L, - 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L, - 
0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, - 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, - 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L, - 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L, - 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L, - 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, - 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, - 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL, - 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L, - 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L, - 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, - 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, - 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL, - 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL, - 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L, - 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, - 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, - 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL, - 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L, - 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL, - 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, - 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, - 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL, - 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L, - 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL, - 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, - 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, - 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL, - 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L, - 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L, - 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, - 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, - 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L, - 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L, - 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL, - 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, - 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, - 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL, - 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L -}; - -/* - * Steps through buffer one byte at at time, calculates reflected - * crc using table. - */ - -static u32 crc32c(u32 crc, const u8 *data, unsigned int length) -{ - while (length--) - crc = crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); - - return crc; -} - /* * Steps through buffer one byte at at time, calculates reflected * crc using table. 
@@ -179,7 +91,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - ctx->crc = crc32c(ctx->crc, data, length); + ctx->crc = __crc32c_le(ctx->crc, data, length); return 0; } @@ -193,7 +105,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { - *(__le32 *)out = ~cpu_to_le32(crc32c(*crcp, data, len)); + *(__le32 *)out = ~cpu_to_le32(__crc32c_le(*crcp, data, len)); return 0; } diff --git a/include/linux/crc32.h b/include/linux/crc32.h index 391a259b..68267b64 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -11,6 +11,8 @@ extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); +extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len); + #define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) /* diff --git a/lib/Kconfig b/lib/Kconfig index 830181cc..8991c57c 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -51,14 +51,60 @@ config CRC_ITU_T functions require M here. config CRC32 - tristate "CRC32 functions" + tristate "CRC32/CRC32c functions" default y select BITREVERSE help This option is provided for the case where no in-kernel-tree - modules require CRC32 functions, but a module built outside the - kernel tree does. Such modules that use library CRC32 functions - require M here. + modules require CRC32/CRC32c functions, but a module built outside + the kernel tree does. Such modules that use library CRC32/CRC32c + functions require M here. + +config CRC32_SELFTEST + bool "CRC32 perform self test on init" + default n + depends on CRC32 + help + This option enables the CRC32 library functions to perform a + self test on initialization. The self test computes crc32_le + and crc32_be over byte strings with random alignment and length + and computes the total elapsed time and number of bytes processed. + +choice + prompt "CRC32 implementation" + depends on CRC32 + default CRC32_SLICEBY8 + +config CRC32_SLICEBY8 + bool "Slice by 8 bytes" + help + Calculate checksum 8 bytes at a time with a clever slicing algorithm. + This is the fastest algorithm, but comes with a 8KiB lookup table. + Most modern processors have enough cache that this shouldn't be + a problem. + + If you don't know which to choose, choose this one. + +config CRC32_SLICEBY4 + bool "Slice by 4 bytes" + help + Calculate checksum 4 bytes at a time with a clever slicing algorithm. + This is a bit slower than slice by 8, but has a smaller 4KiB lookup + table. + +config CRC32_SARWATE + bool "Sarwate's Algorithm (one byte at a time)" + help + Calculate checksum a byte at a time using Sarwate's algorithm. This + is not particularly fast, but has a small 256 byte lookup table. + +config CRC32_BIT + bool "Classic Algorithm (one bit at a time)" + help + Calculate checksum one bit at a time. This is VERY slow, but has + no lookup table. This is provided as a debugging option. + +endchoice config CRC7 tristate "CRC7 functions" diff --git a/lib/crc32.c b/lib/crc32.c index 4855995f..382fa767 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -1,4 +1,8 @@ /* + * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin + * cleaned up code to current version of sparse and added the slicing-by-8 + * algorithm to the closely similar existing slicing-by-4 algorithm. 
+ * * Oct 15, 2000 Matt Domsch * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! * Code was from the public domain, copyright abandoned. Code was @@ -20,51 +24,58 @@ * Version 2. See the file COPYING for more details. */ +/* see: Documentation/crc32.txt for a description of algorithms */ + #include -#include #include -#include #include -#include -#include #include "crc32defs.h" -#if CRC_LE_BITS == 8 -# define tole(x) __constant_cpu_to_le32(x) + +#if CRC_LE_BITS > 8 +# define tole(x) (__force u32) __constant_cpu_to_le32(x) #else # define tole(x) (x) #endif -#if CRC_BE_BITS == 8 -# define tobe(x) __constant_cpu_to_be32(x) +#if CRC_BE_BITS > 8 +# define tobe(x) (__force u32) __constant_cpu_to_be32(x) #else # define tobe(x) (x) #endif + #include "crc32table.h" MODULE_AUTHOR("Matt Domsch "); -MODULE_DESCRIPTION("Ethernet CRC32 calculations"); +MODULE_DESCRIPTION("Various CRC32 calculations"); MODULE_LICENSE("GPL"); -#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 +#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 +/* implements slicing-by-4 or slicing-by-8 algorithm */ static inline u32 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) { # ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) -# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ - tab[2][(crc >> 8) & 255] ^ \ - tab[1][(crc >> 16) & 255] ^ \ - tab[0][(crc >> 24) & 255] +# define DO_CRC(x) (crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)) +# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \ + t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255]) +# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \ + t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255]) # else -# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8) -# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \ - tab[1][(crc >> 8) & 255] ^ \ - tab[2][(crc >> 16) & 255] ^ \ - tab[3][(crc >> 24) & 255] +# define DO_CRC(x) (crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)) +# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \ + t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255]) +# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \ + t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255]) # endif const u32 *b; - size_t rem_len; + size_t rem_len; +# ifdef CONFIG_X86 + size_t i; +# endif + const u32 *t0 = tab[0], *t1 = tab[1], *t2 = tab[2], *t3 = tab[3]; + const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; + u32 q; /* Align it */ if (unlikely((long)buf & 3 && len)) { @@ -72,27 +83,51 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) DO_CRC(*buf++); } while ((--len) && ((long)buf)&3); } + +# if CRC_LE_BITS == 32 rem_len = len & 3; - /* load data 32 bits wide, xor data 32 bits wide. 
*/ len = len >> 2; +# else + rem_len = len & 7; + len = len >> 3; +# endif + b = (const u32 *)buf; +# ifdef CONFIG_X86 + --b; + for (i = 0; i < len; i++) { +# else for (--b; len; --len) { - crc ^= *++b; /* use pre increment for speed */ - DO_CRC4; +# endif + q = crc ^ *++b; /* use pre increment for speed */ +# if CRC_LE_BITS == 32 + crc = DO_CRC4; +# else + crc = DO_CRC8; + q = *++b; + crc ^= DO_CRC4; +# endif } len = rem_len; /* And the last few bytes */ if (len) { u8 *p = (u8 *)(b + 1) - 1; +# ifdef CONFIG_X86 + for (i = 0; i < len; i++) + DO_CRC(*++p); /* use pre increment for speed */ +# else do { DO_CRC(*++p); /* use pre increment for speed */ } while (--len); +# endif } return crc; #undef DO_CRC #undef DO_CRC4 +#undef DO_CRC8 } #endif + /** * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for @@ -100,53 +135,56 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) * @p: pointer to buffer over which CRC is run * @len: length of buffer @p */ -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); - -#if CRC_LE_BITS == 1 -/* - * In fact, the table-based code will work in this case, but it can be - * simplified by inlining the table in ?: form. - */ - -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, + size_t len, const u32 (*tab)[256], + u32 polynomial) { +#if CRC_LE_BITS == 1 int i; while (len--) { crc ^= *p++; for (i = 0; i < 8; i++) - crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); + crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0); + } +# elif CRC_LE_BITS == 2 + while (len--) { + crc ^= *p++; + crc = (crc >> 2) ^ tab[0][crc & 3]; + crc = (crc >> 2) ^ tab[0][crc & 3]; + crc = (crc >> 2) ^ tab[0][crc & 3]; + crc = (crc >> 2) ^ tab[0][crc & 3]; } - return crc; -} -#else /* Table-based approach */ - -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) -{ -# if CRC_LE_BITS == 8 - const u32 (*tab)[] = crc32table_le; - - crc = __cpu_to_le32(crc); - crc = crc32_body(crc, p, len, tab); - return __le32_to_cpu(crc); # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; - crc = (crc >> 4) ^ crc32table_le[crc & 15]; - crc = (crc >> 4) ^ crc32table_le[crc & 15]; + crc = (crc >> 4) ^ tab[0][crc & 15]; + crc = (crc >> 4) ^ tab[0][crc & 15]; } - return crc; -# elif CRC_LE_BITS == 2 +# elif CRC_LE_BITS == 8 + /* aka Sarwate algorithm */ while (len--) { crc ^= *p++; - crc = (crc >> 2) ^ crc32table_le[crc & 3]; - crc = (crc >> 2) ^ crc32table_le[crc & 3]; - crc = (crc >> 2) ^ crc32table_le[crc & 3]; - crc = (crc >> 2) ^ crc32table_le[crc & 3]; + crc = (crc >> 8) ^ tab[0][crc & 255]; } +# else + crc = (__force u32) __cpu_to_le32(crc); + crc = crc32_body(crc, p, len, tab); + crc = __le32_to_cpu((__force __le32)crc); +#endif return crc; -# endif } -#endif + +u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +{ + return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE); +} +EXPORT_SYMBOL(crc32_le); + +u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) +{ + return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE); +} +EXPORT_SYMBOL(__crc32c_le); /** * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 @@ -155,317 +193,906 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) * @p: pointer to buffer over which CRC is run * @len: length of buffer @p */ -u32 __pure crc32_be(u32 crc, unsigned char 
const *p, size_t len); - -#if CRC_BE_BITS == 1 -/* - * In fact, the table-based code will work in this case, but it can be - * simplified by inlining the table in ?: form. - */ - -u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) +static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, + size_t len, const u32 (*tab)[256], + u32 polynomial) { +#if CRC_BE_BITS == 1 int i; while (len--) { crc ^= *p++ << 24; for (i = 0; i < 8; i++) crc = - (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : + (crc << 1) ^ ((crc & 0x80000000) ? polynomial : 0); } - return crc; -} - -#else /* Table-based approach */ -u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) -{ -# if CRC_BE_BITS == 8 - const u32 (*tab)[] = crc32table_be; - - crc = __cpu_to_be32(crc); - crc = crc32_body(crc, p, len, tab); - return __be32_to_cpu(crc); +# elif CRC_BE_BITS == 2 + while (len--) { + crc ^= *p++ << 24; + crc = (crc << 2) ^ tab[0][crc >> 30]; + crc = (crc << 2) ^ tab[0][crc >> 30]; + crc = (crc << 2) ^ tab[0][crc >> 30]; + crc = (crc << 2) ^ tab[0][crc >> 30]; + } # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; - crc = (crc << 4) ^ crc32table_be[crc >> 28]; - crc = (crc << 4) ^ crc32table_be[crc >> 28]; + crc = (crc << 4) ^ tab[0][crc >> 28]; + crc = (crc << 4) ^ tab[0][crc >> 28]; } - return crc; -# elif CRC_BE_BITS == 2 +# elif CRC_BE_BITS == 8 while (len--) { crc ^= *p++ << 24; - crc = (crc << 2) ^ crc32table_be[crc >> 30]; - crc = (crc << 2) ^ crc32table_be[crc >> 30]; - crc = (crc << 2) ^ crc32table_be[crc >> 30]; - crc = (crc << 2) ^ crc32table_be[crc >> 30]; + crc = (crc << 8) ^ tab[0][crc >> 24]; } - return crc; +# else + crc = (__force u32) __cpu_to_be32(crc); + crc = crc32_body(crc, p, len, tab); + crc = __be32_to_cpu((__force __be32)crc); # endif + return crc; } -#endif -EXPORT_SYMBOL(crc32_le); +u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) +{ + return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE); +} EXPORT_SYMBOL(crc32_be); -/* - * A brief CRC tutorial. - * - * A CRC is a long-division remainder. You add the CRC to the message, - * and the whole thing (message+CRC) is a multiple of the given - * CRC polynomial. To check the CRC, you can either check that the - * CRC matches the recomputed value, *or* you can check that the - * remainder computed on the message+CRC is 0. This latter approach - * is used by a lot of hardware implementations, and is why so many - * protocols put the end-of-frame flag after the CRC. - * - * It's actually the same long division you learned in school, except that - * - We're working in binary, so the digits are only 0 and 1, and - * - When dividing polynomials, there are no carries. Rather than add and - * subtract, we just xor. Thus, we tend to get a bit sloppy about - * the difference between adding and subtracting. - * - * A 32-bit CRC polynomial is actually 33 bits long. But since it's - * 33 bits long, bit 32 is always going to be set, so usually the CRC - * is written in hex with the most significant bit omitted. (If you're - * familiar with the IEEE 754 floating-point format, it's the same idea.) - * - * Note that a CRC is computed over a string of *bits*, so you have - * to decide on the endianness of the bits within each byte. To get - * the best error-detecting properties, this should correspond to the - * order they're actually sent. For example, standard RS-232 serial is - * little-endian; the most significant bit (sometimes used for parity) - * is sent last. 
And when appending a CRC word to a message, you should - * do it in the right order, matching the endianness. - * - * Just like with ordinary division, the remainder is always smaller than - * the divisor (the CRC polynomial) you're dividing by. Each step of the - * division, you take one more digit (bit) of the dividend and append it - * to the current remainder. Then you figure out the appropriate multiple - * of the divisor to subtract to being the remainder back into range. - * In binary, it's easy - it has to be either 0 or 1, and to make the - * XOR cancel, it's just a copy of bit 32 of the remainder. - * - * When computing a CRC, we don't care about the quotient, so we can - * throw the quotient bit away, but subtract the appropriate multiple of - * the polynomial from the remainder and we're back to where we started, - * ready to process the next bit. - * - * A big-endian CRC written this way would be coded like: - * for (i = 0; i < input_bits; i++) { - * multiple = remainder & 0x80000000 ? CRCPOLY : 0; - * remainder = (remainder << 1 | next_input_bit()) ^ multiple; - * } - * Notice how, to get at bit 32 of the shifted remainder, we look - * at bit 31 of the remainder *before* shifting it. - * - * But also notice how the next_input_bit() bits we're shifting into - * the remainder don't actually affect any decision-making until - * 32 bits later. Thus, the first 32 cycles of this are pretty boring. - * Also, to add the CRC to a message, we need a 32-bit-long hole for it at - * the end, so we have to add 32 extra cycles shifting in zeros at the - * end of every message, - * - * So the standard trick is to rearrage merging in the next_input_bit() - * until the moment it's needed. Then the first 32 cycles can be precomputed, - * and merging in the final 32 zero bits to make room for the CRC can be - * skipped entirely. - * This changes the code to: - * for (i = 0; i < input_bits; i++) { - * remainder ^= next_input_bit() << 31; - * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; - * remainder = (remainder << 1) ^ multiple; - * } - * With this optimization, the little-endian code is simpler: - * for (i = 0; i < input_bits; i++) { - * remainder ^= next_input_bit(); - * multiple = (remainder & 1) ? CRCPOLY : 0; - * remainder = (remainder >> 1) ^ multiple; - * } - * - * Note that the other details of endianness have been hidden in CRCPOLY - * (which must be bit-reversed) and next_input_bit(). - * - * However, as long as next_input_bit is returning the bits in a sensible - * order, we can actually do the merging 8 or more bits at a time rather - * than one bit at a time: - * for (i = 0; i < input_bytes; i++) { - * remainder ^= next_input_byte() << 24; - * for (j = 0; j < 8; j++) { - * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; - * remainder = (remainder << 1) ^ multiple; - * } - * } - * Or in little-endian: - * for (i = 0; i < input_bytes; i++) { - * remainder ^= next_input_byte(); - * for (j = 0; j < 8; j++) { - * multiple = (remainder & 1) ? CRCPOLY : 0; - * remainder = (remainder << 1) ^ multiple; - * } - * } - * If the input is a multiple of 32 bits, you can even XOR in a 32-bit - * word at a time and increase the inner loop count to 32. - * - * You can also mix and match the two loop styles, for example doing the - * bulk of a message byte-at-a-time and adding bit-at-a-time processing - * for any fractional bytes at the end. - * - * The only remaining optimization is to the byte-at-a-time table method. 
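The byte-at-a-time discussion picks up again just below; first, the cancellation property mentioned earlier in this tutorial is easy to verify with the CRC_LE_BITS == 1 loop kept by the rewritten crc32_le_generic(). A short userspace sketch (helper name invented here; seed 0 and no final inversion so the algebra stays visible) shows that appending the CRC to the message, least significant byte first, drives the remainder over message plus CRC to exactly zero.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CRCPOLY_LE 0xedb88320u

/* Same little-endian bit-at-a-time recurrence as the CRC_LE_BITS == 1 case. */
static uint32_t crc32_le_bitwise(uint32_t crc, const unsigned char *p,
				 size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char buf[16 + 4] = "0123456789abcdef";
	uint32_t crc = crc32_le_bitwise(0, buf, 16);

	/* Append the CRC after the message, least significant byte first. */
	buf[16] = crc & 0xff;
	buf[17] = (crc >> 8) & 0xff;
	buf[18] = (crc >> 16) & 0xff;
	buf[19] = (crc >> 24) & 0xff;

	/* Remainder over message + CRC is zero, as the text above explains. */
	assert(crc32_le_bitwise(0, buf, 16 + 4) == 0);
	return 0;
}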
- * Here, rather than just shifting one bit of the remainder to decide - * in the correct multiple to subtract, we can shift a byte at a time. - * This produces a 40-bit (rather than a 33-bit) intermediate remainder, - * but again the multiple of the polynomial to subtract depends only on - * the high bits, the high 8 bits in this case. - * - * The multiple we need in that case is the low 32 bits of a 40-bit - * value whose high 8 bits are given, and which is a multiple of the - * generator polynomial. This is simply the CRC-32 of the given - * one-byte message. - * - * Two more details: normally, appending zero bits to a message which - * is already a multiple of a polynomial produces a larger multiple of that - * polynomial. To enable a CRC to detect this condition, it's common to - * invert the CRC before appending it. This makes the remainder of the - * message+crc come out not as zero, but some fixed non-zero value. - * - * The same problem applies to zero bits prepended to the message, and - * a similar solution is used. Instead of starting with a remainder of - * 0, an initial remainder of all ones is used. As long as you start - * the same way on decoding, it doesn't make a difference. - */ +#ifdef CONFIG_CRC32_SELFTEST -#ifdef UNITTEST - -#include -#include +/* 4096 random bytes */ +static u8 __attribute__((__aligned__(8))) test_buf[] = +{ + 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, + 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, + 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60, + 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c, + 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4, + 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a, + 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a, + 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4, + 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9, + 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4, + 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca, + 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61, + 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e, + 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a, + 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f, + 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd, + 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c, + 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88, + 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53, + 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f, + 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4, + 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74, + 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60, + 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09, + 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07, + 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1, + 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f, + 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2, + 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0, + 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95, + 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22, + 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93, + 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86, + 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d, + 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40, + 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b, + 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35, + 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40, + 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63, + 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b, + 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8, + 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72, + 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86, + 0x71, 0xe6, 0x3d, 0x18, 0x37, 
0x70, 0xe6, 0xff, + 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed, + 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c, + 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed, + 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30, + 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99, + 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4, + 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80, + 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37, + 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04, + 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e, + 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd, + 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c, + 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09, + 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb, + 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b, + 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53, + 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b, + 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f, + 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff, + 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40, + 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6, + 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb, + 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73, + 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f, + 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4, + 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66, + 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1, + 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80, + 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f, + 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5, + 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7, + 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce, + 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff, + 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48, + 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26, + 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72, + 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88, + 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9, + 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc, + 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8, + 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09, + 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8, + 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c, + 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48, + 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d, + 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f, + 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae, + 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97, + 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8, + 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75, + 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc, + 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27, + 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf, + 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7, + 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0, + 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8, + 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c, + 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44, + 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54, + 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38, + 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f, + 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b, + 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7, + 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef, + 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e, + 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c, + 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c, + 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0, + 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37, + 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf, + 0xda, 0xee, 0x92, 0x25, 0x90, 
0x62, 0x39, 0x9e, + 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4, + 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60, + 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe, + 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61, + 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3, + 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe, + 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40, + 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec, + 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f, + 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7, + 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79, + 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c, + 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f, + 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21, + 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9, + 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30, + 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b, + 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee, + 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6, + 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3, + 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09, + 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd, + 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f, + 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9, + 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc, + 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59, + 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60, + 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5, + 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1, + 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8, + 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9, + 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab, + 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80, + 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01, + 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e, + 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d, + 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35, + 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38, + 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a, + 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac, + 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca, + 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57, + 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed, + 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20, + 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef, + 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c, + 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a, + 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64, + 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4, + 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54, + 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16, + 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26, + 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc, + 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87, + 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60, + 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d, + 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54, + 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13, + 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59, + 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb, + 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f, + 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15, + 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78, + 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93, + 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e, + 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31, + 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1, + 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37, + 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15, + 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78, + 0xbe, 0x7d, 0x40, 0xba, 0x2f, 
0x95, 0xb1, 0x2f, + 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31, + 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f, + 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc, + 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9, + 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3, + 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe, + 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4, + 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24, + 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1, + 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85, + 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8, + 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09, + 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c, + 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46, + 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5, + 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39, + 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2, + 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc, + 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35, + 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde, + 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80, + 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15, + 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63, + 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58, + 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d, + 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf, + 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12, + 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c, + 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b, + 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1, + 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6, + 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73, + 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9, + 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e, + 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22, + 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb, + 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2, + 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c, + 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c, + 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93, + 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f, + 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38, + 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57, + 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03, + 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90, + 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8, + 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4, + 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36, + 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7, + 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47, + 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46, + 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73, + 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72, + 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23, + 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a, + 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58, + 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f, + 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96, + 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9, + 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b, + 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c, + 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef, + 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3, + 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4, + 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f, + 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17, + 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18, + 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8, + 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98, + 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42, + 0x8f, 0xc5, 0x28, 0xb1, 0x82, 
0xef, 0x1c, 0x97, + 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97, + 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1, + 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77, + 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb, + 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c, + 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb, + 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56, + 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04, + 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48, + 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe, + 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d, + 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97, + 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8, + 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f, + 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e, + 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca, + 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44, + 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f, + 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6, + 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63, + 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19, + 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58, + 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b, + 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28, + 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf, + 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6, + 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3, + 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe, + 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f, + 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf, + 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9, + 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e, + 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7, + 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70, + 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0, + 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d, + 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4, + 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5, + 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85, + 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc, + 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f, + 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56, + 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb, + 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b, + 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5, + 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03, + 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23, + 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03, + 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87, + 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4, + 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43, + 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11, + 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40, + 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59, + 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9, + 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30, + 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd, + 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45, + 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83, + 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b, + 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5, + 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3, + 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84, + 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8, + 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34, + 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b, + 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31, + 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b, + 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40, + 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b, + 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 
0x0a, 0xdd, 0x9e, + 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38, + 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb, + 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2, + 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c, + 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1, + 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc, + 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec, + 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34, + 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95, + 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92, + 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f, + 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c, + 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b, + 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c, + 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5, + 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb, + 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4, + 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9, + 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4, + 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41, + 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a, + 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8, + 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06, + 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62, + 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47, + 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4, + 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00, + 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67, + 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81, + 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0, + 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10, + 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79, + 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19, + 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8, + 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1, + 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83, + 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86, + 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55, + 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66, + 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0, + 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49, + 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea, + 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24, + 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e, + 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88, + 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87, + 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34, + 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f, + 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a, + 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a, + 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93, + 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37, + 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38, + 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4, + 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48, + 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65, + 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09, + 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e, + 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5, + 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b, + 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4, + 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e, + 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d, + 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0, + 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5, + 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48, + 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e, + 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f, + 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a, + 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d, + 0xb4, 0x73, 0xd6, 0x21, 0xa1, 
0x71, 0x60, 0x14, + 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69, + 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53, + 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56, + 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48, + 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4, + 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26, + 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e, + 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40, + 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7, + 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62, + 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe, + 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf, + 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2, + 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d, + 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32, + 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa, + 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45, + 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04, + 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33, + 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad, + 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4, + 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c, + 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b, + 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36, + 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa, + 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9, + 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28, + 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b, + 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03, + 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d, + 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff, + 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39, + 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b, + 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2, + 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34, + 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe, + 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0, + 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27, + 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86, + 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90, + 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03, + 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb, + 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57, + 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9, + 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5, + 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16, + 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5, + 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a, + 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d, + 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0, + 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f, + 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48, + 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1, + 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09, + 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51, + 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b, + 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf, + 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe, + 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad, + 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e, + 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57, + 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f, + 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef, + 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8, + 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69, + 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d, + 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59, + 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9, + 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d, + 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea, + 0xee, 0xac, 0xa3, 0xc3, 0x04, 
0x8b, 0x0e, 0x56, + 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4, + 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8, + 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78, + 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f, + 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4, + 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91, + 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f, + 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c, + 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57, + 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4, + 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23, + 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17, + 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66, + 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39, + 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36, + 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00, + 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7, + 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60, + 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c, + 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e, + 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7, + 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a, + 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d, + 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37, + 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82, + 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8, + 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e, + 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85, + 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98, + 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22, + 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7, + 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49, + 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33, + 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc, + 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8, + 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f, + 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3, + 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98, + 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c, + 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6, + 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc, + 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d, +}; -#if 0 /*Not used at present */ -static void -buf_dump(char const *prefix, unsigned char const *buf, size_t len) +/* 100 test cases */ +static struct crc_test { + u32 crc; /* random starting crc */ + u32 start; /* random 6 bit offset in buf */ + u32 length; /* random 11 bit length of test */ + u32 crc_le; /* expected crc32_le result */ + u32 crc_be; /* expected crc32_be result */ + u32 crc32c_le; /* expected crc32c_le result */ +} test[] = { - fputs(prefix, stdout); - while (len--) - printf(" %02x", *buf++); - putchar('\n'); + {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, + 0xf6e93d6c}, + {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, + 0x0fe92aca}, + {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, + 0x52e1ebb8}, + {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, + 0x0798af9a}, + {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, + 0x18eb3152}, + {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, + 0xd00d08c7}, + {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, + 0x8ba966bc}, + {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, + 0x11d694a2}, + {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, + 0x6ab3208d}, + {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, + 0xba4603c5}, + {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, + 0xe6071c6f}, + {0x1d825a8f, 0x0000002b, 0x0000050b, 
0xd2c45f0c, 0xd68634e0, + 0x179ec30a}, + {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, + 0x0903beb8}, + {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, + 0x6a7cb4fa}, + {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, + 0xdb535801}, + {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, + 0x92bed597}, + {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, + 0x192a3f1b}, + {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, + 0xccbaec1a}, + {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, + 0x7eabae4d}, + {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, + 0x28c72982}, + {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, + 0xc3cd4d18}, + {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, + 0xbca8f0e7}, + {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, + 0x713f60b3}, + {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, + 0xebd08fd5}, + {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, + 0x64406c59}, + {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, + 0x7421890e}, + {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, + 0xe9347603}, + {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, + 0x1bef9060}, + {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, + 0x34720072}, + {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, + 0x48310f59}, + {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, + 0x783a4213}, + {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, + 0x9e8efd41}, + {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, + 0xfc3d34a5}, + {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, + 0x17a52ae2}, + {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, + 0x886d935a}, + {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, + 0xeaaeaeb2}, + {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, + 0x8e900a4b}, + {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, + 0xd74662b1}, + {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, + 0xd26752ba}, + {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, + 0x8b1fcd62}, + {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, + 0xf54342fe}, + {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, + 0x5b95b988}, + {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, + 0x2e1176be}, + {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, + 0x66120546}, + {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, + 0xf256a5cc}, + {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, + 0x4af1dd69}, + {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, + 0x56f0a04a}, + {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, + 0x74f6b6b2}, + {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, + 0x085951fd}, + {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, + 0xc65387eb}, + {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, + 0x1ca9257b}, + {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, + 0xfd196d76}, + {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, + 0x5ef88339}, + {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, + 0x2c3714d9}, + {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, + 0x58576548}, + {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, + 0xfd7c57de}, + {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, + 0xd5fedd59}, + 
{0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, + 0x1cc3b17b}, + {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, + 0x270eed73}, + {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, + 0x91ecbb11}, + {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, + 0x05ed8d0c}, + {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, + 0x0b09ad5b}, + {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, + 0xf8d511fb}, + {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, + 0x5ad832cc}, + {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, + 0x1214d196}, + {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, + 0x5747218a}, + {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, + 0xde8f14de}, + {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, + 0x3563b7b9}, + {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, + 0x071475d0}, + {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, + 0x54c79d60}, + {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, + 0x4c53eee6}, + {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, + 0x10137a3c}, + {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, + 0xaa9d6c73}, + {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, + 0xb63d23e7}, + {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, + 0x7f53e9cf}, + {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, + 0x13c1cd83}, + {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, + 0x49ff5867}, + {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, + 0x8467f211}, + {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, + 0x3f9683b2}, + {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, + 0x76a3f874}, + {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, + 0x863b702f}, + {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, + 0xdc6c58ff}, + {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, + 0x0622cc95}, + {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, + 0xe85605cd}, + {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, + 0x31da5f06}, + {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, + 0xa1f2e784}, + {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, + 0xb07cc616}, + {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, + 0xbf943b6c}, + {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, + 0x2c01af1c}, + {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, + 0x0fe5f56d}, + {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, + 0xf8943b2d}, + {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, + 0xe4d89272}, + {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, + 0x7c2f6bbb}, + {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, + 0xabbf388b}, + {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, + 0x1dca1f4e}, + {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, + 0x5c170e23}, + {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, + 0xc0e9d672}, + {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, + 0xc18bdc86}, + {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, + 0xa874fcdd}, + {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, + 0x9dc0bb48}, +}; -} -#endif +#include -static void bytereverse(unsigned char *buf, size_t len) +static int __init crc32c_test(void) { - while (len--) { - unsigned char x = bitrev8(*buf); - *buf++ = x; + int i; + int 
errors = 0; + int bytes = 0; + struct timespec start, stop; + u64 nsec; + unsigned long flags; + + /* keep static to prevent cache warming code from + * getting eliminated by the compiler */ + static u32 crc; + + /* pre-warm the cache */ + for (i = 0; i < 100; i++) { + bytes += 2*test[i].length; + + crc ^= __crc32c_le(test[i].crc, test_buf + + test[i].start, test[i].length); } -} -static void random_garbage(unsigned char *buf, size_t len) -{ - while (len--) - *buf++ = (unsigned char) random(); -} + /* reduce OS noise */ + local_irq_save(flags); + local_irq_disable(); -#if 0 /* Not used at present */ -static void store_le(u32 x, unsigned char *buf) -{ - buf[0] = (unsigned char) x; - buf[1] = (unsigned char) (x >> 8); - buf[2] = (unsigned char) (x >> 16); - buf[3] = (unsigned char) (x >> 24); -} -#endif + getnstimeofday(&start); + for (i = 0; i < 100; i++) { + if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + + test[i].start, test[i].length)) + errors++; + } + getnstimeofday(&stop); -static void store_be(u32 x, unsigned char *buf) -{ - buf[0] = (unsigned char) (x >> 24); - buf[1] = (unsigned char) (x >> 16); - buf[2] = (unsigned char) (x >> 8); - buf[3] = (unsigned char) x; + local_irq_restore(flags); + local_irq_enable(); + + nsec = stop.tv_nsec - start.tv_nsec + + 1000000000 * (stop.tv_sec - start.tv_sec); + + pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); + + if (errors) + pr_warn("crc32c: %d self tests failed\n", errors); + else { + pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n", + bytes, nsec); + } + + return 0; } -/* - * This checks that CRC(buf + CRC(buf)) = 0, and that - * CRC commutes with bit-reversal. This has the side effect - * of bytewise bit-reversing the input buffer, and returns - * the CRC of the reversed buffer. 
- */ -static u32 test_step(u32 init, unsigned char *buf, size_t len) +static int __init crc32_test(void) { - u32 crc1, crc2; - size_t i; + int i; + int errors = 0; + int bytes = 0; + struct timespec start, stop; + u64 nsec; + unsigned long flags; + + /* keep static to prevent cache warming code from + * getting eliminated by the compiler */ + static u32 crc; + + /* pre-warm the cache */ + for (i = 0; i < 100; i++) { + bytes += 2*test[i].length; - crc1 = crc32_be(init, buf, len); - store_be(crc1, buf + len); - crc2 = crc32_be(init, buf, len + 4); - if (crc2) - printf("\nCRC cancellation fail: 0x%08x should be 0\n", - crc2); - - for (i = 0; i <= len + 4; i++) { - crc2 = crc32_be(init, buf, i); - crc2 = crc32_be(crc2, buf + i, len + 4 - i); - if (crc2) - printf("\nCRC split fail: 0x%08x\n", crc2); + crc ^= crc32_le(test[i].crc, test_buf + + test[i].start, test[i].length); + + crc ^= crc32_be(test[i].crc, test_buf + + test[i].start, test[i].length); } - /* Now swap it around for the other test */ - - bytereverse(buf, len + 4); - init = bitrev32(init); - crc2 = bitrev32(crc1); - if (crc1 != bitrev32(crc2)) - printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n", - crc1, crc2, bitrev32(crc2)); - crc1 = crc32_le(init, buf, len); - if (crc1 != crc2) - printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1, - crc2); - crc2 = crc32_le(init, buf, len + 4); - if (crc2) - printf("\nCRC cancellation fail: 0x%08x should be 0\n", - crc2); - - for (i = 0; i <= len + 4; i++) { - crc2 = crc32_le(init, buf, i); - crc2 = crc32_le(crc2, buf + i, len + 4 - i); - if (crc2) - printf("\nCRC split fail: 0x%08x\n", crc2); + /* reduce OS noise */ + local_irq_save(flags); + local_irq_disable(); + + getnstimeofday(&start); + for (i = 0; i < 100; i++) { + if (test[i].crc_le != crc32_le(test[i].crc, test_buf + + test[i].start, test[i].length)) + errors++; + + if (test[i].crc_be != crc32_be(test[i].crc, test_buf + + test[i].start, test[i].length)) + errors++; } + getnstimeofday(&stop); - return crc1; -} + local_irq_restore(flags); + local_irq_enable(); -#define SIZE 64 -#define INIT1 0 -#define INIT2 0 + nsec = stop.tv_nsec - start.tv_nsec + + 1000000000 * (stop.tv_sec - start.tv_sec); -int main(void) -{ - unsigned char buf1[SIZE + 4]; - unsigned char buf2[SIZE + 4]; - unsigned char buf3[SIZE + 4]; - int i, j; - u32 crc1, crc2, crc3; - - for (i = 0; i <= SIZE; i++) { - printf("\rTesting length %d...", i); - fflush(stdout); - random_garbage(buf1, i); - random_garbage(buf2, i); - for (j = 0; j < i; j++) - buf3[j] = buf1[j] ^ buf2[j]; - - crc1 = test_step(INIT1, buf1, i); - crc2 = test_step(INIT2, buf2, i); - /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */ - crc3 = test_step(INIT1 ^ INIT2, buf3, i); - if (crc3 != (crc1 ^ crc2)) - printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n", - crc3, crc1, crc2); + pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", + CRC_LE_BITS, CRC_BE_BITS); + + if (errors) + pr_warn("crc32: %d self tests failed\n", errors); + else { + pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n", + bytes, nsec); } - printf("\nAll test complete. 
No failures expected.\n"); + + return 0; +} + +static int __init crc32test_init(void) +{ + crc32_test(); + crc32c_test(); return 0; } -#endif /* UNITTEST */ +static void __exit crc32_exit(void) +{ +} + +module_init(crc32test_init); +module_exit(crc32_exit); +#endif /* CONFIG_CRC32_SELFTEST */ diff --git a/lib/crc32defs.h b/lib/crc32defs.h index 9b6773d7..64cba2c3 100644 --- a/lib/crc32defs.h +++ b/lib/crc32defs.h @@ -6,27 +6,67 @@ #define CRCPOLY_LE 0xedb88320 #define CRCPOLY_BE 0x04c11db7 -/* How many bits at a time to use. Requires a table of 4< 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1 -# error CRC_LE_BITS must be a power of 2 between 1 and 8 +#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \ + CRC_LE_BITS & CRC_LE_BITS-1 +# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}" #endif /* * Big-endian CRC computation. Used with serial bit streams sent * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC. */ -#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1 -# error CRC_BE_BITS must be a power of 2 between 1 and 8 +#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \ + CRC_BE_BITS & CRC_BE_BITS-1 +# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}" #endif diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c index 85d0e412..8f8d5439 100644 --- a/lib/gen_crc32table.c +++ b/lib/gen_crc32table.c @@ -1,14 +1,29 @@ #include +#include "../include/generated/autoconf.h" #include "crc32defs.h" #include #define ENTRIES_PER_LINE 4 -#define LE_TABLE_SIZE (1 << CRC_LE_BITS) -#define BE_TABLE_SIZE (1 << CRC_BE_BITS) +#if CRC_LE_BITS > 8 +# define LE_TABLE_ROWS (CRC_LE_BITS/8) +# define LE_TABLE_SIZE 256 +#else +# define LE_TABLE_ROWS 1 +# define LE_TABLE_SIZE (1 << CRC_LE_BITS) +#endif -static uint32_t crc32table_le[4][LE_TABLE_SIZE]; -static uint32_t crc32table_be[4][BE_TABLE_SIZE]; +#if CRC_BE_BITS > 8 +# define BE_TABLE_ROWS (CRC_BE_BITS/8) +# define BE_TABLE_SIZE 256 +#else +# define BE_TABLE_ROWS 1 +# define BE_TABLE_SIZE (1 << CRC_BE_BITS) +#endif + +static uint32_t crc32table_le[LE_TABLE_ROWS][256]; +static uint32_t crc32table_be[BE_TABLE_ROWS][256]; +static uint32_t crc32ctable_le[LE_TABLE_ROWS][256]; /** * crc32init_le() - allocate and initialize LE table data @@ -17,27 +32,38 @@ static uint32_t crc32table_be[4][BE_TABLE_SIZE]; * fact that crctable[i^j] = crctable[i] ^ crctable[j]. * */ -static void crc32init_le(void) +static void crc32init_le_generic(const uint32_t polynomial, + uint32_t (*tab)[256]) { unsigned i, j; uint32_t crc = 1; - crc32table_le[0][0] = 0; + tab[0][0] = 0; - for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { - crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); + for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) { + crc = (crc >> 1) ^ ((crc & 1) ? 
polynomial : 0); for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) - crc32table_le[0][i + j] = crc ^ crc32table_le[0][j]; + tab[0][i + j] = crc ^ tab[0][j]; } for (i = 0; i < LE_TABLE_SIZE; i++) { - crc = crc32table_le[0][i]; - for (j = 1; j < 4; j++) { - crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8); - crc32table_le[j][i] = crc; + crc = tab[0][i]; + for (j = 1; j < LE_TABLE_ROWS; j++) { + crc = tab[0][crc & 0xff] ^ (crc >> 8); + tab[j][i] = crc; } } } +static void crc32init_le(void) +{ + crc32init_le_generic(CRCPOLY_LE, crc32table_le); +} + +static void crc32cinit_le(void) +{ + crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le); +} + /** * crc32init_be() - allocate and initialize BE table data */ @@ -55,18 +81,18 @@ static void crc32init_be(void) } for (i = 0; i < BE_TABLE_SIZE; i++) { crc = crc32table_be[0][i]; - for (j = 1; j < 4; j++) { + for (j = 1; j < BE_TABLE_ROWS; j++) { crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); crc32table_be[j][i] = crc; } } } -static void output_table(uint32_t table[4][256], int len, char *trans) +static void output_table(uint32_t (*table)[256], int rows, int len, char *trans) { int i, j; - for (j = 0 ; j < 4; j++) { + for (j = 0 ; j < rows; j++) { printf("{"); for (i = 0; i < len - 1; i++) { if (i % ENTRIES_PER_LINE == 0) @@ -83,15 +109,30 @@ int main(int argc, char** argv) if (CRC_LE_BITS > 1) { crc32init_le(); - printf("static const u32 crc32table_le[4][256] = {"); - output_table(crc32table_le, LE_TABLE_SIZE, "tole"); + printf("static const u32 __cacheline_aligned " + "crc32table_le[%d][%d] = {", + LE_TABLE_ROWS, LE_TABLE_SIZE); + output_table(crc32table_le, LE_TABLE_ROWS, + LE_TABLE_SIZE, "tole"); printf("};\n"); } if (CRC_BE_BITS > 1) { crc32init_be(); - printf("static const u32 crc32table_be[4][256] = {"); - output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); + printf("static const u32 __cacheline_aligned " + "crc32table_be[%d][%d] = {", + BE_TABLE_ROWS, BE_TABLE_SIZE); + output_table(crc32table_be, LE_TABLE_ROWS, + BE_TABLE_SIZE, "tobe"); + printf("};\n"); + } + if (CRC_LE_BITS > 1) { + crc32cinit_le(); + printf("static const u32 __cacheline_aligned " + "crc32ctable_le[%d][%d] = {", + LE_TABLE_ROWS, LE_TABLE_SIZE); + output_table(crc32ctable_le, LE_TABLE_ROWS, + LE_TABLE_SIZE, "tole"); printf("};\n"); } From 309663f747e038bd36bf8e1dfd550566304f61b2 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Tue, 11 Sep 2012 18:34:41 +0100 Subject: [PATCH 044/117] lowmemorykiller: Compact memory when killing processes * Memory compaction is never invoked on Android because we avoid swap and don't allocate huge pages. Fix it by invoking compaction from the LMK when a process is killed to reduce memory fragmentation. Change-Id: I2bda790c2093e65fd6f43d52ea5149d6b57cb1e9 Conflicts: drivers/staging/android/lowmemorykiller.c --- drivers/staging/android/lowmemorykiller.c | 4 ++++ include/linux/compaction.h | 5 +++++ mm/compaction.c | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 1d771dbe..c07f23b0 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -42,6 +42,7 @@ #include #include #include +#include #define DEBUG_LEVEL_DEATHPENDING 6 @@ -74,6 +75,7 @@ static size_t lowmem_minfile[6] = { static int lowmem_minfile_size = 6; static unsigned long lowmem_deathpending_timeout; +extern int compact_nodes(void); static uint32_t lowmem_check_filepages = 0; #define lowmem_print(level, x...) 
\ @@ -223,6 +225,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n", sc->nr_to_scan, sc->gfp_mask, rem); rcu_read_unlock(); + if (selected) + compact_nodes(); return rem; } diff --git a/include/linux/compaction.h b/include/linux/compaction.h index bb2bbdbe..1669fcc0 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -76,6 +76,11 @@ static inline bool compaction_deferred(struct zone *zone) return 1; } +static inline int compact_nodes() +{ + return COMPACT_CONTINUE; +} + #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) diff --git a/mm/compaction.c b/mm/compaction.c index 46973fb5..14ac67fd 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -714,7 +714,7 @@ static int compact_node(int nid) } /* Compact all nodes in the system */ -static int compact_nodes(void) +int compact_nodes(void) { int nid; From e4ce74e0947a2608b3fb910450a6ba113ee97dce Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Tue, 11 Sep 2012 19:15:39 +0100 Subject: [PATCH 045/117] lowmemorykiller: Use asynchronous compaction Change-Id: I6a65d06fc30b88fcedaaf1abf1855fdd19e3c912 Conflicts: mm/compaction.c --- drivers/staging/android/lowmemorykiller.c | 4 +- include/linux/compaction.h | 2 +- mm/compaction.c | 57 +++++++++++++---------- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index c07f23b0..f578bd88 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -75,7 +75,7 @@ static size_t lowmem_minfile[6] = { static int lowmem_minfile_size = 6; static unsigned long lowmem_deathpending_timeout; -extern int compact_nodes(void); +extern int compact_nodes(int); static uint32_t lowmem_check_filepages = 0; #define lowmem_print(level, x...) 
\ @@ -226,7 +226,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) sc->nr_to_scan, sc->gfp_mask, rem); rcu_read_unlock(); if (selected) - compact_nodes(); + compact_nodes(false); return rem; } diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 1669fcc0..14d0746e 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -76,7 +76,7 @@ static inline bool compaction_deferred(struct zone *zone) return 1; } -static inline int compact_nodes() +static inline int compact_nodes(bool sync) { return COMPACT_CONTINUE; } diff --git a/mm/compaction.c b/mm/compaction.c index 14ac67fd..9d337354 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -674,52 +674,61 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, return rc; } - /* Compact all zones within a node */ -static int compact_node(int nid) +static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) { int zoneid; - pg_data_t *pgdat; struct zone *zone; - if (nid < 0 || nid >= nr_node_ids || !node_online(nid)) - return -EINVAL; - pgdat = NODE_DATA(nid); - - /* Flush pending updates to the LRU lists */ - lru_add_drain_all(); - for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { - struct compact_control cc = { - .nr_freepages = 0, - .nr_migratepages = 0, - .order = -1, - }; zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; - cc.zone = zone; - INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); + cc->nr_freepages = 0; + cc->nr_migratepages = 0; + cc->zone = zone; + INIT_LIST_HEAD(&cc->freepages); + INIT_LIST_HEAD(&cc->migratepages); - compact_zone(zone, &cc); + if (cc->order == -1 || !compaction_deferred(zone)) + compact_zone(zone, cc); - VM_BUG_ON(!list_empty(&cc.freepages)); - VM_BUG_ON(!list_empty(&cc.migratepages)); + VM_BUG_ON(!list_empty(&cc->freepages)); + VM_BUG_ON(!list_empty(&cc->migratepages)); } return 0; } +int compact_pgdat(pg_data_t *pgdat, int order) +{ + struct compact_control cc = { + .order = order, + .sync = false, + }; + + return __compact_pgdat(pgdat, &cc); +} + +static int compact_node(int nid, bool sync) +{ + struct compact_control cc = { + .order = -1, + .sync = true, + }; + + return __compact_pgdat(NODE_DATA(nid), &cc); +} + /* Compact all nodes in the system */ -int compact_nodes(void) +int compact_nodes(bool sync) { int nid; for_each_online_node(nid) - compact_node(nid); + compact_node(nid, sync); return COMPACT_COMPLETE; } @@ -732,7 +741,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { if (write) - return compact_nodes(); + return compact_nodes(true); return 0; } From ead27f5cbfe5510472e6aeb678c9da878758c6c8 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 28 Jan 2013 10:53:24 -0500 Subject: [PATCH 046/117] Tweak RNG thresholds --- arch/arm/configs/vigor_aosp_defconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index fc488b2f..b887ce6e 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -2875,8 +2875,8 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_QCEDEV is not set # CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set # CONFIG_BINARY_PRINTF is not set -CONFIG_RANDOM_READ_WAKEUP=128 -CONFIG_RANDOM_WRITE_WAKEUP=256 +CONFIG_RANDOM_READ_WAKEUP=256 +CONFIG_RANDOM_WRITE_WAKEUP=512 # # Library routines From c224d13b2d3f680c06a554900154b6923ae322fc Mon Sep 17 00:00:00 2001 From: 
akshaykarle Date: Wed, 22 Feb 2012 23:33:08 +0530 Subject: [PATCH 047/117] Added frontswap_by_Dan --- .../ABI/testing/sysfs-kernel-mm-cleancache | 11 - Documentation/vm/cleancache.txt | 50 ++-- Documentation/vm/frontswap.txt | 210 +++++++++++++ arch/arm/configs/vigor_aosp_defconfig | 4 +- drivers/staging/zcache/zcache.c | 10 +- drivers/xen/tmem.c | 6 +- fs/buffer.c | 3 +- fs/super.c | 2 +- include/linux/cleancache.h | 38 ++- include/linux/frontswap.h | 127 ++++++++ include/linux/swap.h | 11 +- include/linux/swapfile.h | 14 + mm/Kconfig | 17 ++ mm/Makefile | 1 + mm/cleancache.c | 110 ++++--- mm/filemap.c | 3 +- mm/frontswap.c | 280 ++++++++++++++++++ mm/page_io.c | 12 + mm/swapfile.c | 83 +++++- mm/truncate.c | 15 +- 20 files changed, 867 insertions(+), 140 deletions(-) create mode 100644 Documentation/vm/frontswap.txt create mode 100644 include/linux/frontswap.h create mode 100644 include/linux/swapfile.h create mode 100644 mm/frontswap.c diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cleancache b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache index 662ae646..e69de29b 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-cleancache +++ b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache @@ -1,11 +0,0 @@ -What: /sys/kernel/mm/cleancache/ -Date: April 2011 -Contact: Dan Magenheimer -Description: - /sys/kernel/mm/cleancache/ contains a number of files which - record a count of various cleancache operations - (sum across all filesystems): - succ_gets - failed_gets - puts - flushes diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt index 36c367c7..a9566de5 100644 --- a/Documentation/vm/cleancache.txt +++ b/Documentation/vm/cleancache.txt @@ -44,12 +44,15 @@ pool id which, if positive, must be saved in the filesystem's superblock; a negative return value indicates failure. A "put_page" will copy a (presumably about-to-be-evicted) page into cleancache and associate it with the pool id, a file key, and a page index into the file. (The combination + of a pool id, a file key, and an index is sometimes called a "handle".) A "get_page" will copy the page, if found, from cleancache into kernel memory. -A "flush_page" will ensure the page no longer is present in cleancache; -a "flush_inode" will flush all pages associated with the specified file; -and, when a filesystem is unmounted, a "flush_fs" will flush all pages in -all files specified by the given pool id and also surrender the pool id. + +An "invalidate_page" will ensure the page no longer is present in cleancache; +an "invalidate_inode" will invalidate all pages associated with the specified +file; and, when a filesystem is unmounted, an "invalidate_fs" will invalidate +all pages in all files specified by the given pool id and also surrender +the pool id. An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache to treat the pool as shared using a 128-bit UUID as a key. On systems @@ -61,14 +64,12 @@ be shared. Note that any security requirements must be imposed outside of the kernel (e.g. by "tools" that control cleancache). Or a cleancache implementation can simply disable shared_init by always returning a negative value. - -If a get_page is successful on a non-shared pool, the page is flushed (thus -making cleancache an "exclusive" cache). On a shared pool, the page -is NOT flushed on a successful get_page so that it remains accessible to +If a get_page is successful on a non-shared pool, the page is invalidated +(thus making cleancache an "exclusive" cache). 
On a shared pool, the page +is NOT invalidated on a successful get_page so that it remains accessible to other sharers. The kernel is responsible for ensuring coherency between cleancache (shared or not), the page cache, and the filesystem, using -cleancache flush operations as required. - +cleancache invalidate operations as required. Note that cleancache must enforce put-put-get coherency and get-get coherency. For the former, if two puts are made to the same handle but with different data, say AAA by the first put and BBB by the second, a @@ -77,20 +78,21 @@ if a get for a given handle fails, subsequent gets for that handle will never succeed unless preceded by a successful put with that handle. Last, cleancache provides no SMP serialization guarantees; if two -different Linux threads are simultaneously putting and flushing a page +different Linux threads are simultaneously putting and invalidating a page with the same handle, the results are indeterminate. Callers must lock the page to ensure serial behavior. CLEANCACHE PERFORMANCE METRICS -Cleancache monitoring is done by sysfs files in the -/sys/kernel/mm/cleancache directory. The effectiveness of cleancache + +If properly configured, monitoring of cleancache is done via debugfs in +the /sys/kernel/debug/cleancache directory. The effectiveness of cleancache can be measured (across all filesystems) with: succ_gets - number of gets that were successful failed_gets - number of gets that failed puts - number of puts attempted (all "succeed") -flushes - number of flushes attempted +invalidates - number of invalidates attempted A backend implementatation may provide additional metrics. @@ -143,7 +145,7 @@ systems. The core hooks for cleancache in VFS are in most cases a single line and the minimum set are placed precisely where needed to maintain -coherency (via cleancache_flush operations) between cleancache, +coherency (via cleancache_invalidate operations) between cleancache, the page cache, and disk. All hooks compile into nothingness if cleancache is config'ed off and turn into a function-pointer- compare-to-NULL if config'ed on but no backend claims the ops @@ -184,16 +186,13 @@ or for real kernel-addressable RAM, it makes perfect sense for transcendent memory. 4) Why is non-shared cleancache "exclusive"? And where is the - page "flushed" after a "get"? (Minchan Kim) - + page "invalidated" after a "get"? (Minchan Kim) The main reason is to free up space in transcendent memory and -to avoid unnecessary cleancache_flush calls. If you want inclusive, +to avoid unnecessary cleancache_invalidate calls. If you want inclusive, the page can be "put" immediately following the "get". If put-after-get for inclusive becomes common, the interface could -be easily extended to add a "get_no_flush" call. - -The flush is done by the cleancache backend implementation. - +be easily extended to add a "get_no_invalidate" call. +The invalidate is done by the cleancache backend implementation. 5) What's the performance impact? Performance analysis has been presented at OLS'09 and LCA'10. 
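For readers new to the cleancache API, the backend side described above boils down to filling in a cleancache_ops structure and registering it. The sketch below is illustrative only: every function prefixed example_ is hypothetical, and it assumes the cleancache_register_ops() helper (which returns the previously registered ops) together with the ops field names and signatures used by the zcache and Xen tmem backends further down in this patch.

#include <linux/module.h>
#include <linux/cleancache.h>

/* Hypothetical backend callbacks; a real backend would copy pages to and
 * from its own store.  Bodies elided for brevity. */
static int example_init_fs(size_t pagesize) { return 0; }	/* pool id */
static int example_init_shared_fs(char *uuid, size_t pagesize) { return 0; }
static int example_get_page(int pool, struct cleancache_filekey key,
			    pgoff_t index, struct page *page) { return -1; }
static void example_put_page(int pool, struct cleancache_filekey key,
			     pgoff_t index, struct page *page) { }
static void example_invalidate_page(int pool, struct cleancache_filekey key,
				    pgoff_t index) { }
static void example_invalidate_inode(int pool, struct cleancache_filekey key) { }
static void example_invalidate_fs(int pool) { }

static struct cleancache_ops example_cleancache_ops = {
	.init_fs	  = example_init_fs,
	.init_shared_fs	  = example_init_shared_fs,
	.get_page	  = example_get_page,
	.put_page	  = example_put_page,
	.invalidate_page  = example_invalidate_page,
	.invalidate_inode = example_invalidate_inode,
	.invalidate_fs	  = example_invalidate_fs,
};

static int __init example_cleancache_register(void)
{
	/* Registration returns the ops that were previously installed,
	 * which lets a backend detect that another one got there first. */
	struct cleancache_ops old = cleancache_register_ops(&example_cleancache_ops);

	if (old.init_fs)
		pr_warn("example: another cleancache backend is already registered\n");
	return 0;
}
module_init(example_cleancache_register);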
@@ -222,7 +221,8 @@ Some points for a filesystem to consider: as tmpfs should not enable cleancache) - To ensure coherency/correctness, the FS must ensure that all file removal or truncation operations either go through VFS or - add hooks to do the equivalent cleancache "flush" operations + add hooks to do the equivalent cleancache "invalidate" operations + - To ensure coherency/correctness, either inode numbers must be unique across the lifetime of the on-disk file OR the FS must provide an "encode_fh" function. @@ -243,11 +243,11 @@ If cleancache would use the inode virtual address instead of inode/filehandle, the pool id could be eliminated. But, this won't work because cleancache retains pagecache data pages persistently even when the inode has been pruned from the -inode unused list, and only flushes the data page if the file +inode unused list, and only invalidates the data page if the file gets removed/truncated. So if cleancache used the inode kva, there would be potential coherency issues if/when the inode kva is reused for a different file. Alternately, if cleancache -flushed the pages when the inode kva was freed, much of the value +invalidated the pages when the inode kva was freed, much of the value of cleancache would be lost because the cache of pages in cleanache is potentially much larger than the kernel pagecache and is most useful if the pages survive inode cache removal. diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt new file mode 100644 index 00000000..0a039380 --- /dev/null +++ b/Documentation/vm/frontswap.txt @@ -0,0 +1,210 @@ +Frontswap provides a "transcendent memory" interface for swap pages. +In some environments, dramatic performance savings may be obtained because +swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk. + +Frontswap is so named because it can be thought of as the opposite of +a "backing" store for a swap device. The storage is assumed to be +a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming +to the requirements of transcendent memory (such as Xen's "tmem", or +in-kernel compressed memory, aka "zcache", or future RAM-like devices); +this pseudo-RAM device is not directly accessible or addressable by the +kernel and is of unknown and possibly time-varying size. The driver +links itself to frontswap by calling frontswap_register_ops to set the +frontswap_ops funcs appropriately and the functions it provides must +conform to certain policies as follows: + +An "init" prepares the device to receive frontswap pages associated +with the specified swap device number (aka "type"). A "put_page" will +copy the page to transcendent memory and associate it with the type and +offset associated with the page. A "get_page" will copy the page, if found, +from transcendent memory into kernel memory, but will NOT remove the page +from from transcendent memory. An "invalidate_page" will remove the page +from transcendent memory and an "invalidate_area" will remove ALL pages +associated with the swap type (e.g., like swapoff) and notify the "device" +to refuse further puts with that swap type. + +Once a page is successfully put, a matching get on the page will normally +succeed. So when the kernel finds itself in a situation where it needs +to swap out a page, it first attempts to use frontswap. If the put returns +success, the data has been successfully saved to transcendent memory and +a disk write and, if the data is later read back, a disk read are avoided. 
+If a put returns failure, transcendent memory has rejected the data, and the +page can be written to swap as usual. + +Note that if a page is put and the page already exists in transcendent memory +(a "duplicate" put), either the put succeeds and the data is overwritten, +or the put fails AND the page is invalidated. This ensures stale data may +never be obtained from frontswap. + +If properly configured, monitoring of frontswap is done via debugfs in +the /sys/kernel/debug/frontswap directory. The effectiveness of +frontswap can be measured (across all swap devices) with: + +failed_puts - how many put attempts have failed +gets - how many gets were attempted (all should succeed) +succ_puts - how many put attempts have succeeded +invalidates - how many invalidates were attempted + +A backend implementation may provide additional metrics. + +FAQ + +1) Where's the value? + +When a workload starts swapping, performance falls through the floor. +Frontswap significantly increases performance in many such workloads by +providing a clean, dynamic interface to read and write swap pages to +"transcendent memory" that is otherwise not directly addressable to the kernel. +This interface is ideal when data is transformed to a different form +and size (such as with compression) or secretly moved (as might be +useful for write-balancing for some RAM-like devices). Swap pages (and +evicted page-cache pages) are a great use for this kind of slower-than-RAM- +but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and +cleancache) interface to transcendent memory provides a nice way to read +and write -- and indirectly "name" -- the pages. + +In the virtual case, the whole point of virtualization is to statistically +multiplex physical resources acrosst the varying demands of multiple +virtual machines. This is really hard to do with RAM and efforts to do +it well with no kernel changes have essentially failed (except in some +well-publicized special-case workloads). Frontswap -- and cleancache -- +with a fairly small impact on the kernel, provides a huge amount +of flexibility for more dynamic, flexible RAM multiplexing. +Specifically, the Xen Transcendent Memory backend allows otherwise +"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple +virtual machines, but the pages can be compressed and deduplicated to +optimize RAM utilization. And when guest OS's are induced to surrender +underutilized RAM (e.g. with "self-ballooning"), sudden unexpected +memory pressure may result in swapping; frontswap allows those pages +to be swapped to and from hypervisor RAM if overall host system memory +conditions allow. + +2) Sure there may be performance advantages in some situations, but + what's the space/time overhead of frontswap? + +If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into +nothingness and the only overhead is a few extra bytes per swapon'ed +swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend" +registers, there is one extra global variable compared to zero for +every swap page read or written. If CONFIG_FRONTSWAP is enabled +AND a frontswap backend registers AND the backend fails every "put" +request (i.e. provides no memory despite claiming it might), +CPU overhead is still negligible -- and since every frontswap fail +precedes a swap page write-to-disk, the system is highly likely +to be I/O bound and using a small fraction of a percent of a CPU +will be irrelevant anyway. 
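To make the space figure above concrete, the arithmetic behind the "1MB per 32GB" claim can be checked with a few lines of standalone C (the 32GB swap size is just the example used in the text, not a kernel constant):

#include <stdio.h>

int main(void)
{
	unsigned long long swap_bytes = 32ULL << 30;	/* 32 GiB of swap */
	unsigned long long pages = swap_bytes / 4096;	/* 4K pages: 8,388,608 */
	unsigned long long map_bytes = pages / 8;	/* one bit per swap page */

	/* Prints 1048576 bytes, i.e. 1 MiB of frontswap_map per 32 GiB swap. */
	printf("%llu pages -> %llu bytes of frontswap_map\n", pages, map_bytes);
	return 0;
}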
+ +As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend +registers, one bit is allocated for every swap page for every swap +device that is swapon'd. This is added to the EIGHT bits (which +was sixteen until about 2.6.34) that the kernel already allocates +for every swap page for every swap device that is swapon'd. (Hugh +Dickins has observed that frontswap could probably steal one of +the existing eight bits, but let's worry about that minor optimization +later.) For very large swap disks (which are rare) on a standard +4K pagesize, this is 1MB per 32GB swap. + +3) OK, how about a quick overview of what this frontswap patch does + in terms that a kernel hacker can grok? + +Let's assume that a frontswap "backend" has registered during +kernel initialization; this registration indicates that this +frontswap backend has access to some "memory" that is not directly +accessible by the kernel. Exactly how much memory it provides is +entirely dynamic and random. + +Whenever a swap-device is swapon'd frontswap_init() is called, +passing the swap device number (aka "type") as a parameter. +This notifies frontswap to expect attempts to "put" swap pages +associated with that number. + +Whenever the swap subsystem is readying a page to write to a swap +device (c.f swap_writepage()), frontswap_put_page is called. Frontswap +consults with the frontswap backend and if the backend says it does NOT +have room, frontswap_put_page returns -1 and the kernel swaps the page +to the swap device as normal. Note that the response from the frontswap +backend is unpredictable to the kernel; it may choose to never accept a +page, it could accept every ninth page, or it might accept every +page. But if the backend does accept a page, the data from the page +has already been copied and associated with the type and offset, +and the backend guarantees the persistence of the data. In this case, +frontswap sets a bit in the "frontswap_map" for the swap device +corresponding to the page offset on the swap device to which it would +otherwise have written the data. + +When the swap subsystem needs to swap-in a page (swap_readpage()), +it first calls frontswap_get_page() which checks the frontswap_map to +see if the page was earlier accepted by the frontswap backend. If +it was, the page of data is filled from the frontswap backend and +the swap-in is complete. If not, the normal swap-in code is +executed to obtain the page of data from the real swap device. + +So every time the frontswap backend accepts a page, a swap device read +and (potentially) a swap device write are replaced by a "frontswap backend +put" and (possibly) a "frontswap backend get", which are presumably much +faster. + +4) Can't frontswap be configured as a "special" swap device that is + just higher priority than any real swap device (e.g. like zswap)? + +No. Recall that acceptance of any swap page by the frontswap +backend is entirely unpredictable. This is critical to the definition +of frontswap because it grants completely dynamic discretion to the +backend. But since any "put" might fail, there must always be a real +slot on a real swap device to swap the page. Thus frontswap must be +implemented as a "shadow" to every swapon'd device with the potential +capability of holding every page that the swap device might have held +and the possibility that it might hold no pages at all. +On the downside, this also means that frontswap cannot contain more +pages than the total of swapon'd swap devices. 
For example, if NO +swap device is configured on some installation, frontswap is useless. + +Further, frontswap is entirely synchronous whereas a real swap +device is, by definition, asynchronous and uses block I/O. The +block I/O layer is not only unnecessary, but may perform "optimizations" +that are inappropriate for a RAM-oriented device including delaying +the write of some pages for a significant amount of time. Synchrony is +required to ensure the dynamicity of the backend and to avoid thorny race +conditions that would unnecessarily and greatly complicate frontswap +and/or the block I/O subsystem. + +In a virtualized environment, the dynamicity allows the hypervisor +(or host OS) to do "intelligent overcommit". For example, it can +choose to accept pages only until host-swapping might be imminent, +then force guests to do their own swapping. In zcache, "poorly" +compressible pages can be rejected, where "poorly" can itself be defined +dynamically depending on current memory constraints. + +5) Why this weird definition about "duplicate puts"? If a page + has been previously successfully put, can't it always be + successfully overwritten? + +Nearly always it can, but no, sometimes it cannot. Consider an example +where data is compressed and the original 4K page has been compressed +to 1K. Now an attempt is made to overwrite the page with data that +is non-compressible and so would take the entire 4K. But the backend +has no more space. In this case, the put must be rejected. Whenever +frontswap rejects a put that would overwrite, it also must invalidate +the old data and ensure that it is no longer accessible. Since the +swap subsystem then writes the new data to the read swap device, +this is the correct course of action to ensure coherency. + +6) What is frontswap_shrink for? + ++When the (non-frontswap) swap subsystem swaps out a page to a real +swap device, that page is only taking up low-value pre-allocated disk +space. But if frontswap has placed a page in transcendent memory, that +page may be taking up valuable real estate. The frontswap_shrink +routine allows code outside of the swap subsystem (such as Xen tmem +or zcache or some future tmem backend) to force pages out of the memory +managed by frontswap and back into kernel-addressable memory. + +7) Why does the frontswap patch create the new include file swapfile.h? + +The frontswap code depends on some swap-subsystem-internal data +structures that have, over the years, moved back and forth between +static and global. This seemed a reasonable compromise: Define +them as global but declare them in a new include file that isn't +included by the large number of source files that include swap.h. 
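Since the rest of this patch only adds the frontend (mm/frontswap.c) and the swap hooks, here is a minimal sketch of what backend registration looks like. It is an illustration, not part of the patch: every example_fs_* function is hypothetical, while the frontswap_ops fields and the frontswap_register_ops() call follow the include/linux/frontswap.h and mm/frontswap.c code added below.

#include <linux/module.h>
#include <linux/frontswap.h>

/* Hypothetical backend callbacks; a real backend (zcache, Xen tmem)
 * copies page data to and from its own non-kernel-addressable store. */
static void example_fs_init(unsigned type) { }

static int example_fs_put_page(unsigned type, pgoff_t offset,
			       struct page *page)
{
	/* Non-zero means "no room"; the page is then written to the real
	 * swap device as usual.  A failed duplicate put must leave the old
	 * copy unusable; the frontswap core clears its map bit for us. */
	return -1;
}

static int example_fs_get_page(unsigned type, pgoff_t offset,
			       struct page *page)
{
	return -1;	/* nothing is ever stored in this do-nothing sketch */
}

static void example_fs_invalidate_page(unsigned type, pgoff_t offset) { }
static void example_fs_invalidate_area(unsigned type) { }

static struct frontswap_ops example_frontswap_ops = {
	.init		 = example_fs_init,
	.put_page	 = example_fs_put_page,
	.get_page	 = example_fs_get_page,
	.invalidate_page = example_fs_invalidate_page,
	.invalidate_area = example_fs_invalidate_area,
};

static int __init example_frontswap_register(void)
{
	/* frontswap_register_ops() installs the ops, sets frontswap_enabled
	 * and returns the previously registered ops. */
	struct frontswap_ops old = frontswap_register_ops(&example_frontswap_ops);

	if (old.put_page)
		pr_warn("example: another frontswap backend is already registered\n");
	return 0;
}
module_init(example_frontswap_register);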
+ +Dan Magenheimer, last updated September 12, 2011 diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index b887ce6e..e21e48a2 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -542,6 +542,7 @@ CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_754322 is not set # CONFIG_ARM_ERRATA_754327 is not set # CONFIG_KSAPI is not set +# CONFIG_ARM_ERRATA_764369 is not set CONFIG_ARM_GIC=y # CONFIG_FIQ_DEBUGGER is not set @@ -599,7 +600,8 @@ CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y # CONFIG_KSM is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -# CONFIG_CLEANCACHE is not set +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y # CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG is not set # CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE is not set CONFIG_FORCE_MAX_ZONEORDER=11 diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache.c index 77ac2d4d..7c155b7a 100644 --- a/drivers/staging/zcache/zcache.c +++ b/drivers/staging/zcache/zcache.c @@ -1435,9 +1435,9 @@ static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize) static struct cleancache_ops zcache_cleancache_ops = { .put_page = zcache_cleancache_put_page, .get_page = zcache_cleancache_get_page, - .flush_page = zcache_cleancache_flush_page, - .flush_inode = zcache_cleancache_flush_inode, - .flush_fs = zcache_cleancache_flush_fs, + .invalidate_page = zcache_cleancache_flush_page, + .invalidate_inode = zcache_cleancache_flush_inode, + .invalidate_fs = zcache_cleancache_flush_fs, .init_shared_fs = zcache_cleancache_init_shared_fs, .init_fs = zcache_cleancache_init_fs }; @@ -1541,8 +1541,8 @@ static void zcache_frontswap_init(unsigned ignored) static struct frontswap_ops zcache_frontswap_ops = { .put_page = zcache_frontswap_put_page, .get_page = zcache_frontswap_get_page, - .flush_page = zcache_frontswap_flush_page, - .flush_area = zcache_frontswap_flush_area, + .invalidate_page = zcache_frontswap_flush_page, + .invalidate_area = zcache_frontswap_flush_area, .init = zcache_frontswap_init }; diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index 816a4495..4cac54a5 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -234,9 +234,9 @@ __setup("nocleancache", no_cleancache); static struct cleancache_ops tmem_cleancache_ops = { .put_page = tmem_cleancache_put_page, .get_page = tmem_cleancache_get_page, - .flush_page = tmem_cleancache_flush_page, - .flush_inode = tmem_cleancache_flush_inode, - .flush_fs = tmem_cleancache_flush_fs, + .invalidate_page = tmem_cleancache_flush_page, + .invalidate_inode = tmem_cleancache_flush_inode, + .invalidate_fs = tmem_cleancache_flush_fs, .init_shared_fs = tmem_cleancache_init_shared_fs, .init_fs = tmem_cleancache_init_fs }; diff --git a/fs/buffer.c b/fs/buffer.c index 166028be..cfed2734 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -273,7 +273,8 @@ void invalidate_bdev(struct block_device *bdev) /* 99% of the time, we don't need to flush the cleancache on the bdev. 
* But, for the strange corners, lets be cautious */ - cleancache_flush_inode(mapping); + cleancache_invalidate_inode(mapping); + } EXPORT_SYMBOL(invalidate_bdev); diff --git a/fs/super.c b/fs/super.c index caf4dfa2..a687cf2a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -179,7 +179,7 @@ void deactivate_locked_super(struct super_block *s) { struct file_system_type *fs = s->s_type; if (atomic_dec_and_test(&s->s_active)) { - cleancache_flush_fs(s); + cleancache_invalidate_fs(s); fs->kill_sb(s); /* * We need to call rcu_barrier so all the delayed rcu free diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h index 04ffb2e6..f85d98cd 100644 --- a/include/linux/cleancache.h +++ b/include/linux/cleancache.h @@ -28,9 +28,12 @@ struct cleancache_ops { pgoff_t, struct page *); void (*put_page)(int, struct cleancache_filekey, pgoff_t, struct page *); - void (*flush_page)(int, struct cleancache_filekey, pgoff_t); - void (*flush_inode)(int, struct cleancache_filekey); - void (*flush_fs)(int); + + void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); + void (*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); + + }; extern struct cleancache_ops @@ -39,9 +42,12 @@ extern void __cleancache_init_fs(struct super_block *); extern void __cleancache_init_shared_fs(char *, struct super_block *); extern int __cleancache_get_page(struct page *); extern void __cleancache_put_page(struct page *); -extern void __cleancache_flush_page(struct address_space *, struct page *); -extern void __cleancache_flush_inode(struct address_space *); -extern void __cleancache_flush_fs(struct super_block *); + +extern void __cleancache_invalidate_page(struct address_space *, struct page *); +extern void __cleancache_invalidate_inode(struct address_space *); +extern void __cleancache_invalidate_fs(struct super_block *); + + extern int cleancache_enabled; #ifdef CONFIG_CLEANCACHE @@ -99,24 +105,32 @@ static inline void cleancache_put_page(struct page *page) __cleancache_put_page(page); } -static inline void cleancache_flush_page(struct address_space *mapping, + +static inline void cleancache_invalidate_page(struct address_space *mapping, struct page *page) { /* careful... 
page->mapping is NULL sometimes when this is called */ if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) - __cleancache_flush_page(mapping, page); + + __cleancache_invalidate_page(mapping, page); + + } -static inline void cleancache_flush_inode(struct address_space *mapping) + +static inline void cleancache_invalidate_inode(struct address_space *mapping) { if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) - __cleancache_flush_inode(mapping); + __cleancache_invalidate_inode(mapping); } -static inline void cleancache_flush_fs(struct super_block *sb) + + +static inline void cleancache_invalidate_fs(struct super_block *sb) { if (cleancache_enabled) - __cleancache_flush_fs(sb); + __cleancache_invalidate_fs(sb); + } #endif /* _LINUX_CLEANCACHE_H */ diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h new file mode 100644 index 00000000..5e584d76 --- /dev/null +++ b/include/linux/frontswap.h @@ -0,0 +1,127 @@ +#ifndef _LINUX_FRONTSWAP_H +#define _LINUX_FRONTSWAP_H + +#include +#include +#include + +struct frontswap_ops { + void (*init)(unsigned); + int (*put_page)(unsigned, pgoff_t, struct page *); + int (*get_page)(unsigned, pgoff_t, struct page *); + void (*invalidate_page)(unsigned, pgoff_t); + void (*invalidate_area)(unsigned); + +}; + +extern int frontswap_enabled; +extern struct frontswap_ops + frontswap_register_ops(struct frontswap_ops *ops); +extern void frontswap_shrink(unsigned long); +extern unsigned long frontswap_curr_pages(void); + +extern void __frontswap_init(unsigned type); +extern int __frontswap_put_page(struct page *page); +extern int __frontswap_get_page(struct page *page); +extern void __frontswap_invalidate_page(unsigned, pgoff_t); +extern void __frontswap_invalidate_area(unsigned); + +#ifdef CONFIG_FRONTSWAP + +static inline int frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + int ret = 0; + + if (frontswap_enabled && sis->frontswap_map) + ret = test_bit(offset, sis->frontswap_map); + return ret; +} + +static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) +{ + if (frontswap_enabled && sis->frontswap_map) + set_bit(offset, sis->frontswap_map); +} + +static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) +{ + if (frontswap_enabled && sis->frontswap_map) + clear_bit(offset, sis->frontswap_map); +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ + p->frontswap_map = map; +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return p->frontswap_map; +} +#else +/* all inline routines become no-ops and all externs are ignored */ + +#define frontswap_enabled (0) + +static inline int frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + return 0; +} + +static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) +{ +} + +static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) +{ +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return NULL; +} +#endif + +static inline int frontswap_put_page(struct page *page) +{ + int ret = -1; + + if (frontswap_enabled) + ret = __frontswap_put_page(page); + return ret; +} + +static inline int frontswap_get_page(struct page *page) +{ + int ret = -1; + + if (frontswap_enabled) + ret = __frontswap_get_page(page); + return ret; +} + +static inline void 
frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + if (frontswap_enabled) + __frontswap_invalidate_page(type, offset); +} + +static inline void frontswap_invalidate_area(unsigned type) +{ + if (frontswap_enabled) + __frontswap_invalidate_area(type); +} + +static inline void frontswap_init(unsigned type) +{ + if (frontswap_enabled) + __frontswap_init(type); +} + +#endif /* _LINUX_FRONTSWAP_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index e73799d3..86159a4d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -194,7 +194,16 @@ struct swap_info_struct { struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ -}; + + + +#ifdef CONFIG_FRONTSWAP + unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ + atomic_t frontswap_pages; /* frontswap pages in-use counter */ +#endif + + }; + struct swap_list_t { int head; /* head of priority-ordered swapfile list */ diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h new file mode 100644 index 00000000..83c3778b --- /dev/null +++ b/include/linux/swapfile.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_SWAPFILE_H +#define _LINUX_SWAPFILE_H + +/* + * these were static in swapfile.c but frontswap.c needs them and we don't + * want to expose them to the dozens of source files that include swap.h + */ +extern spinlock_t swap_lock; +extern struct swap_list_t swap_list; +extern struct swap_info_struct *swap_info[]; +extern int try_to_unuse(unsigned int, bool, unsigned long); + +#endif /* _LINUX_SWAPFILE_H */ + diff --git a/mm/Kconfig b/mm/Kconfig index 9f30530d..8006727c 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -370,3 +370,20 @@ config CLEANCACHE in a negligible performance hit. If unsure, say Y to enable cleancache + +config FRONTSWAP + bool "Enable frontswap to cache swap pages if tmem is present" + depends on SWAP + default n + help + Frontswap is so named because it can be thought of as the opposite + of a "backing" store for a swap device. The data is stored into + "transcendent memory", memory that is not directly accessible or + addressable by the kernel and is of unknown and possibly + time-varying size. When space in transcendent memory is available, + a significant swap I/O reduction may be achieved. When none is + available, all frontswap calls are reduced to a single pointer- + compare-against-NULL resulting in a negligible performance hit + and swap data is stored as normal on the matching swap device. + + If unsure, say Y to enable frontswap. diff --git a/mm/Makefile b/mm/Makefile index 2d00bf57..4ebbab94 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o +obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o diff --git a/mm/cleancache.c b/mm/cleancache.c index bcaae4c2..42915705 100644 --- a/mm/cleancache.c +++ b/mm/cleancache.c @@ -15,29 +15,36 @@ #include #include #include +#include #include /* * This global enablement flag may be read thousands of times per second - * by cleancache_get/put/flush even on systems where cleancache_ops + * by cleancache_get/put/invalidate even on systems where cleancache_ops * is not claimed (e.g. 
cleancache is config'ed on but remains * disabled), so is preferred to the slower alternative: a function * call that checks a non-global. */ -int cleancache_enabled; +int cleancache_enabled __read_mostly; EXPORT_SYMBOL(cleancache_enabled); /* * cleancache_ops is set by cleancache_ops_register to contain the pointers * to the cleancache "backend" implementation functions. */ -static struct cleancache_ops cleancache_ops; +static struct cleancache_ops cleancache_ops __read_mostly; + + +/* + * Counters available via /sys/kernel/debug/frontswap (if debugfs is + * properly configured. These are for information only so are not protected + * against increment races. + */ +static u64 cleancache_succ_gets; +static u64 cleancache_failed_gets; +static u64 cleancache_puts; +static u64 cleancache_invalidates; -/* useful stats available in /sys/kernel/mm/cleancache */ -static unsigned long cleancache_succ_gets; -static unsigned long cleancache_failed_gets; -static unsigned long cleancache_puts; -static unsigned long cleancache_flushes; /* * register operations for cleancache, returning previous thus allowing @@ -148,10 +155,12 @@ void __cleancache_put_page(struct page *page) EXPORT_SYMBOL(__cleancache_put_page); /* - * Flush any data from cleancache associated with the poolid and the + * Invalidate any data from cleancache associated with the poolid and the * page's inode and page index so that a subsequent "get" will fail. */ -void __cleancache_flush_page(struct address_space *mapping, struct page *page) + +void __cleancache_invalidate_page(struct address_space *mapping, + struct page *page) { /* careful... page->mapping is NULL sometimes when this is called */ int pool_id = mapping->host->i_sb->cleancache_poolid; @@ -160,85 +169,66 @@ void __cleancache_flush_page(struct address_space *mapping, struct page *page) if (pool_id >= 0) { VM_BUG_ON(!PageLocked(page)); if (cleancache_get_key(mapping->host, &key) >= 0) { - (*cleancache_ops.flush_page)(pool_id, key, page->index); - cleancache_flushes++; + (*cleancache_ops.invalidate_page)(pool_id, + key, page->index); + cleancache_invalidates++; + } } } -EXPORT_SYMBOL(__cleancache_flush_page); + +EXPORT_SYMBOL(__cleancache_invalidate_page); /* - * Flush all data from cleancache associated with the poolid and the + + * Invalidate all data from cleancache associated with the poolid and the * mappings's inode so that all subsequent gets to this poolid/inode * will fail. 
*/ -void __cleancache_flush_inode(struct address_space *mapping) + + + +void __cleancache_invalidate_inode(struct address_space *mapping) { int pool_id = mapping->host->i_sb->cleancache_poolid; struct cleancache_filekey key = { .u.key = { 0 } }; if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) - (*cleancache_ops.flush_inode)(pool_id, key); + (*cleancache_ops.invalidate_inode)(pool_id, key); + } -EXPORT_SYMBOL(__cleancache_flush_inode); +EXPORT_SYMBOL(__cleancache_invalidate_inode); /* * Called by any cleancache-enabled filesystem at time of unmount; * note that pool_id is surrendered and may be reutrned by a subsequent * cleancache_init_fs or cleancache_init_shared_fs */ -void __cleancache_flush_fs(struct super_block *sb) + +void __cleancache_invalidate_fs(struct super_block *sb) { if (sb->cleancache_poolid >= 0) { int old_poolid = sb->cleancache_poolid; sb->cleancache_poolid = -1; - (*cleancache_ops.flush_fs)(old_poolid); - } -} -EXPORT_SYMBOL(__cleancache_flush_fs); + (*cleancache_ops.invalidate_fs)(old_poolid); -#ifdef CONFIG_SYSFS - -/* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */ - -#define CLEANCACHE_SYSFS_RO(_name) \ - static ssize_t cleancache_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf) \ - { \ - return sprintf(buf, "%lu\n", cleancache_##_name); \ - } \ - static struct kobj_attribute cleancache_##_name##_attr = { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = cleancache_##_name##_show, \ } - -CLEANCACHE_SYSFS_RO(succ_gets); -CLEANCACHE_SYSFS_RO(failed_gets); -CLEANCACHE_SYSFS_RO(puts); -CLEANCACHE_SYSFS_RO(flushes); - -static struct attribute *cleancache_attrs[] = { - &cleancache_succ_gets_attr.attr, - &cleancache_failed_gets_attr.attr, - &cleancache_puts_attr.attr, - &cleancache_flushes_attr.attr, - NULL, -}; - -static struct attribute_group cleancache_attr_group = { - .attrs = cleancache_attrs, - .name = "cleancache", -}; - -#endif /* CONFIG_SYSFS */ +} +EXPORT_SYMBOL(__cleancache_invalidate_fs); static int __init init_cleancache(void) { -#ifdef CONFIG_SYSFS - int err; - - err = sysfs_create_group(mm_kobj, &cleancache_attr_group); -#endif /* CONFIG_SYSFS */ +#ifdef CONFIG_DEBUG_FS + struct dentry *root = debugfs_create_dir("cleancache", NULL); + if (root == NULL) + return -ENXIO; + debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets); + debugfs_create_u64("failed_gets", S_IRUGO, + root, &cleancache_failed_gets); + debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts); + debugfs_create_u64("invalidates", S_IRUGO, + root, &cleancache_invalidates); +#endif return 0; } module_init(init_cleancache) diff --git a/mm/filemap.c b/mm/filemap.c index 10481ebd..9618ad1b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -127,7 +127,8 @@ void __delete_from_page_cache(struct page *page) if (PageUptodate(page) && PageMappedToDisk(page)) cleancache_put_page(page); else - cleancache_flush_page(mapping, page); + cleancache_invalidate_page(mapping, page); + radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; diff --git a/mm/frontswap.c b/mm/frontswap.c new file mode 100644 index 00000000..cb9ba289 --- /dev/null +++ b/mm/frontswap.c @@ -0,0 +1,280 @@ +/* + * Frontswap frontend + * + * This code provides the generic "frontend" layer to call a matching + * "backend" driver implementation of frontswap. See + * Documentation/vm/frontswap.txt for more information. + * + * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. 
+ * Author: Dan Magenheimer + * + * This work is licensed under the terms of the GNU GPL, version 2. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * frontswap_ops is set by frontswap_register_ops to contain the pointers + * to the frontswap "backend" implementation functions. + */ +static struct frontswap_ops frontswap_ops __read_mostly; + +/* + * This global enablement flag reduces overhead on systems where frontswap_ops + * has not been registered, so is preferred to the slower alternative: a + * function call that checks a non-global. + */ +int frontswap_enabled __read_mostly; +EXPORT_SYMBOL(frontswap_enabled); + +/* + * Counters available via /sys/kernel/debug/frontswap (if debugfs is + * properly configured. These are for information only so are not protected + * against increment races. + */ +static u64 frontswap_gets; +static u64 frontswap_succ_puts; +static u64 frontswap_failed_puts; +static u64 frontswap_invalidates; + +/* + * Register operations for frontswap, returning previous thus allowing + * detection of multiple backends and possible nesting + */ +struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops) +{ + + + struct frontswap_ops old = frontswap_ops; + + frontswap_ops = *ops; + frontswap_enabled = 1; + return old; +} +EXPORT_SYMBOL(frontswap_register_ops); + +/* Called when a swap device is swapon'd */ +void __frontswap_init(unsigned type) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (sis->frontswap_map == NULL) + return; + if (frontswap_enabled) + (*frontswap_ops.init)(type); +} +EXPORT_SYMBOL(__frontswap_init); + +/* + * "Put" data from a page to frontswap and associate it with the page's + * swaptype and offset. Page must be locked and in the swap cache. + * If frontswap already contains a page with matching swaptype and + * offset, the frontswap implmentation may either overwrite the data and + * return success or invalidate the page from frontswap and return failure + */ +int __frontswap_put_page(struct page *page) +{ + + int ret = -1, dup = 0; + swp_entry_t entry = { .val = page_private(page), }; + int type = swp_type(entry); + struct swap_info_struct *sis = swap_info[type]; + pgoff_t offset = swp_offset(entry); + + BUG_ON(!PageLocked(page)); + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) + dup = 1; + ret = (*frontswap_ops.put_page)(type, offset, page); + if (ret == 0) { + frontswap_set(sis, offset); + frontswap_succ_puts++; + if (!dup) + atomic_inc(&sis->frontswap_pages); + } else if (dup) { + /* + failed dup always results in automatic invalidate of + the (older) page from frontswap + */ + frontswap_clear(sis, offset); + atomic_dec(&sis->frontswap_pages); + frontswap_failed_puts++; + } else + frontswap_failed_puts++; + return ret; +} +EXPORT_SYMBOL(__frontswap_put_page); + +/* + * "Get" data from frontswap associated with swaptype and offset that were + * specified when the data was put to frontswap and use it to fill the + * specified page with data. 
Page must be locked and in the swap cache + */ +int __frontswap_get_page(struct page *page) +{ + + + int ret = -1; + swp_entry_t entry = { .val = page_private(page), }; + int type = swp_type(entry); + struct swap_info_struct *sis = swap_info[type]; + pgoff_t offset = swp_offset(entry); + + BUG_ON(!PageLocked(page)); + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) + ret = (*frontswap_ops.get_page)(type, offset, page); + if (ret == 0) + frontswap_gets++; + return ret; +} +EXPORT_SYMBOL(__frontswap_get_page); + +/* + * Invalidate any data from frontswap associated with the specified swaptype + * and offset so that a subsequent "get" will fail. + */ +void __frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + struct swap_info_struct *sis = swap_info[type]; + + + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) { + (*frontswap_ops.invalidate_page)(type, offset); + atomic_dec(&sis->frontswap_pages); + frontswap_clear(sis, offset); + frontswap_invalidates++; + } +} +EXPORT_SYMBOL(__frontswap_invalidate_page); + +/* + * Invalidate all data from frontswap associated with all offsets for the + * specified swaptype. + */ +void __frontswap_invalidate_area(unsigned type) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (sis->frontswap_map == NULL) + return; + (*frontswap_ops.invalidate_area)(type); + atomic_set(&sis->frontswap_pages, 0); + memset(sis->frontswap_map, 0, sis->max / sizeof(long)); +} +EXPORT_SYMBOL(__frontswap_invalidate_area); + +/* + * Frontswap, like a true swap device, may unnecessarily retain pages + * under certain circumstances; "shrink" frontswap is essentially a + * "partial swapoff" and works by calling try_to_unuse to attempt to + * unuse enough frontswap pages to attempt to -- subject to memory + * constraints -- reduce the number of pages in frontswap to the + * number given in the parameter target_pages. + */ +void frontswap_shrink(unsigned long target_pages) +{ + + struct swap_info_struct *si = NULL; + int si_frontswap_pages; + unsigned long total_pages = 0, total_pages_to_unuse; + unsigned long pages = 0, pages_to_unuse = 0; + int type; + bool locked = false; + + /* + * we don't want to hold swap_lock while doing a very + * lengthy try_to_unuse, but swap_list may change + * so restart scan from swap_list.head each time + */ + spin_lock(&swap_lock); + locked = true; + total_pages = 0; + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + total_pages += atomic_read(&si->frontswap_pages); + } + if (total_pages <= target_pages) + goto out; + total_pages_to_unuse = total_pages - target_pages; + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + si_frontswap_pages = atomic_read(&si->frontswap_pages); + if (total_pages_to_unuse < si_frontswap_pages) + pages = pages_to_unuse = total_pages_to_unuse; + else { + pages = si_frontswap_pages; + pages_to_unuse = 0; /* unuse all */ + } + /* ensure there is enough RAM to fetch pages from frontswap */ + if (security_vm_enough_memory_kern(pages)) + continue; + vm_unacct_memory(pages); + break; + } + if (type < 0) + goto out; + locked = false; + spin_unlock(&swap_lock); + try_to_unuse(type, true, pages_to_unuse); +out: + if (locked) + spin_unlock(&swap_lock); + return; +} +EXPORT_SYMBOL(frontswap_shrink); + +/* + * Count and return the number of frontswap pages across all + * swap devices. This is exported so that backend drivers can + * determine current usage without reading debugfs. 
+ */ +unsigned long frontswap_curr_pages(void) +{ + int type; + unsigned long totalpages = 0; + struct swap_info_struct *si = NULL; + + spin_lock(&swap_lock); + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + totalpages += atomic_read(&si->frontswap_pages); + } + spin_unlock(&swap_lock); + return totalpages; +} +EXPORT_SYMBOL(frontswap_curr_pages); + +static int __init init_frontswap(void) +{ + + int err = 0; + +#ifdef CONFIG_DEBUG_FS + struct dentry *root = debugfs_create_dir("frontswap", NULL); + if (root == NULL) + return -ENXIO; + debugfs_create_u64("gets", S_IRUGO, root, &frontswap_gets); + debugfs_create_u64("succ_puts", S_IRUGO, root, &frontswap_succ_puts); + debugfs_create_u64("puts", S_IRUGO, root, &frontswap_failed_puts); + debugfs_create_u64("invalidates", S_IRUGO, + root, &frontswap_invalidates); +#endif + return err; +} + +module_init(init_frontswap); diff --git a/mm/page_io.c b/mm/page_io.c index dc76b4d0..651a9125 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -18,6 +18,7 @@ #include #include #include +#include #include static struct bio *get_swap_bio(gfp_t gfp_flags, @@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) unlock_page(page); goto out; } + if (frontswap_put_page(page) == 0) { + set_page_writeback(page); + unlock_page(page); + end_page_writeback(page); + goto out; + } bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); if (bio == NULL) { set_page_dirty(page); @@ -122,6 +129,11 @@ int swap_readpage(struct page *page) VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageUptodate(page)); + if (frontswap_get_page(page) == 0) { + SetPageUptodate(page); + unlock_page(page); + goto out; + } bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); diff --git a/mm/swapfile.c b/mm/swapfile.c index c8f43388..2c9d4534 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -32,7 +32,8 @@ #include #include #include - +#include +#include #include #include #include @@ -43,7 +44,8 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, static void free_swap_count_continuations(struct swap_info_struct *); static sector_t map_swap_entry(swp_entry_t, struct block_device**); -static DEFINE_SPINLOCK(swap_lock); +DEFINE_SPINLOCK(swap_lock); + static unsigned int nr_swapfiles; long nr_swap_pages; long total_swap_pages; @@ -54,9 +56,12 @@ static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const char Unused_offset[] = "Unused swap offset entry "; -static struct swap_list_t swap_list = {-1, -1}; -static struct swap_info_struct *swap_info[MAX_SWAPFILES]; + +struct swap_list_t swap_list = {-1, -1}; + + +struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); @@ -557,6 +562,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, swap_list.next = p->type; nr_swap_pages++; p->inuse_pages--; + frontswap_invalidate_page(p->type, offset); if ((p->flags & SWP_BLKDEV) && disk->fops->swap_slot_free_notify) disk->fops->swap_slot_free_notify(p->bdev, offset); @@ -1015,12 +1021,17 @@ static int unuse_mm(struct mm_struct *mm, return (ret < 0)? ret: 0; } + + /* - * Scan swap_map from current position to next entry still in use. + + + * Scan swap_map (or frontswap_map if frontswap parameter is true) + * from current position to next entry still in use. * Recycle to start on reaching the end, returning 0 when empty. 
*/ static unsigned int find_next_to_unuse(struct swap_info_struct *si, - unsigned int prev) + unsigned int prev, bool frontswap) { unsigned int max = si->max; unsigned int i = prev; @@ -1046,6 +1057,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, prev = 0; i = 1; } + if (frontswap) { + if (frontswap_test(si, i)) + break; + else + continue; + } count = si->swap_map[i]; if (count && swap_count(count) != SWAP_MAP_BAD) break; @@ -1057,8 +1074,16 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, * We completely avoid races by reading each swap page in advance, * and then search for the process using it. All the necessary * page table adjustments can then be made atomically. + + * + * if the boolean frontswap is true, only unuse pages_to_unuse pages; + * pages_to_unuse==0 means all pages; ignored if frontswap is false */ -static int try_to_unuse(unsigned int type) + + + +int try_to_unuse(unsigned int type, bool frontswap, + unsigned long pages_to_unuse) { struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; @@ -1091,7 +1116,8 @@ static int try_to_unuse(unsigned int type) * one pass through swap_map is enough, but not necessarily: * there are races when an instance of an entry might be missed. */ - while ((i = find_next_to_unuse(si, i)) != 0) { + + while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { if (signal_pending(current)) { retval = -EINTR; break; @@ -1258,6 +1284,10 @@ static int try_to_unuse(unsigned int type) * interactive performance. */ cond_resched(); + if (frontswap && pages_to_unuse > 0) { + if (!--pages_to_unuse) + break; + } } mmput(start_mm); @@ -1517,7 +1547,8 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) } static void enable_swap_info(struct swap_info_struct *p, int prio, - unsigned char *swap_map) + unsigned char *swap_map, + unsigned long *frontswap_map) { int i, prev; @@ -1527,6 +1558,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, else p->prio = --least_priority; p->swap_map = swap_map; + frontswap_map_set(p, frontswap_map); p->flags |= SWP_WRITEOK; nr_swap_pages += p->pages; total_swap_pages += p->pages; @@ -1543,6 +1575,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, swap_list.head = swap_list.next = p->type; else swap_info[prev]->next = p->type; + frontswap_init(p->type); spin_unlock(&swap_lock); } @@ -1614,7 +1647,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); - err = try_to_unuse(type); + + err = try_to_unuse(type, false, 0); /* force all pages to be unused */ + test_set_oom_score_adj(oom_score_adj); if (err) { @@ -1625,7 +1660,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) * sys_swapoff for this swap_info_struct at this point. 
*/ /* re-insert swap space back into swap_list */ - enable_swap_info(p, p->prio, p->swap_map); + enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p)); goto out_dput; } @@ -1651,9 +1686,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) swap_map = p->swap_map; p->swap_map = NULL; p->flags = 0; + frontswap_invalidate_area(type); spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); vfree(swap_map); + vfree(frontswap_map_get(p)); /* Destroy swap account informatin */ swap_cgroup_swapoff(type); @@ -2026,6 +2063,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) sector_t span; unsigned long maxpages; unsigned char *swap_map = NULL; + unsigned long *frontswap_map = NULL; struct page *page = NULL; struct inode *inode = NULL; @@ -2107,6 +2145,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; } + /* frontswap enabled? set up bit-per-page map for frontswap */ + if (frontswap_enabled) + frontswap_map = vzalloc(maxpages / sizeof(long)); + if (p->bdev) { if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { p->flags |= SWP_SOLIDSTATE; @@ -2121,14 +2163,17 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) if (swap_flags & SWAP_FLAG_PREFER) prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; - enable_swap_info(p, prio, swap_map); + enable_swap_info(p, prio, swap_map, frontswap_map); printk(KERN_INFO "Adding %uk swap on %s. " - "Priority:%d extents:%d across:%lluk %s%s\n", + "Priority:%d extents:%d across:%lluk %s%s%s\n", + p->pages<<(PAGE_SHIFT-10), name, p->prio, nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), (p->flags & SWP_SOLIDSTATE) ? "SS" : "", - (p->flags & SWP_DISCARDABLE) ? "D" : ""); + (p->flags & SWP_DISCARDABLE) ? "D" : "", + (frontswap_map) ? 
"FS" : ""); + mutex_unlock(&swapon_mutex); atomic_inc(&proc_poll_event); @@ -2319,6 +2364,10 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset) base++; spin_lock(&swap_lock); + if (frontswap_test(si, target)) { + spin_unlock(&swap_lock); + return 0; + } if (end > si->max) /* don't go beyond end of map */ end = si->max; @@ -2329,6 +2378,9 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset) break; if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) break; + /* Don't read in frontswap pages */ + if (frontswap_test(si, toff)) + break; } /* Count contiguous allocated slots below our target */ for (toff = target; --toff >= base; nr_pages++) { @@ -2337,6 +2389,9 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset) break; if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) break; + /* Don't read in frontswap pages */ + if (frontswap_test(si, toff)) + break; } spin_unlock(&swap_lock); diff --git a/mm/truncate.c b/mm/truncate.c index 3e9829f3..e1b209c4 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -52,7 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset) static inline void truncate_partial_page(struct page *page, unsigned partial) { zero_user_segment(page, partial, PAGE_CACHE_SIZE); - cleancache_flush_page(page->mapping, page); + cleancache_invalidate_page(page->mapping, page); if (page_has_private(page)) do_invalidatepage(page, partial); } @@ -216,7 +216,8 @@ void truncate_inode_pages_range(struct address_space *mapping, pgoff_t next; int i; - cleancache_flush_inode(mapping); + cleancache_invalidate_inode(mapping); + if (mapping->nrpages == 0) return; @@ -294,7 +295,8 @@ void truncate_inode_pages_range(struct address_space *mapping, pagevec_release(&pvec); mem_cgroup_uncharge_end(); } - cleancache_flush_inode(mapping); + cleancache_invalidate_inode(mapping); + } EXPORT_SYMBOL(truncate_inode_pages_range); @@ -450,7 +452,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping, int did_range_unmap = 0; int wrapped = 0; - cleancache_flush_inode(mapping); + cleancache_invalidate_inode(mapping); + pagevec_init(&pvec, 0); next = start; while (next <= end && !wrapped && @@ -509,7 +512,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping, mem_cgroup_uncharge_end(); cond_resched(); } - cleancache_flush_inode(mapping); + + cleancache_invalidate_inode(mapping); + return ret; } EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); From 3783dfb18d01cf9f95ca48181d5b1150807700c1 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 07:21:16 -0500 Subject: [PATCH 048/117] Linux 3.0.54 --- Documentation/dvb/get_dvb_firmware | 2 +- Documentation/networking/ifenslave.c | 2 ++ Makefile | 4 +-- arch/parisc/kernel/signal32.c | 6 ++-- arch/parisc/kernel/sys_parisc.c | 2 ++ arch/sparc/kernel/signal_64.c | 4 +-- arch/x86/include/asm/ptrace.h | 15 +++------ arch/x86/kernel/cpu/mcheck/therm_throt.c | 29 +++++------------ arch/x86/kernel/microcode_amd.c | 4 +++ arch/x86/kernel/ptrace.c | 28 +++++++++++++++++ drivers/ata/sata_svw.c | 35 +++++++++++++++++++++ drivers/block/DAC960.c | 18 ++++++----- drivers/gpu/drm/radeon/radeon_agp.c | 5 ++- drivers/input/mouse/bcm5974.c | 3 ++ drivers/isdn/gigaset/bas-gigaset.c | 19 +++++++++-- drivers/md/dm.c | 8 ++++- drivers/mmc/host/sdhci-s3c.c | 2 +- drivers/mtd/devices/slram.c | 2 +- drivers/net/ixgbe/ixgbe_82599.c | 2 ++ drivers/net/ixgbe/ixgbe_common.c | 1 + drivers/net/ixgbe/ixgbe_main.c | 6 ++++ drivers/net/ixgbe/ixgbe_type.h | 3 ++ drivers/net/wireless/mwifiex/sdio.c | 11 
++++--- drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1 + drivers/platform/x86/acer-wmi.c | 1 + drivers/scsi/isci/request.c | 2 +- drivers/usb/serial/mct_u232.c | 14 +++++---- drivers/video/riva/fbdev.c | 5 ++- fs/jbd/transaction.c | 2 ++ kernel/futex.c | 18 ++++++++++- kernel/watchdog.c | 4 +-- net/can/bcm.c | 3 ++ net/mac80211/ibss.c | 8 ++--- sound/pci/hda/patch_realtek.c | 4 +++ sound/pcmcia/pdaudiocf/pdaudiocf.c | 2 +- sound/pcmcia/vx/vxpocket.c | 2 +- sound/usb/midi.c | 8 +++-- 37 files changed, 203 insertions(+), 82 deletions(-) diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware index 3348d313..511dd4df 100644 --- a/Documentation/dvb/get_dvb_firmware +++ b/Documentation/dvb/get_dvb_firmware @@ -114,7 +114,7 @@ sub tda10045 { sub tda10046 { my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip"; - my $url = "http://www.tt-download.com/download/updates/219/$sourcefile"; + my $url = "http://technotrend.com.ua/download/software/219/$sourcefile"; my $hash = "6a7e1e2f2644b162ff0502367553c72d"; my $outfile = "dvb-fe-tda10046.fw"; my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c index 2bac9618..50f1dc49 100644 --- a/Documentation/networking/ifenslave.c +++ b/Documentation/networking/ifenslave.c @@ -539,12 +539,14 @@ static int if_getconfig(char *ifname) metric = 0; } else metric = ifr.ifr_metric; + printf("The result of SIOCGIFMETRIC is %d\n", metric); strcpy(ifr.ifr_name, ifname); if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0) mtu = 0; else mtu = ifr.ifr_mtu; + printf("The result of SIOCGIFMTU is %d\n", mtu); strcpy(ifr.ifr_name, ifname); if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) { diff --git a/Makefile b/Makefile index 338d6231..aba6aa36 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 53 -EXTRAVERSION = Ermahgerd-13.01.25 +SUBLEVEL = 54 +EXTRAVERSION = NAME = Sneaky Weasel # *DOCUMENTATION* diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index e1413243..d0ea054b 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) { compat_sigset_t s; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; sigset_64to32(&s, set); return copy_to_user(up, &s, sizeof s); @@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) compat_sigset_t s; int r; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; if ((r = copy_from_user(&s, up, sz)) == 0) { sigset_32to64(set, &s); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index c9b93226..7ea75d14 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, struct vm_area_struct *vma; int offset = mapping ? 
get_offset(mapping) : 0; + offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; + addr = DCACHE_ALIGN(addr - offset) + offset; for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index d58260bf..77d47617 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -309,9 +309,7 @@ void do_rt_sigreturn(struct pt_regs *regs) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); - - if (err) + if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) goto segv; err |= __get_user(rwin_save, &sf->rwin_save); diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 94e7618f..f332d64c 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -187,21 +187,14 @@ static inline int v8086_mode(struct pt_regs *regs) #endif } -/* - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. The previous stack will be directly underneath the saved - * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. - * - * This is valid only for kernel mode traps. - */ -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ #ifdef CONFIG_X86_32 - return (unsigned long)(&regs->sp); +extern unsigned long kernel_stack_pointer(struct pt_regs *regs); #else +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ return regs->sp; -#endif } +#endif #define GET_IP(regs) ((regs)->ip) #define GET_FP(regs) ((regs)->bp) diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 27c62517..99cd9d2a 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -322,17 +322,6 @@ device_initcall(thermal_throttle_init_device); #endif /* CONFIG_SYSFS */ -/* - * Set up the most two significant bit to notify mce log that this thermal - * event type. - * This is a temp solution. May be changed in the future with mce log - * infrasture. 
- */ -#define CORE_THROTTLED (0) -#define CORE_POWER_LIMIT ((__u64)1 << 62) -#define PACKAGE_THROTTLED ((__u64)2 << 62) -#define PACKAGE_POWER_LIMIT ((__u64)3 << 62) - static void notify_thresholds(__u64 msr_val) { /* check whether the interrupt handler is defined; @@ -362,27 +351,23 @@ static void intel_thermal_interrupt(void) if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, CORE_LEVEL) != 0) - mce_log_therm_throt_event(CORE_THROTTLED | msr_val); + mce_log_therm_throt_event(msr_val); if (this_cpu_has(X86_FEATURE_PLN)) - if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, + therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, - CORE_LEVEL) != 0) - mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); + CORE_LEVEL); if (this_cpu_has(X86_FEATURE_PTS)) { rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); - if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, + therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, - PACKAGE_LEVEL) != 0) - mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); + PACKAGE_LEVEL); if (this_cpu_has(X86_FEATURE_PLN)) - if (therm_throt_process(msr_val & + therm_throt_process(msr_val & PACKAGE_THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, - PACKAGE_LEVEL) != 0) - mce_log_therm_throt_event(PACKAGE_POWER_LIMIT - | msr_val); + PACKAGE_LEVEL); } } diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index b727450f..53ab9ff2 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -162,6 +162,7 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 +#define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: @@ -170,6 +171,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) case 0x15: max_size = F15H_MPB_MAX_SIZE; break; + case 0x16: + max_size = F16H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 807c2a2b..8ede5679 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -164,6 +164,34 @@ static inline bool invalid_selector(u16 value) #define FLAG_MASK FLAG_MASK_32 +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. + * + * Now, if the stack is empty, '&regs->sp' is out of range. In this + * case we try to take the previous stack. To always return a non-null + * stack pointer we fall back to regs as stack if no previous stack + * exists. + * + * This is valid only for kernel mode traps. 
+ */ +unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)&regs->sp; + struct thread_info *tinfo; + + if (context == (sp & ~(THREAD_SIZE - 1))) + return sp; + + tinfo = (struct thread_info *)context; + if (tinfo->previous_esp) + return tinfo->previous_esp; + + return (unsigned long)regs; +} + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 35eabcf3..84980acf 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, return 0; } +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); +} + +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index e086fbbb..8db90891 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -1177,7 +1177,8 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T int TimeoutCounter; int i; - + memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T)); + if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32))) return DAC960_Failure(Controller, "DMA mask out of range"); Controller->BounceBufferLimit = DMA_BIT_MASK(32); @@ -4627,7 +4628,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) DAC960_Controller_T *Controller = Command->Controller; DAC960_CommandType_T CommandType = Command->CommandType; DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; - DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode; + DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode; + DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode; DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus; if (CommandType == DAC960_ReadCommand || @@ -4699,7 +4701,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) { if (Controller->ShutdownMonitoringTimer) return; - if (CommandOpcode == DAC960_V2_GetControllerInfo) + if (IOCTLOpcode == DAC960_V2_GetControllerInfo) { DAC960_V2_ControllerInfo_T *NewControllerInfo = Controller->V2.NewControllerInformation; @@ -4719,14 +4721,14 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) 
memcpy(ControllerInfo, NewControllerInfo, sizeof(DAC960_V2_ControllerInfo_T)); } - else if (CommandOpcode == DAC960_V2_GetEvent) + else if (IOCTLOpcode == DAC960_V2_GetEvent) { if (CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_ReportEvent(Controller, Controller->V2.Event); } Controller->V2.NextEventSequenceNumber++; } - else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid && + else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid && CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo = @@ -4915,7 +4917,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) NewPhysicalDeviceInfo->LogicalUnit++; Controller->V2.PhysicalDeviceIndex++; } - else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) + else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) { unsigned int DeviceIndex; for (DeviceIndex = Controller->V2.PhysicalDeviceIndex; @@ -4938,7 +4940,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) } Controller->V2.NeedPhysicalDeviceInformation = false; } - else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid && + else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid && CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo = @@ -5065,7 +5067,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) [LogicalDeviceNumber] = true; NewLogicalDeviceInfo->LogicalDeviceNumber++; } - else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid) + else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid) { int LogicalDriveNumber; for (LogicalDriveNumber = 0; diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index bd2f33e5..bc6b64fe 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 3126983c..13e38ffe 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -373,6 +373,9 @@ static void setup_events_to_report(struct input_dev *input_dev, __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); + if (cfg->caps & HAS_INTEGRATED_BUTTON) + __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); + input_set_events_per_packet(input_dev, 60); } diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 3913f47e..492aa520 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -616,7 +616,13 @@ static void int_in_work(struct 
work_struct *work) if (rc == 0) /* success, resubmit interrupt read URB */ rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc != 0 && rc != -ENODEV) { + + switch (rc) { + case 0: /* success */ + case -ENODEV: /* device gone */ + case -EINVAL: /* URB already resubmitted, or terminal badness */ + break; + default: /* failure: try to recover by resetting the device */ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc)); rc = usb_lock_device_for_reset(ucs->udev, ucs->interface); if (rc == 0) { @@ -2437,7 +2443,9 @@ static void gigaset_disconnect(struct usb_interface *interface) } /* gigaset_suspend - * This function is called before the USB connection is suspended. + * This function is called before the USB connection is suspended + * or before the USB device is reset. + * In the latter case, message == PMSG_ON. */ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) { @@ -2493,7 +2501,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) del_timer_sync(&ucs->timer_atrdy); del_timer_sync(&ucs->timer_cmd_in); del_timer_sync(&ucs->timer_int_in); - cancel_work_sync(&ucs->int_in_wq); + + /* don't try to cancel int_in_wq from within reset as it + * might be the one requesting the reset + */ + if (message.event != PM_EVENT_ON) + cancel_work_sync(&ucs->int_in_wq); gig_dbg(DEBUG_SUSPEND, "suspend complete"); return 0; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0a0e4f86..cfed9d22 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -745,8 +745,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) if (!md_in_flight(md)) wake_up(&md->wait); + /* + * Run this off this callpath, as drivers could invoke end_io while + * inside their request_fn (and holding the queue lock). Calling + * back into ->request_fn() could deadlock attempting to grab the + * queue lock again. + */ if (run_queue) - blk_run_queue(md->queue); + blk_run_queue_async(md->queue); /* * dm_put() must be at the end of this function. 
See the comment above diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 8cd999f4..4a5c5012 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -589,7 +589,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) sdhci_remove_host(host, 1); - for (ptr = 0; ptr < 3; ptr++) { + for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { if (sc->clk_bus[ptr]) { clk_disable(sc->clk_bus[ptr]); clk_put(sc->clk_bus[ptr]); diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index e5852631..f38c348e 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -266,7 +266,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) if (*(szlength) != '+') { devlength = simple_strtoul(szlength, &buffer, 0); - devlength = handle_unit(devlength, buffer) - devstart; + devlength = handle_unit(devlength, buffer); if (devlength < devstart) goto err_out; diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 8ee66124..4adce71a 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -360,6 +360,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_SFP_SF_QP: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82599_CX4: diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index b894b42a..864403da 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -3181,6 +3181,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 83f197d0..f0b0ff36 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -129,6 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), + board_X540 }, /* required last entry */ {0, } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index fa43f250..1ea15776 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -59,11 +59,14 @@ #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C #define IXGBE_DEV_ID_82599_LS 0x154F +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A #define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540T1 0x1560 /* General Registers */ #define IXGBE_CTRL 0x00000 diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index d425dbd9..3b1217f0 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -122,7 +122,6 @@ static int mwifiex_sdio_suspend(struct device *dev) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; mmc_pm_flag_t pm_flag = 0; - int hs_actived = 0; int i; int 
ret = 0; @@ -149,12 +148,14 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; /* Enable the Host Sleep */ - hs_actived = mwifiex_enable_hs(adapter); - if (hs_actived) { - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (!mwifiex_enable_hs(adapter)) { + dev_err(adapter->dev, "cmd: failed to suspend\n"); + return -EFAULT; } + dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + /* Indicate device suspended */ adapter->is_suspended = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 942f7a39..354f9b15 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -295,6 +295,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { /*=== Customer ID ===*/ /****** 8188CU ********/ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 2080b223..202b567a 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -104,6 +104,7 @@ static const struct key_entry acer_wmi_keymap[] = { {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ + {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ {KE_IGNORE, 0x41, {KEY_MUTE} }, {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x43, {KEY_NEXTSONG} }, diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 225b1968..b70f9992 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1693,7 +1693,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, (void **)&frame_buffer); - sci_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 42de17b7..d3addb29 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -577,12 +577,14 @@ static void mct_u232_close(struct usb_serial_port *port) { dbg("%s port %d", __func__, port->number); - if (port->serial->dev) { - /* shutdown our urbs */ - usb_kill_urb(port->write_urb); - usb_kill_urb(port->read_urb); - usb_kill_urb(port->interrupt_in_urb); - } + /* + * Must kill the read urb as it is actually an interrupt urb, which + * generic close thus fails to kill. 
+ */ + usb_kill_urb(port->read_urb); + usb_kill_urb(port->interrupt_in_urb); + + usb_serial_generic_close(port); } /* mct_u232_close */ diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index d8ab7be4..fabc90de 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -1816,6 +1816,8 @@ static void __devinit riva_update_default_var(struct fb_var_screeninfo *var, specs->modedb, specs->modedb_len, NULL, 8); } else if (specs->modedb != NULL) { + /* get first mode in database as fallback */ + modedb = specs->modedb[0]; /* get preferred timing */ if (info->monspecs.misc & FB_MISC_1ST_DETAIL) { int i; @@ -1826,9 +1828,6 @@ static void __devinit riva_update_default_var(struct fb_var_screeninfo *var, break; } } - } else { - /* otherwise, get first mode in database */ - modedb = specs->modedb[0]; } var->bits_per_pixel = 8; riva_update_var(var, &modedb); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index b0161a66..d7ab0926 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1955,7 +1955,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); spin_unlock(&journal->j_state_lock); + unlock_buffer(bh); log_wait_commit(journal, tid); + lock_buffer(bh); goto retry; } /* diff --git a/kernel/futex.c b/kernel/futex.c index 66cff749..0c48eeea 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -843,6 +843,9 @@ static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; + if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) + return; + /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non-futex wake up happens on another CPU then the task @@ -1078,6 +1081,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key1)) { + if (this->pi_state || this->rt_waiter) { + ret = -EINVAL; + goto out_unlock; + } wake_futex(this); if (++ret >= nr_wake) break; @@ -1090,6 +1097,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, op_ret = 0; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { + if (this->pi_state || this->rt_waiter) { + ret = -EINVAL; + goto out_unlock; + } wake_futex(this); if (++op_ret >= nr_wake2) break; @@ -1098,6 +1109,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, ret += op_ret; } +out_unlock: double_unlock_hb(hb1, hb2); out_put_keys: put_futex_key(&key2); @@ -1387,9 +1399,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, /* * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. + * + * We should never be requeueing a futex_q with a pi_state, + * which is awaiting a futex_unlock_pi(). 
*/ if ((requeue_pi && !this->rt_waiter) || - (!requeue_pi && this->rt_waiter)) { + (!requeue_pi && this->rt_waiter) || + this->pi_state) { ret = -EINVAL; break; } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 3d0c56ad..53e44324 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -113,7 +113,7 @@ static unsigned long get_timestamp(int this_cpu) return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ } -static unsigned long get_sample_period(void) +static u64 get_sample_period(void) { /* * convert watchdog_thresh from seconds to ns @@ -121,7 +121,7 @@ static unsigned long get_sample_period(void) * increment before the hardlockup detector generates * a warning */ - return get_softlockup_thresh() * (NSEC_PER_SEC / 5); + return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5); } /* Commands for resetting the watchdog */ diff --git a/net/can/bcm.c b/net/can/bcm.c index c6cc66f7..b117bfa4 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1085,6 +1085,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->sk = sk; op->ifindex = ifindex; + /* ifindex for timeout events w/o previous frame reception */ + op->rx_ifindex = ifindex; + /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); op->timer.function = bcm_rx_timeout_handler; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 8adcc9e9..c6399f6c 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -965,10 +965,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) mutex_lock(&sdata->u.ibss.mtx); - sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; - memset(sdata->u.ibss.bssid, 0, ETH_ALEN); - sdata->u.ibss.ssid_len = 0; - active_ibss = ieee80211_sta_active_ibss(sdata); if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { @@ -989,6 +985,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) } } + ifibss->state = IEEE80211_IBSS_MLME_SEARCH; + memset(ifibss->bssid, 0, ETH_ALEN); + ifibss->ssid_len = 0; + sta_info_flush(sdata->local, sdata); /* remove beacon */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8d288a78..33158163 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5967,6 +5967,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, const hda_nid_t *nids, ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir)) static const struct snd_pci_quirk beep_white_list[] = { + SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1), @@ -20134,6 +20135,9 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 }, { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 }, { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, + { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, + { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, + { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", .patch = patch_alc861 }, { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c index ce33be0e..66488a7a 100644 --- a/sound/pcmcia/pdaudiocf/pdaudiocf.c +++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c @@ -223,7 +223,7 @@ static int pdacf_config(struct pcmcia_device *link) if 
(ret) goto failed; - ret = pcmcia_request_exclusive_irq(link, pdacf_interrupt); + ret = pcmcia_request_irq(link, pdacf_interrupt); if (ret) goto failed; diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c index d9ef21d8..31777d1e 100644 --- a/sound/pcmcia/vx/vxpocket.c +++ b/sound/pcmcia/vx/vxpocket.c @@ -229,7 +229,7 @@ static int vxpocket_config(struct pcmcia_device *link) if (ret) goto failed; - ret = pcmcia_request_exclusive_irq(link, snd_vx_irq_handler); + ret = pcmcia_request_irq(link, snd_vx_irq_handler); if (ret) goto failed; diff --git a/sound/usb/midi.c b/sound/usb/midi.c index f9289102..a0fe5dd3 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -147,6 +147,7 @@ struct snd_usb_midi_out_endpoint { struct snd_usb_midi_out_endpoint* ep; struct snd_rawmidi_substream *substream; int active; + bool autopm_reference; uint8_t cable; /* cable number << 4 */ uint8_t state; #define STATE_UNKNOWN 0 @@ -1059,7 +1060,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) return -ENXIO; } err = usb_autopm_get_interface(umidi->iface); - if (err < 0) + port->autopm_reference = err >= 0; + if (err < 0 && err != -EACCES) return -EIO; substream->runtime->private_data = port; port->state = STATE_UNKNOWN; @@ -1070,9 +1072,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { struct snd_usb_midi* umidi = substream->rmidi->private_data; + struct usbmidi_out_port *port = substream->runtime->private_data; substream_open(substream, 0); - usb_autopm_put_interface(umidi->iface); + if (port->autopm_reference) + usb_autopm_put_interface(umidi->iface); return 0; } From 5f5cbb2f7fbea03a9ed7eaea7f086532b8805bab Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 07:23:24 -0500 Subject: [PATCH 049/117] Linux 3.0.55 --- Makefile | 4 ++-- arch/x86/kernel/ptrace.c | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index aba6aa36..54a25c26 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 54 -EXTRAVERSION = +SUBLEVEL = 55 +EXTRAVERSION = NAME = Sneaky Weasel # *DOCUMENTATION* diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 8ede5679..911e16d8 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -191,6 +192,7 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) return (unsigned long)regs; } +EXPORT_SYMBOL_GPL(kernel_stack_pointer); static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { From bc2eb5c6562e2b6a2d1577888e5aeb201be92e16 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 07:24:33 -0500 Subject: [PATCH 050/117] Linux 3.0.56 --- Makefile | 2 +- arch/arm/mach-dove/include/mach/pm.h | 2 +- arch/arm/mach-dove/irq.c | 14 +++++++++++++- block/scsi_ioctl.c | 5 ++++- drivers/acpi/processor_driver.c | 1 + drivers/gpu/drm/i915/intel_lvds.c | 16 ++++++++++++++++ kernel/sched_autogroup.c | 4 ---- kernel/sched_autogroup.h | 5 ----- kernel/workqueue.c | 4 +++- mm/memory-failure.c | 8 ++++++++ mm/sparse.c | 10 ++++------ net/ipv4/route.c | 4 ++++ 12 files changed, 55 insertions(+), 20 deletions(-) diff --git a/Makefile b/Makefile index 54a25c26..e0251982 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 55 +SUBLEVEL = 56 EXTRAVERSION = NAME = Sneaky Weasel diff --git 
a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h index 3ad9f946..11799c33 100644 --- a/arch/arm/mach-dove/include/mach/pm.h +++ b/arch/arm/mach-dove/include/mach/pm.h @@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin) static inline int irq_to_pmu(int irq) { - if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS) + if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) return irq - IRQ_DOVE_PMU_START; return -EINVAL; diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index f07fd16e..9f2fd100 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d) int pin = irq_to_pmu(d->irq); u32 u; + /* + * The PMU mask register is not RW0C: it is RW. This means that + * the bits take whatever value is written to them; if you write + * a '1', you will set the interrupt. + * + * Unfortunately this means there is NO race free way to clear + * these interrupts. + * + * So, let's structure the code so that the window is as small as + * possible. + */ u = ~(1 << (pin & 31)); - writel(u, PMU_INTERRUPT_CAUSE); + u &= readl_relaxed(PMU_INTERRUPT_CAUSE); + writel_relaxed(u, PMU_INTERRUPT_CAUSE); } static struct irq_chip pmu_irq_chip = { diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 5ef1f4c1..055952e9 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -722,11 +722,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd) break; } + if (capable(CAP_SYS_RAWIO)) + return 0; + /* In particular, rule out all resets and host-specific ioctls. */ printk_ratelimited(KERN_WARNING "%s: sending ioctl %x to a partition!\n", current->comm, cmd); - return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY; + return -ENOTTY; } EXPORT_SYMBOL(scsi_verify_blk_ioctl); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index a4e0f1ba..6da4f071 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -409,6 +409,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event) acpi_bus_generate_proc_event(device, event, 0); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); + break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 7adba112..645f8aa3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -759,6 +759,22 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Supermicro X7SPA-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), + }, + }, { } /* terminating entry */ }; diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c index 429242f3..f280df1e 100644 --- a/kernel/sched_autogroup.c +++ b/kernel/sched_autogroup.c @@ -160,15 +160,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) p->signal->autogroup = autogroup_kref_get(ag); - if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled)) - goto out; - t = p; do { sched_move_task(t); } while_each_thread(p, t); -out: unlock_task_sighand(p, &flags); autogroup_kref_put(prev); } diff --git 
a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h index 05577055..7b859ffe 100644 --- a/kernel/sched_autogroup.h +++ b/kernel/sched_autogroup.h @@ -1,11 +1,6 @@ #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup { - /* - * reference doesn't mean how many thread attach to this - * autogroup now. It just stands for the number of task - * could use this autogroup. - */ struct kref kref; struct task_group *tg; struct rw_semaphore lock; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6324e688..ca0f9eae 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2066,8 +2066,10 @@ static int rescuer_thread(void *__wq) repeat: set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); return 0; + } /* * See whether any cpu is asking for help. Unbounded diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2f49dcf4..eace560d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1382,9 +1382,17 @@ int soft_offline_page(struct page *page, int flags) { int ret; unsigned long pfn = page_to_pfn(page); + struct page *hpage = compound_trans_head(page); if (PageHuge(page)) return soft_offline_huge_page(page, flags); + if (PageTransHuge(hpage)) { + if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { + pr_info("soft offline: %#lx: failed to split THP\n", + pfn); + return -EBUSY; + } + } ret = get_any_page(page, pfn, flags); if (ret < 0) diff --git a/mm/sparse.c b/mm/sparse.c index 4cd05e5f..9054f836 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -619,7 +619,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) { return; /* XXX: Not implemented yet */ } -static void free_map_bootmem(struct page *page, unsigned long nr_pages) +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) { } #else @@ -660,10 +660,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) get_order(sizeof(struct page) * nr_pages)); } -static void free_map_bootmem(struct page *page, unsigned long nr_pages) +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) { unsigned long maps_section_nr, removing_section_nr, i; unsigned long magic; + struct page *page = virt_to_page(memmap); for (i = 0; i < nr_pages; i++, page++) { magic = (unsigned long) page->lru.next; @@ -712,13 +713,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap) */ if (memmap) { - struct page *memmap_page; - memmap_page = virt_to_page(memmap); - nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) >> PAGE_SHIFT; - free_map_bootmem(memmap_page, nr_pages); + free_map_bootmem(memmap, nr_pages); } } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 30cb03aa..54b5c6be 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1374,6 +1374,7 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer) struct rtable *rt = (struct rtable *) dst; __be32 orig_gw = rt->rt_gateway; struct neighbour *n, *old_n; + struct hh_cache *old_hh; dst_confirm(&rt->dst); @@ -1381,6 +1382,9 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer) n = __arp_bind_neighbour(&rt->dst, rt->rt_gateway); if (IS_ERR(n)) return PTR_ERR(n); + old_hh = xchg(&rt->dst.hh, NULL); + if (old_hh) + hh_cache_put(old_hh); old_n = xchg(&rt->dst._neighbour, n); if (old_n) neigh_release(old_n); From ff299208300b0ed09893fdac806f3876ae0fd50a Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 07:58:05 -0500 Subject: 
[PATCH 051/117] Linux 3.0.57 --- Makefile | 2 +- arch/arm/configs/vigor_aosp_defconfig | 1 - arch/arm/include/asm/hwcap.h | 1 + arch/arm/include/asm/vfpmacros.h | 12 ++--- arch/arm/vfp/vfpmodule.c | 9 ++-- arch/powerpc/kernel/ptrace.c | 18 +++++-- arch/powerpc/kernel/sysfs.c | 10 ++++ arch/powerpc/kernel/traps.c | 3 +- arch/x86/kernel/hpet.c | 4 +- drivers/acpi/battery.c | 77 +++++++++++++++++++++++++++ drivers/acpi/video.c | 14 +++++ drivers/hwmon/fam15h_power.c | 4 ++ drivers/pnp/pnpacpi/core.c | 3 ++ drivers/telephony/ixj.c | 24 ++++----- drivers/usb/host/ohci-q.c | 19 +++++++ drivers/usb/host/xhci-pci.c | 7 ++- drivers/usb/serial/cp210x.c | 1 + drivers/usb/serial/ftdi_sio.c | 3 +- drivers/usb/serial/ftdi_sio_ids.h | 6 +++ drivers/usb/serial/option.c | 25 +++++++++ drivers/usb/storage/Kconfig | 2 +- include/linux/mempolicy.h | 16 ------ kernel/trace/ftrace.c | 2 +- kernel/workqueue.c | 4 +- mm/dmapool.c | 31 +++-------- mm/mempolicy.c | 22 -------- mm/shmem.c | 22 ++++---- 27 files changed, 234 insertions(+), 108 deletions(-) diff --git a/Makefile b/Makefile index e0251982..737b9c7d 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 56 +SUBLEVEL = 57 EXTRAVERSION = NAME = Sneaky Weasel diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index e21e48a2..9f3e167f 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -552,7 +552,6 @@ CONFIG_ARM_GIC=y # CONFIG_PCI_SYSCALL is not set # CONFIG_ARCH_SUPPORTS_MSI is not set # CONFIG_PCCARD is not set -# CONFIG_ARM_ERRATA_764369 is not set # # Kernel Features diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c93a22a8..3c938262 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -23,6 +23,7 @@ #define HWCAP_VFPv4 (1 << 16) #define HWCAP_IDIVA (1 << 17) #define HWCAP_IDIVT (1 << 18) +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #if defined(__KERNEL__) && !defined(__ASSEMBLY__) diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index bf530479..c49c8f77 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -27,9 +27,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field @@ -51,9 +51,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index dcce3f6a..a2a929b8 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -605,11 +605,14 @@ static int __init 
vfp_init(void) elf_hwcap |= HWCAP_VFPv3; /* - * Check for VFPv3 D16. CPUs in this configuration - * only have 16 x 64bit registers. + * Check for VFPv3 D16 and VFPv4 D16. CPUs in + * this configuration only have 16 x 64bit + * registers. */ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) - elf_hwcap |= HWCAP_VFPv3D16; + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ + else + elf_hwcap |= HWCAP_VFPD32; } #endif /* diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index cb22024f..9321d0f4 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1497,9 +1497,14 @@ long arch_ptrace(struct task_struct *child, long request, if (index < PT_FPR0) { tmp = ptrace_get_reg(child, (int) index); } else { + unsigned int fpidx = index - PT_FPR0; + flush_fp_to_thread(child); - tmp = ((unsigned long *)child->thread.fpr) - [TS_FPRWIDTH * (index - PT_FPR0)]; + if (fpidx < (PT_FPSCR - PT_FPR0)) + tmp = ((unsigned long *)child->thread.fpr) + [fpidx * TS_FPRWIDTH]; + else + tmp = child->thread.fpscr.val; } ret = put_user(tmp, datalp); break; @@ -1525,9 +1530,14 @@ long arch_ptrace(struct task_struct *child, long request, if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { + unsigned int fpidx = index - PT_FPR0; + flush_fp_to_thread(child); - ((unsigned long *)child->thread.fpr) - [TS_FPRWIDTH * (index - PT_FPR0)] = data; + if (fpidx < (PT_FPSCR - PT_FPR0)) + ((unsigned long *)child->thread.fpr) + [fpidx * TS_FPRWIDTH] = data; + else + child->thread.fpscr.val = data; ret = 0; } break; diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index f0f2199e..cd51a5c7 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -192,6 +192,14 @@ static ssize_t show_dscr_default(struct sysdev_class *class, return sprintf(buf, "%lx\n", dscr_default); } +static void update_dscr(void *dummy) +{ + if (!current->thread.dscr_inherit) { + current->thread.dscr = dscr_default; + mtspr(SPRN_DSCR, dscr_default); + } +} + static ssize_t __used store_dscr_default(struct sysdev_class *class, struct sysdev_class_attribute *attr, const char *buf, size_t count) @@ -204,6 +212,8 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class, return -EINVAL; dscr_default = val; + on_each_cpu(update_dscr, NULL, 1); + return count; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 1a014142..6889f267 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -935,8 +935,9 @@ static int emulate_instruction(struct pt_regs *regs) cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; - mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr = regs->gpr[rd]; current->thread.dscr_inherit = 1; + mtspr(SPRN_DSCR, current->thread.dscr); return 0; } #endif diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index aa083d35..0aa649ee 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -427,7 +427,7 @@ void hpet_msi_unmask(struct irq_data *data) /* unmask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); - cfg |= HPET_TN_FSB; + cfg |= HPET_TN_ENABLE | HPET_TN_FSB; hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } @@ -438,7 +438,7 @@ void hpet_msi_mask(struct irq_data *data) /* mask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); - cfg &= ~HPET_TN_FSB; + cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index d77c97de..4a15d57b 100644 --- 
a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -34,6 +34,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI_PROCFS_POWER #include @@ -97,6 +98,18 @@ enum { */ ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, + /* On Lenovo Thinkpad models from 2010 and 2011, the power unit + switches between mWh and mAh depending on whether the system + is running on battery or not. When mAh is the unit, most + reported values are incorrect and need to be adjusted by + 10000/design_voltage. Verified on x201, t410, t410s, and x220. + Pre-2010 and 2012 models appear to always report in mWh and + are thus unaffected (tested with t42, t61, t500, x200, x300, + and x230). Also, in mid-2012 Lenovo issued a BIOS update for + the 2011 models that fixes the issue (tested on x220 with a + post-1.29 BIOS), but as of Nov. 2012, no such update is + available for the 2010 models. */ + ACPI_BATTERY_QUIRK_THINKPAD_MAH, }; struct acpi_battery { @@ -429,6 +442,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery) kfree(buffer.pointer); if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) battery->full_charge_capacity = battery->design_capacity; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->design_capacity = battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + /* Curiously, design_capacity_low, unlike the rest of them, + is correct. */ + /* capacity_granularity_* equal 1 on the systems tested, so + it's impossible to tell if they would need an adjustment + or not if their values were higher. */ + } return result; } @@ -469,6 +497,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery) && battery->capacity_now >= 0 && battery->capacity_now <= 100) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } return result; } @@ -580,6 +613,24 @@ static void acpi_battery_quirks(struct acpi_battery *battery) } } +static void find_battery(const struct dmi_header *dm, void *private) +{ + struct acpi_battery *battery = (struct acpi_battery *)private; + /* Note: the hardcoded offsets below have been extracted from + the source code of dmidecode. */ + if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { + const u8 *dmi_data = (const u8 *)(dm + 1); + int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); + if (dm->length >= 18) + dmi_capacity *= dmi_data[17]; + if (battery->design_capacity * battery->design_voltage / 1000 + != dmi_capacity && + battery->design_capacity * 10 == dmi_capacity) + set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags); + } +} + /* * According to the ACPI spec, some kinds of primary batteries can * report percentage battery remaining capacity directly to OS. 
@@ -605,6 +656,32 @@ static void acpi_battery_quirks2(struct acpi_battery *battery) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; } + + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags)) + return ; + + if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { + const char *s; + s = dmi_get_system_info(DMI_PRODUCT_VERSION); + if (s && !strnicmp(s, "ThinkPad", 8)) { + dmi_walk(find_battery, battery); + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags) && + battery->design_voltage) { + battery->design_capacity = + battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = + battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } + } + } } static int acpi_battery_update(struct acpi_battery *battery) diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 623a3357..76f0b943 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -390,6 +390,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } +static int video_ignore_initial_backlight(const struct dmi_system_id *d) +{ + use_bios_initial_backlight = 0; + return 0; +} + static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -434,6 +440,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Folio 13-2000", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), + }, + }, {} }; diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index ac2d6cb3..770e9595 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); MODULE_AUTHOR("Andreas Herrmann "); MODULE_LICENSE("GPL"); +/* Family 16h Northbridge's function 4 PCI ID */ +#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 + /* D18F3 */ #define REG_NORTHBRIDGE_CAP 0xe8 @@ -256,6 +259,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev) static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, {} }; MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 5f44b551..3f84f84f 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -94,6 +94,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) return -ENODEV; } + if (WARN_ON_ONCE(acpi_dev != dev->data)) + dev->data = acpi_dev; + ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index d5f923bc..e1abb45c 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -3190,12 +3190,12 @@ static void ixj_write_cid(IXJ *j) ixj_fsk_alloc(j); - strcpy(sdmf1, j->cid_send.month); - strcat(sdmf1, j->cid_send.day); - strcat(sdmf1, j->cid_send.hour); - strcat(sdmf1, j->cid_send.min); - strcpy(sdmf2, j->cid_send.number); - strcpy(sdmf3, j->cid_send.name); + strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); + 
strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); + strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); + strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); len1 = strlen(sdmf1); len2 = strlen(sdmf2); @@ -3340,12 +3340,12 @@ static void ixj_write_cidcw(IXJ *j) ixj_pre_cid(j); } j->flags.cidcw_ack = 0; - strcpy(sdmf1, j->cid_send.month); - strcat(sdmf1, j->cid_send.day); - strcat(sdmf1, j->cid_send.hour); - strcat(sdmf1, j->cid_send.min); - strcpy(sdmf2, j->cid_send.number); - strcpy(sdmf3, j->cid_send.name); + strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); + strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); + strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); len1 = strlen(sdmf1); len2 = strlen(sdmf2); diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index dd24fc11..e66eb299 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -1130,6 +1130,25 @@ dl_done_list (struct ohci_hcd *ohci) while (td) { struct td *td_next = td->next_dl_td; + struct ed *ed = td->ed; + + /* + * Some OHCI controllers (NVIDIA for sure, maybe others) + * occasionally forget to add TDs to the done queue. Since + * TDs for a given endpoint are always processed in order, + * if we find a TD on the donelist then all of its + * predecessors must be finished as well. + */ + for (;;) { + struct td *td2; + + td2 = list_first_entry(&ed->td_list, struct td, + td_list); + if (td2 == td) + break; + takeback_td(ohci, td2); + } + takeback_td(ohci, td); td = td_next; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 73fea4b8..7998b6fc 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -28,6 +28,7 @@ /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_ASROCK_P67 0x7023 @@ -109,8 +110,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd) /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) { - if (pdev->revision == 0x0) { + (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && + pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 35e6b5f6..381d00d3 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 00f1bf53..c3770e5c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c 
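For context on the ixj.c conversion a few hunks up: strlcpy() and strlcat() take the full size of the destination buffer, truncate instead of overflowing, and always leave the result NUL-terminated, which is the point of replacing the unbounded strcpy()/strcat() calls on the fixed-size sdmf1/sdmf2/sdmf3 arrays. A self-contained illustration follows; the local my_strlcpy() stand-in and the 8-byte buffer are invented for the example, since not every libc ships strlcpy().

#include <stdio.h>
#include <string.h>

/* minimal strlcpy()-style helper: copies at most size-1 bytes, always
 * NUL-terminates, and returns the length of src so truncation is detectable */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char buf[8];	/* deliberately smaller than the input */
	size_t wanted = my_strlcpy(buf, "0123456789", sizeof(buf));

	printf("\"%s\" (kept %zu of %zu)\n", buf, strlen(buf), wanted);
	/* prints "0123456" (kept 7 of 10) -- truncated, not overflowed */
	return 0;
}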
@@ -195,6 +195,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -1797,7 +1798,7 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) dbg("%s", __func__); if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100"))) + (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) return ftdi_jtag_probe(serial); return 0; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 7b5eb742..aedf65fc 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -752,6 +752,12 @@ #define TTI_VID 0x103E /* Vendor Id */ #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */ +/* + * Newport Cooperation (www.newport.com) + */ +#define NEWPORT_VID 0x104D +#define NEWPORT_AGILIS_PID 0x3000 + /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ #define INTERBIOMETRICS_VID 0x1209 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a5f875d0..872807ba 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -80,6 +80,7 @@ static void option_instat_callback(struct urb *urb); #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 +#define HUAWEI_PRODUCT_E173 0x140C #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_K4605 0x14C6 @@ -552,6 +553,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), @@ -883,6 +886,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, @@ -903,20 +910,34 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, @@ -1096,6 +1117,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 97987255..dc272600 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig @@ -199,7 +199,7 @@ config USB_STORAGE_ENE_UB6250 config USB_UAS tristate "USB Attached SCSI" - depends on USB && SCSI + depends on USB && SCSI && BROKEN help The USB Attached SCSI protocol is supported by some USB storage devices. 
It permits higher performance by supporting diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 3e8f2f70..f85c5ab2 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) __mpol_put(pol); } -extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol); -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol) -{ - if (!frompol) - return frompol; - return __mpol_cond_copy(tompol, frompol); -} - extern struct mempolicy *__mpol_dup(struct mempolicy *pol); static inline struct mempolicy *mpol_dup(struct mempolicy *pol) { @@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) { } -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, - struct mempolicy *from) -{ - return from; -} - static inline void mpol_get(struct mempolicy *pol) { } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9f8e2e11..f88ea18d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2058,7 +2058,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) { iter->pos = 0; iter->func_pos = 0; - iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); + iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); } static void *t_start(struct seq_file *m, loff_t *pos) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ca0f9eae..58c93e7c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1167,8 +1167,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { unsigned int lcpu; - BUG_ON(timer_pending(timer)); - BUG_ON(!list_empty(&work->entry)); + WARN_ON_ONCE(timer_pending(timer)); + WARN_ON_ONCE(!list_empty(&work->entry)); timer_stats_timer_set_start_info(&dwork->timer); diff --git a/mm/dmapool.c b/mm/dmapool.c index 03bf3bb4..f8e675ee 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -49,7 +49,6 @@ struct dma_pool { /* the pool */ size_t allocation; size_t boundary; char name[32]; - wait_queue_head_t waitq; struct list_head pools; }; @@ -61,8 +60,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ unsigned int offset; }; -#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) - static DEFINE_MUTEX(pools_lock); static ssize_t @@ -171,7 +168,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, retval->size = size; retval->boundary = boundary; retval->allocation = allocation; - init_waitqueue_head(&retval->waitq); if (dev) { int ret; @@ -226,7 +222,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) memset(page->vaddr, POOL_POISON_FREED, pool->allocation); #endif pool_initialise_page(pool, page); - list_add(&page->page_list, &pool->page_list); page->in_use = 0; page->offset = 0; } else { @@ -314,30 +309,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, might_sleep_if(mem_flags & __GFP_WAIT); spin_lock_irqsave(&pool->lock, flags); - restart: list_for_each_entry(page, &pool->page_list, page_list) { if (page->offset < pool->allocation) goto ready; } - page = pool_alloc_page(pool, GFP_ATOMIC); - if (!page) { - if (mem_flags & __GFP_WAIT) { - DECLARE_WAITQUEUE(wait, current); - __set_current_state(TASK_UNINTERRUPTIBLE); - __add_wait_queue(&pool->waitq, &wait); - spin_unlock_irqrestore(&pool->lock, flags); + /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ + 
spin_unlock_irqrestore(&pool->lock, flags); - schedule_timeout(POOL_TIMEOUT_JIFFIES); + page = pool_alloc_page(pool, mem_flags); + if (!page) + return NULL; - spin_lock_irqsave(&pool->lock, flags); - __remove_wait_queue(&pool->waitq, &wait); - goto restart; - } - retval = NULL; - goto done; - } + spin_lock_irqsave(&pool->lock, flags); + list_add(&page->page_list, &pool->page_list); ready: page->in_use++; offset = page->offset; @@ -347,7 +333,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, #ifdef DMAPOOL_DEBUG memset(retval, POOL_POISON_ALLOCATED, pool->size); #endif - done: spin_unlock_irqrestore(&pool->lock, flags); return retval; } @@ -434,8 +419,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) page->in_use--; *(int *)vaddr = page->offset; page->offset = offset; - if (waitqueue_active(&pool->waitq)) - wake_up_locked(&pool->waitq); /* * Resist a temptation to do * if (!is_page_busy(page)) pool_free_page(pool, page); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5dce7d46..04282baf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1973,28 +1973,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) return new; } -/* - * If *frompol needs [has] an extra ref, copy *frompol to *tompol , - * eliminate the * MPOL_F_* flags that require conditional ref and - * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly - * after return. Use the returned value. - * - * Allows use of a mempolicy for, e.g., multiple allocations with a single - * policy lookup, even if the policy needs/has extra ref on lookup. - * shmem_readahead needs this. - */ -struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol) -{ - if (!mpol_needs_cond_ref(frompol)) - return frompol; - - *tompol = *frompol; - tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ - __mpol_put(frompol); - return tompol; -} - /* Slow path of a mempolicy comparison */ int __mpol_equal(struct mempolicy *a, struct mempolicy *b) { diff --git a/mm/shmem.c b/mm/shmem.c index e89e9e0c..1208f765 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1168,19 +1168,20 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { - struct mempolicy mpol, *spol; struct vm_area_struct pvma; struct page *page; - spol = mpol_cond_copy(&mpol, - mpol_shared_policy_lookup(&info->policy, idx)); - /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; pvma.vm_pgoff = idx; pvma.vm_ops = NULL; - pvma.vm_policy = spol; + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + page = swapin_readahead(entry, gfp, &pvma, 0); + + /* Drop reference taken by mpol_shared_policy_lookup() */ + mpol_cond_put(pvma.vm_policy); + return page; } @@ -1188,6 +1189,7 @@ static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { struct vm_area_struct pvma; + struct page *page; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; @@ -1195,10 +1197,12 @@ static struct page *shmem_alloc_page(gfp_t gfp, pvma.vm_ops = NULL; pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); - /* - * alloc_page_vma() will drop the shared policy reference - */ - return alloc_page_vma(gfp, &pvma, 0); + page = alloc_page_vma(gfp, &pvma, 0); + + /* Drop reference taken by mpol_shared_policy_lookup() */ + mpol_cond_put(pvma.vm_policy); + + return page; } #else /* !CONFIG_NUMA */ #ifdef 
CONFIG_TMPFS From b6da56c011b0afd8e937ec080d26eb0b19dac54a Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 08:05:00 -0500 Subject: [PATCH 052/117] Linux 3.0.58 --- Documentation/networking/ip-sysctl.txt | 5 ++ Makefile | 2 +- arch/arm/kernel/swp_emulate.c | 2 + arch/arm/mm/mmu.c | 2 +- arch/cris/include/asm/io.h | 39 +++++++++-- arch/sparc/include/asm/hugetlb.h | 10 ++- arch/x86/kernel/cpu/amd.c | 14 ++++ drivers/acpi/scan.c | 2 +- drivers/atm/solos-pci.c | 5 +- drivers/bluetooth/ath3k.c | 1 + drivers/bluetooth/btusb.c | 1 + drivers/input/joystick/walkera0701.c | 7 +- drivers/input/serio/i8042-x86ia64io.h | 9 +++ drivers/net/bonding/bond_main.c | 7 ++ drivers/net/bonding/bond_sysfs.c | 2 + drivers/net/can/dev.c | 3 +- drivers/net/irda/sir_dev.c | 2 +- drivers/net/ne.c | 1 + drivers/net/usb/ipheth.c | 5 ++ drivers/net/wireless/p54/p54usb.c | 4 ++ drivers/pci/quirks.c | 7 +- drivers/pnp/pnpacpi/core.c | 2 +- drivers/rtc/rtc-vt8500.c | 11 +-- drivers/usb/gadget/f_phonet.c | 8 ++- drivers/usb/host/ehci-pci.c | 3 +- drivers/usb/host/pci-quirks.c | 4 +- drivers/virtio/virtio_ring.c | 7 ++ fs/binfmt_misc.c | 5 +- fs/binfmt_script.c | 4 +- fs/exec.c | 15 +++++ fs/nfs/client.c | 3 +- fs/nfs/dir.c | 11 +-- fs/nfs/super.c | 2 +- fs/nfsd/nfs4xdr.c | 11 ++- include/asm-generic/tlb.h | 9 +++ include/linux/binfmts.h | 1 + include/linux/compiler-gcc.h | 5 ++ include/linux/page-flags.h | 8 ++- include/linux/pci_ids.h | 1 + include/linux/snmp.h | 3 +- include/net/tcp.h | 1 + kernel/cgroup.c | 2 - kernel/irq/manage.c | 23 ++++++- mm/memory.c | 5 ++ mm/mempolicy.c | 64 +++++++----------- net/bluetooth/hci_core.c | 2 + net/ipv4/proc.c | 3 +- net/ipv4/sysctl_net_ipv4.c | 7 ++ net/ipv4/tcp_input.c | 93 +++++++++++++++++++------- net/sched/sch_htb.c | 2 +- net/sctp/chunk.c | 20 ++++-- net/sctp/socket.c | 4 +- sound/usb/midi.c | 91 ++++++++++++++++--------- 53 files changed, 406 insertions(+), 154 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 7d4ecaa5..890fce9b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -534,6 +534,11 @@ tcp_thin_dupack - BOOLEAN Documentation/networking/tcp-thin.txt Default: 0 +tcp_challenge_ack_limit - INTEGER + Limits number of Challenge ACK sent per second, as recommended + in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) + Default: 100 + UDP variables: udp_mem - vector of 3 INTEGERs: min, pressure, max diff --git a/Makefile b/Makefile index 737b9c7d..4f1ae9ca 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 57 +SUBLEVEL = 58 EXTRAVERSION = NAME = Sneaky Weasel diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 40ee7e50..0951a324 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -108,10 +108,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) { siginfo_t info; + down_read(¤t->mm->mmap_sem); if (find_vma(current->mm, addr) == NULL) info.si_code = SEGV_MAPERR; else info.si_code = SEGV_ACCERR; + up_read(¤t->mm->mmap_sem); info.si_signo = SIGSEGV; info.si_errno = 0; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 0f79b76c..7fcfb483 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -496,7 +496,7 @@ static void __init build_mem_type_table(void) } for (i = 0; i < 16; i++) { - unsigned long v = pgprot_val(protection_map[i]); + pteval_t v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v 
| user_pgprot); } diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h index 32567bc2..ac12ae2b 100644 --- a/arch/cris/include/asm/io.h +++ b/arch/cris/include/asm/io.h @@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr) #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0) #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0) #define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0) -#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1) -#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1) -#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1) -#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count) -#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count) -#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count) +static inline void outb(unsigned char data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 1, 1); +} +static inline void outw(unsigned short data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 2, 1); +} +static inline void outl(unsigned int data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 4, 1); +} +static inline void outsb(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 1, count); +} +static inline void outsw(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 2, count); +} +static inline void outsl(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 4, count); +} /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 17706106..f368cef0 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - ptep_set_wrprotect(mm, addr, ptep); + pte_t old_pte = *ptep; + set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); } static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + int changed = !pte_same(*ptep, pte); + if (changed) { + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); + flush_tlb_page(vma, addr); + } + return changed; } static inline pte_t huge_ptep_get(pte_t *ptep) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 8115040e..3f4b6dac 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -554,6 +554,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. 
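One note on the cris io.h change earlier in this patch: the old outb()/outw()/outsb() macros expanded to an unbraced if (cris_iops) statement, and an if hidden inside a macro will capture a caller's else branch; the new static inlines avoid that and also pass &data instead of casting the data value itself to a pointer. The snippet below is a compilable, purely illustrative reproduction of the macro pitfall with invented names, not kernel code.

#include <stdio.h>

static int device_present;	/* plays the role of cris_iops */

/* old style: an if statement hiding inside a macro */
#define WRITE_REG_MACRO(val) if (device_present) printf("macro wrote %d\n", val)

/* new style: the same logic as a function */
static void write_reg_inline(int val)
{
	if (device_present)
		printf("inline wrote %d\n", val);
}

int main(void)
{
	int port_enabled = 0;

	if (port_enabled)
		WRITE_REG_MACRO(1);
	else
		printf("macro path: port disabled\n");	/* never runs: this else
							   binds to the macro's if */

	if (port_enabled)
		write_reg_inline(1);
	else
		printf("inline path: port disabled\n");	/* runs as intended */

	return 0;
}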
+ */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + checking_wrmsrl(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 449c5562..8cb97421 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1153,7 +1153,7 @@ static void acpi_device_set_id(struct acpi_device *device) acpi_add_id(device, ACPI_DOCK_HID); else if (!acpi_ibm_smbus_match(device)) acpi_add_id(device, ACPI_SMBUS_IBM_HID); - else if (!acpi_device_hid(device) && + else if (list_empty(&device->pnp.ids) && ACPI_IS_ROOT_DEVICE(device->parent)) { acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index d4525928..adfce9f1 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card) for (port = 0; tx_pending; tx_pending >>= 1, port++) { if (tx_pending & 1) { struct sk_buff *oldskb = card->tx_skb[port]; - if (oldskb) + if (oldskb) { pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, oldskb->len, PCI_DMA_TODEVICE); - + card->tx_skb[port] = NULL; + } spin_lock(&card->tx_queue_lock); skb = skb_dequeue(&card->tx_queue[port]); if (!skb) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 05f27eaa..462cde73 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -65,6 +65,7 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3304) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fa9803dd..7ed0b6fd 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -123,6 +123,7 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index 4dfa1eed..f8f892b0 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev) struct walkera_dev *w = input_get_drvdata(dev); parport_disable_irq(w->parport); + hrtimer_cancel(&w->timer); } static int walkera0701_connect(struct walkera_dev *w, int parport) @@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) if (parport_claim(w->pardevice)) goto init_err1; + hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + w->timer.function = timer_handler; + w->input_dev = input_allocate_device(); if (!w->input_dev) goto init_err2; @@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) if (err) goto init_err3; - hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - w->timer.function = timer_handler; return 0; init_err3: @@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) static void 
walkera0701_disconnect(struct walkera_dev *w) { - hrtimer_cancel(&w->timer); input_unregister_device(w->input_dev); parport_release(w->pardevice); parport_unregister_device(w->pardevice); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index e01fd4cc..20153fe8 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -900,6 +900,7 @@ static int __init i8042_platform_init(void) int retval; #ifdef CONFIG_X86 + u8 a20_on = 0xdf; /* Just return if pre-detection shows no i8042 controller exist */ if (!x86_platform.i8042_detect()) return -ENODEV; @@ -939,6 +940,14 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; + + /* + * A20 was already enabled during early kernel init. But some buggy + * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to + * resume from S3. So we do it here and hope that nothing breaks. + */ + i8042_command(&a20_on, 0x10d1); + i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */ #endif /* CONFIG_X86 */ return retval; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 504e201f..6f8b2688 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1439,6 +1439,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; u32 vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; read_lock(&bond->lock); @@ -1452,11 +1454,16 @@ static void bond_compute_features(struct bonding *bond) if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); read_unlock(&bond->lock); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 06246109..8a967358 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1524,6 +1524,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, goto out; } + read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (!bond_is_active_slave(slave)) { if (new_value) @@ -1532,6 +1533,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, slave->inactive = 1; } } + read_unlock(&bond->lock); out: return ret; } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index d0f8c7e6..cc3ea0d5 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -557,8 +557,7 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - if (del_timer_sync(&priv->restart_timer)) - dev_put(dev); + del_timer_sync(&priv->restart_timer); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index efe05bb3..ddec154f 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -221,7 +221,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; 
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 1063093b..e8ee2bc3 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -814,6 +814,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index db9b2126..ab43674a 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -62,6 +62,7 @@ #define USB_PRODUCT_IPAD 0x129a #define USB_PRODUCT_IPHONE_4_VZW 0x129c #define USB_PRODUCT_IPHONE_4S 0x12a0 +#define USB_PRODUCT_IPHONE_5 0x12a8 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = { USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index f44f2f3a..f1fa7636 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -46,6 +46,7 @@ static struct usb_device_id p54u_table[] = { {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ @@ -81,6 +82,8 @@ static struct usb_device_id p54u_table[] = { {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ + {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ @@ -100,6 +103,7 @@ static struct usb_device_id p54u_table[] = { {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a6b07dda..a9b12497 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2746,7 +2746,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) if (PCI_FUNC(dev->devfn)) return; /* - * RICOH 0xe823 SD/MMC card reader fails to recognize + * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize * certain types of SD/MMC cards. Lowering the SD base * clock frequency from 200Mhz to 50Mhz fixes this issue. 
* @@ -2757,7 +2757,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) * 0xf9 - Key register for 0x150 * 0xfc - key register for 0xe1 */ - if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { + if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || + dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { pci_write_config_byte(dev, 0xf9, 0xfc); pci_write_config_byte(dev, 0x150, 0x10); pci_write_config_byte(dev, 0xf9, 0x00); @@ -2784,6 +2785,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 3f84f84f..8ac530a4 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -57,7 +57,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev) if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ return 0 #define TEST_ALPHA(c) \ - if (!('@' <= (c) || (c) <= 'Z')) \ + if (!('A' <= (c) && (c) <= 'Z')) \ return 0 static int __init ispnpidacpi(const char *id) { diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index efd6066b..8a16f2c7 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c @@ -69,7 +69,7 @@ | ALARM_SEC_BIT) #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ -#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */ +#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */ @@ -116,7 +116,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); - tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S); + tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1; tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) + ((date >> DATE_CENTURY_S) & 1 ? 
200 : 100); tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; @@ -135,8 +135,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) } writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) - | (bin2bcd(tm->tm_mon) << DATE_MONTH_S) - | (bin2bcd(tm->tm_mday)), + | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) + | (bin2bcd(tm->tm_mday)) + | ((tm->tm_year >= 200) << DATE_CENTURY_S), vt8500_rtc->regbase + VT8500_RTC_DS); writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) @@ -246,7 +247,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) } /* Enable RTC and set it to 24-hour mode */ - writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H, + writel(VT8500_RTC_CR_ENABLE, vt8500_rtc->regbase + VT8500_RTC_CR); vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 5e149509..459dbdea 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -541,7 +541,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL); if (!req) - goto err; + goto err_req; req->complete = pn_rx_complete; fp->out_reqv[i] = req; @@ -550,14 +550,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) /* Outgoing USB requests */ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL); if (!fp->in_req) - goto err; + goto err_req; INFO(cdev, "USB CDC Phonet function\n"); INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name, fp->out_ep->name, fp->in_ep->name); return 0; +err_req: + for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) + usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); err: + if (fp->out_ep) fp->out_ep->driver_data = NULL; if (fp->in_ep) diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index f7683148..175c574e 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -359,7 +359,8 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev) pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == 0x1E26 || pdev->device == 0x8C2D || - pdev->device == 0x8C26); + pdev->device == 0x8C26 || + pdev->device == 0x9C26); } static void ehci_enable_xhci_companion(void) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 3f623fb5..0f097d36 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -721,6 +721,7 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, } #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31 +#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31 bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev) { @@ -734,7 +735,8 @@ bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev) { return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI; + (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI); } bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 68b91368..92bd773d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -120,6 +120,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq, unsigned head; int i; + /* + * We require lowmem mappings for the descriptors because + * otherwise virt_to_phys will give us bogus addresses in the + * virtqueue. 
+ */ + gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); + desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 1befe2ec..54639527 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -176,7 +176,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto _error; bprm->argc ++; - bprm->interp = iname; /* for binfmt_script */ + /* Update interp in case binfmt_script needs it. */ + retval = bprm_change_interp(iname, bprm); + if (retval < 0) + goto _error; interp_file = open_exec (iname); retval = PTR_ERR (interp_file); diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index 396a9884..e39c18af 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs) retval = copy_strings_kernel(1, &i_name, bprm); if (retval) return retval; bprm->argc++; - bprm->interp = interp; + retval = bprm_change_interp(interp, bprm); + if (retval < 0) + return retval; /* * OK, now restart the process with the interpreter's dentry. diff --git a/fs/exec.c b/fs/exec.c index 044c13ff..08f3e4e4 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1192,9 +1192,24 @@ void free_bprm(struct linux_binprm *bprm) mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } + /* If a binfmt changed the interp, free it. */ + if (bprm->interp != bprm->filename) + kfree(bprm->interp); kfree(bprm); } +int bprm_change_interp(char *interp, struct linux_binprm *bprm) +{ + /* If a binfmt changed the interp, free it first. */ + if (bprm->interp != bprm->filename) + kfree(bprm->interp); + bprm->interp = kstrdup(interp, GFP_KERNEL); + if (!bprm->interp) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL(bprm_change_interp); + /* * install the new credentials for this executable */ diff --git a/fs/nfs/client.c b/fs/nfs/client.c index b3dc2b88..0cb731f2 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -673,8 +673,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp, */ static void nfs_destroy_server(struct nfs_server *server) { - if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) || - !(server->flags & NFS_MOUNT_LOCAL_FCNTL)) + if (server->nlm_host) nlmclnt_done(server->nlm_host); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 9515bc57..4033264e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1216,11 +1216,14 @@ static int nfs_dentry_delete(const struct dentry *dentry) } +/* Ensure that we revalidate inode->i_nlink */ static void nfs_drop_nlink(struct inode *inode) { spin_lock(&inode->i_lock); - if (inode->i_nlink > 0) - drop_nlink(inode); + /* drop the inode if we're reasonably sure this is the last link */ + if (inode->i_nlink == 1) + clear_nlink(inode); + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; spin_unlock(&inode->i_lock); } @@ -1235,8 +1238,8 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { - drop_nlink(inode); nfs_complete_unlink(dentry, inode); + nfs_drop_nlink(inode); } iput(inode); } @@ -1788,10 +1791,8 @@ static int nfs_safe_remove(struct dentry *dentry) if (inode != NULL) { nfs_inode_return_delegation(inode); error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); - /* The VFS may want to delete this inode */ if (error == 0) nfs_drop_nlink(inode); - nfs_mark_for_revalidate(inode); } else error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); if (error == -ENOENT) diff --git a/fs/nfs/super.c 
b/fs/nfs/super.c index 23f02232..a5b24197 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1052,7 +1052,7 @@ static int nfs_get_option_str(substring_t args[], char **option) { kfree(*option); *option = match_strdup(args); - return !option; + return !*option; } static int nfs_get_option_ul(substring_t args[], unsigned long *option) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f91d5899..ecdd18ab 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2682,11 +2682,16 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, len = maxcount; v = 0; while (len > 0) { - pn = resp->rqstp->rq_resused++; + pn = resp->rqstp->rq_resused; + if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */ + maxcount -= len; + break; + } resp->rqstp->rq_vec[v].iov_base = page_address(resp->rqstp->rq_respages[pn]); resp->rqstp->rq_vec[v].iov_len = len < PAGE_SIZE ? len : PAGE_SIZE; + resp->rqstp->rq_resused++; v++; len -= PAGE_SIZE; } @@ -2734,6 +2739,8 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd return nfserr; if (resp->xbuf->page_len) return nfserr_resource; + if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) + return nfserr_resource; page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); @@ -2783,6 +2790,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 return nfserr; if (resp->xbuf->page_len) return nfserr_resource; + if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) + return nfserr_resource; RESERVE_SPACE(8); /* verifier */ savep = p; diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index e58fa777..34e02747 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -78,6 +78,14 @@ struct mmu_gather_batch { #define MAX_GATHER_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) +/* + * Limit the maximum number of mmu_gather batches to reduce a risk of soft + * lockups for non-preemptible kernels on huge machines when a lot of memory + * is zapped during unmapping. + * 10K pages freed at once should be safe even without a preemption point. + */ +#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) + /* struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page. 
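To put the new MAX_GATHER_BATCH_COUNT cap in perspective: assuming 4 KiB pages and a 16-byte struct mmu_gather_batch header on a 64-bit build (assumptions made for this estimate, not values taken from the patch), MAX_GATHER_BATCH works out to 510 page pointers per batch and the cap to 19 batches, i.e. on the order of 9,700 pages freed per mmu_gather before the forced flush point. A throwaway calculation:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* assumed 4 KiB pages */
	unsigned long batch_header = 16;	/* next + nr + max on a 64-bit build */
	unsigned long pages_per_batch = (page_size - batch_header) / sizeof(void *);
	unsigned long batch_count = 10000UL / pages_per_batch;

	printf("pages per batch:     %lu\n", pages_per_batch);	/* 510 with 8-byte pointers */
	printf("batches per gather:  %lu\n", batch_count);	/* 19 */
	printf("pages before flush: ~%lu\n", batch_count * pages_per_batch);
	return 0;
}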
*/ @@ -94,6 +102,7 @@ struct mmu_gather { struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; + unsigned int batch_count; }; #define HAVE_GENERIC_MMU_GATHER diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8845613f..384e37f6 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -126,6 +126,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, unsigned long stack_top, int executable_stack); extern int bprm_mm_init(struct linux_binprm *bprm); +extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); extern int prepare_bprm_creds(struct linux_binprm *bprm); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 59e4028e..3fd17c24 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -50,6 +50,11 @@ # define inline inline __attribute__((always_inline)) # define __inline__ __inline__ __attribute__((always_inline)) # define __inline __inline __attribute__((always_inline)) +#else +/* A lot of inline functions can cause havoc with function tracing */ +# define inline inline notrace +# define __inline__ __inline__ notrace +# define __inline __inline notrace #endif #define __deprecated __attribute__((deprecated)) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6081493d..cfd77026 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -365,7 +365,7 @@ static inline void ClearPageCompound(struct page *page) * pages on the LRU and/or pagecache. */ TESTPAGEFLAG(Compound, compound) -__PAGEFLAG(Head, compound) +__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) /* * PG_reclaim is used in combination with PG_compound to mark the @@ -377,8 +377,14 @@ __PAGEFLAG(Head, compound) * PG_compound & PG_reclaim => Tail page * PG_compound & ~PG_reclaim => Head page */ +#define PG_head_mask ((1L << PG_compound)) #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) +static inline int PageHead(struct page *page) +{ + return ((page->flags & PG_head_tail_mask) == PG_head_mask); +} + static inline int PageTail(struct page *page) { return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 864daf31..f269c03c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1537,6 +1537,7 @@ #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 +#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 12b2b18e..10360c71 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -209,7 +209,6 @@ enum LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */ LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */ LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */ - LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */ LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */ LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */ LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */ @@ -231,6 +230,8 @@ enum LINUX_MIB_TCPDEFERACCEPTDROP, LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ + LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */ + 
LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */ __LINUX_MIB_MAX }; diff --git a/include/net/tcp.h b/include/net/tcp.h index 7377393b..3180f6c6 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -246,6 +246,7 @@ extern int sysctl_tcp_max_ssthresh; extern int sysctl_tcp_cookie_size; extern int sysctl_tcp_thin_linear_timeouts; extern int sysctl_tcp_thin_dupack; +extern int sysctl_tcp_challenge_ack_limit; extern atomic_long_t tcp_memory_allocated; extern struct percpu_counter tcp_sockets_allocated; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9418559e..7fdf1917 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2665,9 +2665,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, dentry->d_fsdata = cgrp; inc_nlink(parent->d_inode); rcu_assign_pointer(cgrp->dentry, dentry); - dget(dentry); } - dput(dentry); return error; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6b1f12f7..d16cf1ce 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -724,6 +724,7 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { cpumask_var_t mask; + bool valid = true; if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) return; @@ -738,10 +739,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) } raw_spin_lock_irq(&desc->lock); - cpumask_copy(mask, desc->irq_data.affinity); + /* + * This code is triggered unconditionally. Check the affinity + * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. + */ + if (desc->irq_data.affinity) + cpumask_copy(mask, desc->irq_data.affinity); + else + valid = false; raw_spin_unlock_irq(&desc->lock); - set_cpus_allowed_ptr(current, mask); + if (valid) + set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); } #else @@ -957,6 +966,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ get_task_struct(t); new->thread = t; + /* + * Tell the thread to set its affinity. This is + * important for shared interrupt handlers as we do + * not invoke setup_affinity() for the secondary + * handlers as everything is already set up. Even for + * interrupts marked with IRQF_NO_BALANCE this is + * correct as we want the thread to move to the cpu(s) + * on which the requesting code placed the interrupt. + */ + set_bit(IRQTF_AFFINITY, &new->thread_flags); } if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { diff --git a/mm/memory.c b/mm/memory.c index d49b58ab..7292acb9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -205,10 +205,14 @@ static int tlb_next_batch(struct mmu_gather *tlb) return 1; } + if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) + return 0; + batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); if (!batch) return 0; + tlb->batch_count++; batch->next = NULL; batch->nr = 0; batch->max = MAX_GATHER_BATCH; @@ -235,6 +239,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) tlb->local.nr = 0; tlb->local.max = ARRAY_SIZE(tlb->__pages); tlb->active = &tlb->local; + tlb->batch_count = 0; #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb->batch = NULL; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 04282baf..0367beb1 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2308,8 +2308,7 @@ void numa_default_policy(void) */ /* - * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag - * Used only for mpol_parse_str() and mpol_to_str() + * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 
*/ #define MPOL_LOCAL MPOL_MAX static const char * const policy_modes[] = @@ -2324,28 +2323,21 @@ static const char * const policy_modes[] = #ifdef CONFIG_TMPFS /** - * mpol_parse_str - parse string to mempolicy + * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. - * @no_context: flag whether to "contextualize" the mempolicy + * @unused: redundant argument, to be removed later. * * Format of input: * [=][:] * - * if @no_context is true, save the input nodemask in w.user_nodemask in - * the returned mempolicy. This will be used to "clone" the mempolicy in - * a specific context [cpuset] at a later time. Used to parse tmpfs mpol - * mount option. Note that if 'static' or 'relative' mode flags were - * specified, the input nodemask will already have been saved. Saving - * it again is redundant, but safe. - * * On success, returns 0, else 1 */ -int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) +int mpol_parse_str(char *str, struct mempolicy **mpol, int unused) { struct mempolicy *new = NULL; unsigned short mode; - unsigned short uninitialized_var(mode_flags); + unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); @@ -2433,24 +2425,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) if (IS_ERR(new)) goto out; - if (no_context) { - /* save for contextualization */ - new->w.user_nodemask = nodes; - } else { - int ret; - NODEMASK_SCRATCH(scratch); - if (scratch) { - task_lock(current); - ret = mpol_set_nodemask(new, &nodes, scratch); - task_unlock(current); - } else - ret = -ENOMEM; - NODEMASK_SCRATCH_FREE(scratch); - if (ret) { - mpol_put(new); - goto out; - } - } + /* + * Save nodes for mpol_to_str() to show the tmpfs mount options + * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. + */ + if (mode != MPOL_PREFERRED) + new->v.nodes = nodes; + else if (nodelist) + new->v.preferred_node = first_node(nodes); + else + new->flags |= MPOL_F_LOCAL; + + /* + * Save nodes for contextualization: this will be used to "clone" + * the mempolicy in a specific context [cpuset] at a later time. + */ + new->w.user_nodemask = nodes; + err = 0; out: @@ -2470,13 +2461,13 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted - * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask + * @unused: redundant argument, to be removed later. * * Convert a mempolicy into a string. 
* Returns the number of characters in buffer (if positive) * or an error (negative) */ -int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) +int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused) { char *p = buffer; int l; @@ -2502,7 +2493,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) case MPOL_PREFERRED: nodes_clear(nodes); if (flags & MPOL_F_LOCAL) - mode = MPOL_LOCAL; /* pseudo-policy */ + mode = MPOL_LOCAL; else node_set(pol->v.preferred_node, nodes); break; @@ -2510,10 +2501,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: - if (no_context) - nodes = pol->w.user_nodemask; - else - nodes = pol->v.nodes; + nodes = pol->v.nodes; break; default: diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 882eda14..4e4824a9 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1581,6 +1581,8 @@ int hci_unregister_dev(struct hci_dev *hdev) for (i = 0; i < NUM_REASSEMBLY; i++) kfree_skb(hdev->reassembly[i]); + cancel_work_sync(&hdev->power_on); + if (!test_bit(HCI_INIT, &hdev->flags) && !test_bit(HCI_SETUP, &hdev->flags) && hdev->dev_type == HCI_BREDR) { diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index b14ec7d0..df0f8155 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -232,7 +232,6 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT), SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV), SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV), - SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN), SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA), SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE), SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY), @@ -254,6 +253,8 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), + SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK), + SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 57d0752e..46b59501 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -566,6 +566,13 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_challenge_ack_limit", + .data = &sysctl_tcp_challenge_ack_limit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, #ifdef CONFIG_NET_DMA { .procname = "tcp_dma_copybreak", diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f7c16293..39c6019a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -86,6 +86,9 @@ int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); +/* rfc5961 challenge ack rate limiting */ +int sysctl_tcp_challenge_ack_limit = 100; + int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; @@ -3655,6 +3658,24 @@ static int tcp_process_frto(struct sock *sk, int flag) return 0; } +/* RFC 5961 7 [ACK Throttling] */ +static void tcp_send_challenge_ack(struct sock *sk) +{ + /* unprotected vars, we dont care of overwrites */ + 
static u32 challenge_timestamp; + static unsigned int challenge_count; + u32 now = jiffies / HZ; + + if (now != challenge_timestamp) { + challenge_timestamp = now; + challenge_count = 0; + } + if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); + tcp_send_ack(sk); + } +} + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) { @@ -3671,8 +3692,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) /* If the ack is older than previous acks * then we can probably ignore it. */ - if (before(ack, prior_snd_una)) + if (before(ack, prior_snd_una)) { + /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ + if (before(ack, prior_snd_una - tp->max_window)) { + tcp_send_challenge_ack(sk); + return -1; + } goto old_ack; + } /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). @@ -5190,8 +5217,8 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ -static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, - struct tcphdr *th, int syn_inerr) +static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, + struct tcphdr *th, int syn_inerr) { u8 *hash_location; struct tcp_sock *tp = tcp_sk(sk); @@ -5216,38 +5243,48 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". */ - if (!th->rst) + if (!th->rst) { + if (th->syn) + goto syn_challenge; tcp_send_dupack(sk, skb); + } goto discard; } /* Step 2: check RST bit */ if (th->rst) { - tcp_reset(sk); + /* RFC 5961 3.2 : + * If sequence number exactly matches RCV.NXT, then + * RESET the connection + * else + * Send a challenge ACK + */ + if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) + tcp_reset(sk); + else + tcp_send_challenge_ack(sk); goto discard; } - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 3: check security and precedence [ignored] */ - /* step 4: Check for a SYN in window. */ - if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { + /* step 4: Check for a SYN + * RFC 5691 4.2 : Send a challenge ack + */ + if (th->syn) { +syn_challenge: if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); - tcp_reset(sk); - return -1; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); + tcp_send_challenge_ack(sk); + goto discard; } - return 1; + return true; discard: __kfree_skb(skb); - return 0; + return false; } /* @@ -5277,7 +5314,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len) { struct tcp_sock *tp = tcp_sk(sk); - int res; /* * Header prediction. @@ -5457,14 +5493,18 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, * Standard slow path. */ - res = tcp_validate_incoming(sk, skb, th, 1); - if (res <= 0) - return -res; + if (!tcp_validate_incoming(sk, skb, th, 1)) + return 0; step5: if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. 
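/*
 * [Illustrative sketch, not part of the patch.] tcp_send_challenge_ack()
 * above allows at most sysctl_tcp_challenge_ack_limit challenge ACKs per
 * one-second bucket of jiffies/HZ. The same pattern as stand-alone
 * user-space C, with time() replacing jiffies/HZ; the deliberately
 * unlocked statics mirror the "unprotected vars" in the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CHALLENGE_ACK_LIMIT 100	/* mirrors the sysctl default above */

static bool challenge_allowed(void)
{
	static time_t bucket;		/* current one-second bucket */
	static unsigned int count;	/* ACKs sent in this bucket */
	time_t now = time(NULL);

	if (now != bucket) {		/* new second: reset the counter */
		bucket = now;
		count = 0;
	}
	return ++count <= CHALLENGE_ACK_LIMIT;
}

int main(void)
{
	unsigned int i, sent = 0;

	for (i = 0; i < 1000; i++)	/* a burst arriving within one second */
		if (challenge_allowed())
			sent++;
	printf("sent %u of 1000 challenge ACKs\n", sent);
	return 0;
}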
+ */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -5769,7 +5809,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int queued = 0; - int res; tp->rx_opt.saw_tstamp = 0; @@ -5824,9 +5863,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, return 0; } - res = tcp_validate_incoming(sk, skb, th, 0); - if (res <= 0) - return -res; + if (!tcp_validate_incoming(sk, skb, th, 0)) + return 0; /* step 5: check the ACK field */ if (th->ack) { @@ -5943,6 +5981,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } else goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 29b942ce..f08b9166 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -876,7 +876,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) q->now = psched_get_time(); start_at = jiffies; - next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; + next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC; for (level = 0; level < TC_HTB_MAXDEPTH; level++) { /* common case optimization - skip event handler quickly */ diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 6c855645..0018b653 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, msg = sctp_datamsg_new(GFP_KERNEL); if (!msg) - return NULL; + return ERR_PTR(-ENOMEM); /* Note: Calculate this outside of the loop, so that all fragments * have the same expiration. @@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto errout; + } + err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov); if (err < 0) - goto errout; + goto errout_chunk_free; offset += len; @@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto errout; + } err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov); @@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - (__u8 *)chunk->skb->data); if (err < 0) - goto errout; + goto errout_chunk_free; sctp_datamsg_assign(msg, chunk); list_add_tail(&chunk->frag_list, &msg->chunks); @@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, return msg; +errout_chunk_free: + sctp_chunk_free(chunk); + errout: list_for_each_safe(pos, temp, &msg->chunks) { list_del_init(pos); @@ -339,7 +347,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, sctp_chunk_free(chunk); } sctp_datamsg_put(msg); - return NULL; + return ERR_PTR(err); } /* Check whether this message has expired. */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b70a3ee6..8ac6d0b1 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1837,8 +1837,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, /* Break the message into multiple chunks of maximum size. 
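/*
 * [Illustrative sketch, not part of the patch.] The SCTP hunks here make
 * sctp_datamsg_from_user() return ERR_PTR(err) instead of bare NULL, and
 * the sctp_sendmsg() caller (next hunk) recovers the errno with
 * IS_ERR()/PTR_ERR(). A condensed user-space rendering of that idiom;
 * the three macros are simplified stand-ins for <linux/err.h>.
 */
#include <errno.h>
#include <stdio.h>

#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

static void *alloc_datamsg(int fail_with)
{
	static int dummy;		/* stands in for a real datamsg */

	if (fail_with)
		return ERR_PTR(-fail_with);	/* e.g. -ENOMEM */
	return &dummy;
}

int main(void)
{
	void *msg = alloc_datamsg(ENOMEM);

	if (IS_ERR(msg)) {
		printf("allocation failed: err=%ld\n", PTR_ERR(msg));
		return 1;
	}
	return 0;
}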
*/ datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); - if (!datamsg) { - err = -ENOMEM; + if (IS_ERR(datamsg)) { + err = PTR_ERR(datamsg); goto out_free; } diff --git a/sound/usb/midi.c b/sound/usb/midi.c index a0fe5dd3..43762150 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -115,6 +115,7 @@ struct snd_usb_midi { struct list_head list; struct timer_list error_timer; spinlock_t disc_lock; + struct rw_semaphore disc_rwsem; struct mutex mutex; u32 usb_id; int next_midi_device; @@ -124,8 +125,10 @@ struct snd_usb_midi { struct snd_usb_midi_in_endpoint *in; } endpoints[MIDI_MAX_ENDPOINTS]; unsigned long input_triggered; - unsigned int opened; + bool autopm_reference; + unsigned int opened[2]; unsigned char disconnected; + unsigned char input_running; struct snd_kcontrol *roland_load_ctl; }; @@ -147,7 +150,6 @@ struct snd_usb_midi_out_endpoint { struct snd_usb_midi_out_endpoint* ep; struct snd_rawmidi_substream *substream; int active; - bool autopm_reference; uint8_t cable; /* cable number << 4 */ uint8_t state; #define STATE_UNKNOWN 0 @@ -1016,29 +1018,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi) snd_usbmidi_input_start(&umidi->list); } -static void substream_open(struct snd_rawmidi_substream *substream, int open) +static int substream_open(struct snd_rawmidi_substream *substream, int dir, + int open) { struct snd_usb_midi* umidi = substream->rmidi->private_data; struct snd_kcontrol *ctl; + int err; + + down_read(&umidi->disc_rwsem); + if (umidi->disconnected) { + up_read(&umidi->disc_rwsem); + return open ? -ENODEV : 0; + } mutex_lock(&umidi->mutex); if (open) { - if (umidi->opened++ == 0 && umidi->roland_load_ctl) { - ctl = umidi->roland_load_ctl; - ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; - snd_ctl_notify(umidi->card, + if (!umidi->opened[0] && !umidi->opened[1]) { + err = usb_autopm_get_interface(umidi->iface); + umidi->autopm_reference = err >= 0; + if (err < 0 && err != -EACCES) { + mutex_unlock(&umidi->mutex); + up_read(&umidi->disc_rwsem); + return -EIO; + } + if (umidi->roland_load_ctl) { + ctl = umidi->roland_load_ctl; + ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; + snd_ctl_notify(umidi->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); - update_roland_altsetting(umidi); + update_roland_altsetting(umidi); + } } + umidi->opened[dir]++; + if (umidi->opened[1]) + snd_usbmidi_input_start(&umidi->list); } else { - if (--umidi->opened == 0 && umidi->roland_load_ctl) { - ctl = umidi->roland_load_ctl; - ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; - snd_ctl_notify(umidi->card, + umidi->opened[dir]--; + if (!umidi->opened[1]) + snd_usbmidi_input_stop(&umidi->list); + if (!umidi->opened[0] && !umidi->opened[1]) { + if (umidi->roland_load_ctl) { + ctl = umidi->roland_load_ctl; + ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; + snd_ctl_notify(umidi->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); + } + if (umidi->autopm_reference) + usb_autopm_put_interface(umidi->iface); } } mutex_unlock(&umidi->mutex); + up_read(&umidi->disc_rwsem); + return 0; } static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) @@ -1046,7 +1077,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) struct snd_usb_midi* umidi = substream->rmidi->private_data; struct usbmidi_out_port* port = NULL; int i, j; - int err; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) if (umidi->endpoints[i].out) @@ -1059,25 +1089,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) snd_BUG(); return 
-ENXIO; } - err = usb_autopm_get_interface(umidi->iface); - port->autopm_reference = err >= 0; - if (err < 0 && err != -EACCES) - return -EIO; + substream->runtime->private_data = port; port->state = STATE_UNKNOWN; - substream_open(substream, 1); - return 0; + return substream_open(substream, 0, 1); } static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { - struct snd_usb_midi* umidi = substream->rmidi->private_data; - struct usbmidi_out_port *port = substream->runtime->private_data; - - substream_open(substream, 0); - if (port->autopm_reference) - usb_autopm_put_interface(umidi->iface); - return 0; + return substream_open(substream, 0, 0); } static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream, int up) @@ -1130,14 +1150,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream) { - substream_open(substream, 1); - return 0; + return substream_open(substream, 1, 1); } static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream) { - substream_open(substream, 0); - return 0; + return substream_open(substream, 1, 0); } static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up) @@ -1386,9 +1404,12 @@ void snd_usbmidi_disconnect(struct list_head* p) * a timer may submit an URB. To reliably break the cycle * a flag under lock must be used */ + down_write(&umidi->disc_rwsem); spin_lock_irq(&umidi->disc_lock); umidi->disconnected = 1; spin_unlock_irq(&umidi->disc_lock); + up_write(&umidi->disc_rwsem); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; if (ep->out) @@ -2043,12 +2064,15 @@ void snd_usbmidi_input_stop(struct list_head* p) unsigned int i, j; umidi = list_entry(p, struct snd_usb_midi, list); + if (!umidi->input_running) + return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; if (ep->in) for (j = 0; j < INPUT_URBS; ++j) usb_kill_urb(ep->in->urbs[j]); } + umidi->input_running = 0; } static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep) @@ -2073,8 +2097,11 @@ void snd_usbmidi_input_start(struct list_head* p) int i; umidi = list_entry(p, struct snd_usb_midi, list); + if (umidi->input_running || !umidi->opened[1]) + return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + umidi->input_running = 1; } /* @@ -2100,6 +2127,7 @@ int snd_usbmidi_create(struct snd_card *card, umidi->usb_protocol_ops = &snd_usbmidi_standard_ops; init_timer(&umidi->error_timer); spin_lock_init(&umidi->disc_lock); + init_rwsem(&umidi->disc_rwsem); mutex_init(&umidi->mutex); umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor), le16_to_cpu(umidi->dev->descriptor.idProduct)); @@ -2201,9 +2229,6 @@ int snd_usbmidi_create(struct snd_card *card, } list_add_tail(&umidi->list, midi_list); - - for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); return 0; } From c380d12e171e7061ca63bd2cefb3bb078df5b28f Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 08:16:46 -0500 Subject: [PATCH 053/117] Linux 3.0.59 --- Makefile | 2 +- arch/powerpc/kernel/head_64.S | 2 +- arch/powerpc/kernel/time.c | 5 - arch/powerpc/kvm/44x_emulate.c | 2 + arch/x86/kernel/cpu/amd.c | 14 ++ drivers/acpi/scan.c | 7 +- drivers/ata/libata-core.c | 1 + drivers/ata/libata-eh.c | 1 + drivers/ata/libata-scsi.c | 6 +- 
drivers/ata/sata_promise.c | 15 +- drivers/block/aoe/aoe.h | 2 +- drivers/block/aoe/aoeblk.c | 5 - drivers/firewire/net.c | 13 +- drivers/gpu/drm/i915/i915_reg.h | 2 + drivers/gpu/drm/i915/intel_display.c | 6 +- drivers/gpu/drm/i915/intel_lvds.c | 8 - .../gpu/drm/radeon/radeon_legacy_encoders.c | 8 + drivers/infiniband/core/netlink.c | 2 +- drivers/infiniband/hw/nes/nes.h | 1 + drivers/infiniband/hw/nes/nes_hw.c | 9 +- drivers/infiniband/hw/nes/nes_verbs.c | 9 +- drivers/md/dm-ioctl.c | 8 + drivers/mfd/mfd-core.c | 15 +- drivers/net/wimax/i2400m/i2400m-usb.h | 3 + drivers/net/wimax/i2400m/usb.c | 6 + .../wireless/ath/ath9k/ar9003_2p2_initvals.h | 172 +++++++++--------- .../net/wireless/ath/ath9k/ar9003_eeprom.h | 6 +- drivers/pci/intel-iommu.c | 11 +- drivers/s390/cio/device_pgid.c | 10 +- drivers/scsi/mvsas/mv_94xx.h | 14 +- drivers/scsi/mvsas/mv_sas.h | 2 +- drivers/scsi/qla2xxx/qla_os.c | 4 +- drivers/staging/comedi/Kconfig | 1 + drivers/staging/comedi/comedi_fops.c | 21 +++ drivers/staging/comedi/comedidev.h | 1 + drivers/staging/comedi/drivers.c | 34 +--- drivers/staging/comedi/drivers/comedi_test.c | 2 +- drivers/staging/comedi/internal.h | 1 + drivers/staging/rtl8712/usb_intf.c | 2 + drivers/staging/speakup/synth.c | 4 +- drivers/staging/vt6656/dpc.c | 4 +- drivers/staging/vt6656/key.c | 53 ++++-- drivers/staging/vt6656/key.h | 8 +- drivers/staging/vt6656/mac.c | 6 +- drivers/staging/vt6656/rf.c | 3 + drivers/staging/vt6656/rxtx.c | 18 +- drivers/staging/vt6656/ttype.h | 16 +- drivers/staging/vt6656/wcmd.c | 20 +- drivers/staging/vt6656/wpa2.h | 4 +- drivers/target/tcm_fc/tfc_sess.c | 2 +- drivers/usb/class/cdc-acm.c | 3 + drivers/usb/core/hub.c | 12 +- drivers/usb/gadget/dummy_hcd.c | 9 +- drivers/usb/host/xhci-mem.c | 2 + drivers/usb/serial/ftdi_sio.c | 2 + drivers/usb/serial/ftdi_sio_ids.h | 6 + drivers/usb/serial/option.c | 18 +- drivers/video/mxsfb.c | 3 +- fs/eventpoll.c | 22 ++- fs/ext4/acl.c | 6 +- fs/ext4/extents.c | 22 ++- fs/ext4/super.c | 4 +- fs/gfs2/lops.c | 18 +- fs/gfs2/trans.c | 8 + fs/jbd2/transaction.c | 3 +- fs/jffs2/nodemgmt.c | 6 +- fs/udf/inode.c | 11 +- include/linux/if_link.h | 1 + include/linux/netlink.h | 6 +- include/linux/rtnetlink.h | 3 + include/net/rtnetlink.h | 7 +- kernel/trace/ring_buffer.c | 2 + mm/huge_memory.c | 3 + mm/memory.c | 18 +- mm/page_alloc.c | 2 +- net/bridge/br_netlink.c | 15 +- net/core/fib_rules.c | 6 +- net/core/neighbour.c | 11 +- net/core/rtnetlink.c | 127 ++++++++++--- net/dcb/dcbnl.c | 4 +- net/decnet/dn_dev.c | 6 +- net/decnet/dn_fib.c | 4 +- net/decnet/dn_route.c | 5 +- net/ipv4/devinet.c | 6 +- net/ipv4/fib_frontend.c | 6 +- net/ipv4/inet_diag.c | 2 +- net/ipv4/ipmr.c | 3 +- net/ipv4/route.c | 2 +- net/ipv6/addrconf.c | 16 +- net/ipv6/addrlabel.c | 9 +- net/ipv6/ip6_fib.c | 3 +- net/ipv6/ip6mr.c | 3 +- net/ipv6/route.c | 6 +- net/mac80211/sta_info.c | 2 +- net/netfilter/ipset/ip_set_core.c | 2 +- net/netfilter/nf_conntrack_netlink.c | 4 +- net/netlink/af_netlink.c | 17 +- net/netlink/genetlink.c | 2 +- net/phonet/pn_netlink.c | 13 +- net/sched/act_api.c | 7 +- net/sched/cls_api.c | 6 +- net/sched/sch_api.c | 12 +- net/sunrpc/sched.c | 27 ++- net/xfrm/xfrm_user.c | 3 +- sound/arm/pxa2xx-ac97-lib.c | 8 +- sound/soc/codecs/wm2000.c | 4 +- 106 files changed, 728 insertions(+), 381 deletions(-) diff --git a/Makefile b/Makefile index 4f1ae9ca..8b6923f3 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 58 +SUBLEVEL = 59 EXTRAVERSION = NAME = Sneaky Weasel diff --git 
a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index ba504099..e8befeff 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -425,7 +425,7 @@ _STATIC(__after_prom_start) tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ #endif -#ifdef CONFIG_CRASH_DUMP +#ifdef CONFIG_RELOCATABLE /* * Check if the kernel has to be running as relocatable kernel based on the * variable __run_at_load, if it is set the kernel is treated as relocatable diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1becd7b9..818d809e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -859,13 +859,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, void update_vsyscall_tz(void) { - /* Make userspace gettimeofday spin until we're done. */ - ++vdso_data->tb_update_count; - smp_mb(); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_dsttime = sys_tz.tz_dsttime; - smp_mb(); - ++vdso_data->tb_update_count; } static void __init clocksource_init(void) diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 549bb2c9..ded8a1a0 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -79,6 +79,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, run->dcr.dcrn = dcrn; run->dcr.data = 0; run->dcr.is_write = 0; + vcpu->arch.dcr_is_write = 0; vcpu->arch.io_gpr = rt; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); @@ -100,6 +101,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, run->dcr.dcrn = dcrn; run->dcr.data = kvmppc_get_gpr(vcpu, rs); run->dcr.is_write = 1; + vcpu->arch.dcr_is_write = 1; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); emulated = EMULATE_DO_DCR; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3f4b6dac..a93741d8 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -568,6 +568,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. + */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + checking_wrmsrl(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? 
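/*
 * [Illustrative sketch, not part of the patch.] The init_amd() hunk above
 * sets bits 0x1E in MSR 0xc0011021 on the affected family 15h parts to
 * turn the way-access filter off (rdmsrl_safe()/checking_wrmsrl() in the
 * kernel). The current value can be inspected from user space through the
 * msr driver; this assumes the msr module is loaded, root privileges, and
 * CPU 0 as the example target.
 */
#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const uint64_t msr = 0xc0011021;	/* way-access-filter control */
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), msr) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("MSR 0xc0011021 = %#llx, filter %s\n",
	       (unsigned long long)val,
	       (val & 0x1E) ? "disabled" : "enabled");
	close(fd);
	return 0;
}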
*/ diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 8cb97421..ea1fe0a2 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -789,8 +789,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, static void acpi_bus_set_run_wake_flags(struct acpi_device *device) { struct acpi_device_id button_device_ids[] = { - {"PNP0C0D", 0}, {"PNP0C0C", 0}, + {"PNP0C0D", 0}, {"PNP0C0E", 0}, {"", 0}, }; @@ -802,6 +802,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) { device->wakeup.flags.run_wake = 1; + if (!acpi_match_device_ids(device, &button_device_ids[1])) { + /* Do not use Lid/sleep button for S5 wakeup */ + if (device->wakeup.sleep_state == ACPI_STATE_S5) + device->wakeup.sleep_state = ACPI_STATE_S4; + } device_set_wakeup_capable(&device->dev, true); return; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 600ede0c..5a72e36a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2543,6 +2543,7 @@ int ata_bus_probe(struct ata_port *ap) * bus as we may be talking too fast. */ dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 311c92d1..1cbb0043 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2602,6 +2602,7 @@ int ata_eh_reset(struct ata_link *link, int classify, * bus as we may be talking too fast. */ dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 927f968e..3b42a5d6 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -308,7 +308,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); - if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY)) + if (atadev && ap->ops->sw_activity_show && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) return ap->ops->sw_activity_show(atadev, buf); return -EINVAL; } @@ -323,7 +324,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, enum sw_activity val; int rc; - if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + if (atadev && ap->ops->sw_activity_store && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) { val = simple_strtoul(buf, NULL, 0); switch (val) { case OFF: case BLINK_ON: case BLINK_OFF: diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index a004b1e0..ca4646aa 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -147,6 +147,10 @@ struct pdc_port_priv { dma_addr_t pkt_dma; }; +struct pdc_host_priv { + spinlock_t hard_reset_lock; +}; + static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap) void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1; unsigned int ata_no = pdc_ata_port_to_ata_no(ap); + struct pdc_host_priv *hpriv = 
ap->host->private_data; u8 tmp; - spin_lock(&ap->host->lock); + spin_lock(&hpriv->hard_reset_lock); tmp = readb(pcictl_b1_mmio); tmp &= ~(0x10 << ata_no); @@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap) writeb(tmp, pcictl_b1_mmio); readb(pcictl_b1_mmio); /* flush */ - spin_unlock(&ap->host->lock); + spin_unlock(&hpriv->hard_reset_lock); } static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class, @@ -1183,6 +1188,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev, const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; const struct ata_port_info *ppi[PDC_MAX_PORTS]; struct ata_host *host; + struct pdc_host_priv *hpriv; void __iomem *host_mmio; int n_ports, i, rc; int is_sataii_tx4; @@ -1220,6 +1226,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev, dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n"); return -ENOMEM; } + hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + spin_lock_init(&hpriv->hard_reset_lock); + host->private_data = hpriv; host->iomap = pcim_iomap_table(pdev); is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index db195aba..e49ddd0a 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ -#define VERSION "47" +#define VERSION "47q" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 528f6318..2a0fdaee 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -277,8 +277,6 @@ aoeblk_gdalloc(void *vp) goto err_mempool; blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; - if (bdi_init(&d->blkq->backing_dev_info)) - goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; @@ -299,9 +297,6 @@ aoeblk_gdalloc(void *vp) aoedisk_add_sysfs(d); return; -err_blkq: - blk_cleanup_queue(d->blkq); - d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index b9762d07..e74750bc 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -863,8 +863,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { buf_ptr += 2; length -= IEEE1394_GASP_HDR_SIZE; - fwnet_incoming_packet(dev, buf_ptr, length, - source_node_id, -1, true); + fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, + context->card->generation, true); } packet.payload_length = dev->rcv_buffer_size; @@ -959,7 +959,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) break; } - skb_pull(skb, ptask->max_payload); + if (ptask->dest_node == IEEE1394_ALL_NODES) { + skb_pull(skb, + ptask->max_payload + IEEE1394_GASP_HDR_SIZE); + } else { + skb_pull(skb, ptask->max_payload); + } if (ptask->outstanding_pkts > 1) { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, dg_size, fg_off, datagram_label); @@ -1062,7 +1067,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) smp_rmb(); node_id = dev->card->node_id; - p = skb_push(ptask->skb, 8); + p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | RFC2734_SW_VERSION, &p[4]); diff --git 
a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 557e007a..8fa4f7bc 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2754,6 +2754,8 @@ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) +#define PF_PIPE_SEL_MASK_IVB (3<<29) +#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) #define PF_FILTER_MASK (3<<23) #define PF_FILTER_PROGRAMMED (0<<23) #define PF_FILTER_MED_3x3 (1<<23) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 36d76989..d1dca923 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2696,7 +2696,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) * as some pre-programmed values are broken, * e.g. x201. */ - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + if (IS_IVYBRIDGE(dev)) + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | + PF_PIPE_SEL_IVB(pipe)); + else + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 645f8aa3..eebf0028 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -751,14 +751,6 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), }, }, - { - .callback = intel_no_lvds_dmi_callback, - .ident = "ZOTAC ZBOXSD-ID12/ID13", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"), - DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), - }, - }, { .callback = intel_no_lvds_dmi_callback, .ident = "Gigabyte GA-D525TUD", diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 81659539..a9068031 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -617,6 +617,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc enum drm_connector_status found = connector_status_disconnected; bool color = true; + /* just don't bother on RN50 those chip are often connected to remoting + * console hw and often we get failure to load detect those. So to make + * everyone happy report the encoder as always connected. 
+ */ + if (ASIC_IS_RN50(rdev)) { + return connector_status_connected; + } + /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 4a5abaf0..9227f4ac 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; return netlink_dump_start(nls, skb, nlh, client->cb_table[op].dump, - NULL); + NULL, 0); } } diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 6fe79876..6e302738 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -511,6 +511,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); int nes_destroy_cqp(struct nes_device *); int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); void nes_recheck_link_status(struct work_struct *work); +void nes_terminate_timeout(unsigned long context); /* nes_nic.c */ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 96fa9a4c..ba4814a2 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, static void process_critical_error(struct nes_device *nesdev); static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); -static void nes_terminate_timeout(unsigned long context); static void nes_terminate_start_timer(struct nes_qp *nesqp); #ifdef CONFIG_INFINIBAND_NES_DEBUG @@ -3496,7 +3495,7 @@ static void nes_terminate_received(struct nes_device *nesdev, } /* Timeout routine in case terminate fails to complete */ -static void nes_terminate_timeout(unsigned long context) +void nes_terminate_timeout(unsigned long context) { struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context; @@ -3506,11 +3505,7 @@ static void nes_terminate_timeout(unsigned long context) /* Set a timer in case hw cannot complete the terminate sequence */ static void nes_terminate_start_timer(struct nes_qp *nesqp) { - init_timer(&nesqp->terminate_timer); - nesqp->terminate_timer.function = nes_terminate_timeout; - nesqp->terminate_timer.expires = jiffies + HZ; - nesqp->terminate_timer.data = (unsigned long)nesqp; - add_timer(&nesqp->terminate_timer); + mod_timer(&nesqp->terminate_timer, (jiffies + HZ)); } /** diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 95ca93ce..59db49fb 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1414,6 +1414,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, } nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR); + init_timer(&nesqp->terminate_timer); + nesqp->terminate_timer.function = nes_terminate_timeout; + nesqp->terminate_timer.data = (unsigned long)nesqp; /* update the QP table */ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; @@ -1423,7 +1426,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, return &nesqp->ibqp; } - /** * nes_clean_cq */ @@ -2568,6 +2570,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return ibmr; case IWNES_MEMREG_TYPE_QP: case IWNES_MEMREG_TYPE_CQ: + if 
(!region->length) { + nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n"); + ib_umem_release(region); + return ERR_PTR(-EINVAL); + } nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL); if (!nespbl) { nes_debug(NES_DBG_MR, "Unable to allocate PBL\n"); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 4cacdad2..bd3b294e 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1524,6 +1524,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) if (copy_from_user(dmi, user, tmp.data_size)) goto bad; + /* + * Abort if something changed the ioctl data while it was being copied. + */ + if (dmi->data_size != tmp.data_size) { + DMERR("rejecting ioctl: data size modified while processing parameters"); + goto bad; + } + /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, tmp.data_size)) goto bad; diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index acf9dad6..b36aadb6 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -18,6 +18,10 @@ #include #include +static struct device_type mfd_dev_type = { + .name = "mfd_device", +}; + int mfd_cell_enable(struct platform_device *pdev) { const struct mfd_cell *cell = mfd_get_cell(pdev); @@ -87,6 +91,7 @@ static int mfd_add_device(struct device *parent, int id, goto fail_device; pdev->dev.parent = parent; + pdev->dev.type = &mfd_dev_type; if (cell->pdata_size) { ret = platform_device_add_data(pdev, @@ -182,10 +187,16 @@ EXPORT_SYMBOL(mfd_add_devices); static int mfd_remove_devices_fn(struct device *dev, void *c) { - struct platform_device *pdev = to_platform_device(dev); - const struct mfd_cell *cell = mfd_get_cell(pdev); + struct platform_device *pdev; + const struct mfd_cell *cell; atomic_t **usage_count = c; + if (dev->type != &mfd_dev_type) + return 0; + + pdev = to_platform_device(dev); + cell = mfd_get_cell(pdev); + /* find the base address of usage_count pointers (for freeing) */ if (!*usage_count || (cell->usage_count < *usage_count)) *usage_count = cell->usage_count; diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 6650fde9..9f1e947f 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h @@ -152,6 +152,9 @@ enum { /* Device IDs */ USB_DEVICE_ID_I6050 = 0x0186, USB_DEVICE_ID_I6050_2 = 0x0188, + USB_DEVICE_ID_I6150 = 0x07d6, + USB_DEVICE_ID_I6150_2 = 0x07d7, + USB_DEVICE_ID_I6150_3 = 0x07d9, USB_DEVICE_ID_I6250 = 0x0187, }; diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 298f2b0b..0ddc8db2 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -491,6 +491,9 @@ int i2400mu_probe(struct usb_interface *iface, switch (id->idProduct) { case USB_DEVICE_ID_I6050: case USB_DEVICE_ID_I6050_2: + case USB_DEVICE_ID_I6150: + case USB_DEVICE_ID_I6150_2: + case USB_DEVICE_ID_I6150_3: case USB_DEVICE_ID_I6250: i2400mu->i6050 = 1; break; @@ -740,6 +743,9 @@ static struct usb_device_id i2400mu_id_table[] = { { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) }, { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, { USB_DEVICE(0x8086, 0x0181) }, { USB_DEVICE(0x8086, 0x1403) }, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 
029773c3..c84c493a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -34,98 +34,98 @@ static const u32 ar9300_2p2_radio_postamble[][5] = { static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, - {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, - {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, - {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, - {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, - {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, - {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, - {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, - {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402}, - {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404}, - {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, - {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, - {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, - {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20}, - {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20}, - {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22}, - {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, - {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, - {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, - {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861}, - {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81}, - {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83}, - {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84}, - {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3}, - {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5}, - {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9}, - {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb}, - {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, - {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, - {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, - {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, - {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202}, - {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400}, - {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402}, - {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404}, - {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603}, - {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02}, - {0x0000a5a8, 0x34822225, 
0x34822225, 0x25800a04, 0x25800a04}, - {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20}, - {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20}, - {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22}, - {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, - {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, - {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, - {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861}, - {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81}, - {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83}, - {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84}, - {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3}, - {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5}, - {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9}, - {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb}, - {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 
0x00800000}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000}, - {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501}, - {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501}, - {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03}, - {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04}, - {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04}, - {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, - {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, - {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000}, + {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000}, + {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501}, + {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501}, + {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03}, + {0x0000a624, 0x04c15305, 
0x04c15305, 0x03010c04, 0x03010c04}, + {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04}, + {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, - {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, - {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, - {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index ab21a491..7f7bc947 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -69,13 +69,13 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff -#define AR9300_OTP_BASE 0x14000 -#define AR9300_OTP_STATUS 0x15f18 +#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) +#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 -#define AR9300_OTP_READ_DATA 0x15f1c +#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 0b4dbcdb..3f9a8912 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1793,10 +1793,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, if (!pte) return -ENOMEM; /* It is large page*/ - if (largepage_lvl > 1) + if (largepage_lvl > 1) { pteval |= DMA_PTE_LARGE_PAGE; - else + /* Ensure that old small page tables are removed to make room + for superpage, if they exist. */ + dma_pte_clear_range(domain, iov_pfn, + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + dma_pte_free_pagetable(domain, iov_pfn, + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; + } } /* We don't need lock here, nobody else diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 07a4fd29..daa6b903 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2) * Determine pathgroup state from PGID data. 
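/*
 * [Illustrative sketch, not part of the patch.] The intel-iommu hunk above
 * clears and frees any old 4 KiB page tables in the range a new superpage
 * will cover before setting DMA_PTE_LARGE_PAGE. Assuming a 9-bit stride
 * per page-table level (a simplification of the driver's
 * lvl_to_nr_pages()), the cleared pfn range works out as follows.
 */
#include <stdio.h>

static unsigned long lvl_to_nr_pages(int level)
{
	return 1UL << ((level - 1) * 9);	/* 9-bit stride per level */
}

int main(void)
{
	unsigned long iov_pfn = 0x40000;	/* superpage-aligned example pfn */
	int level;

	for (level = 2; level <= 3; level++)	/* 2 MiB and 1 GiB superpages */
		printf("level %d: clear pfns %#lx..%#lx (%lu pages)\n",
		       level, iov_pfn,
		       iov_pfn + lvl_to_nr_pages(level) - 1,
		       lvl_to_nr_pages(level));
	return 0;
}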
*/ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, - int *mismatch, int *reserved, u8 *reset) + int *mismatch, u8 *reserved, u8 *reset) { struct pgid *pgid = &cdev->private->pgid[0]; struct pgid *first = NULL; @@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, if ((cdev->private->pgid_valid_mask & lpm) == 0) continue; if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) - *reserved = 1; + *reserved |= lpm; if (pgid_is_reset(pgid)) { *reset |= lpm; continue; @@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct pgid *pgid; int mismatch = 0; - int reserved = 0; + u8 reserved = 0; u8 reset = 0; u8 donepm; if (rc) goto out; pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); - if (reserved) + if (reserved == cdev->private->pgid_valid_mask) rc = -EUSERS; else if (mismatch) rc = -EOPNOTSUPP; @@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc) } out: CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " - "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid, + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid, id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, cdev->private->pgid_todo_mask, mismatch, reserved, reset); switch (rc) { diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 8835befe..d72aa618 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -193,21 +193,11 @@ struct mvs_prd { #define SPI_ADDR_VLD_94XX (1U << 1) #define SPI_CTRL_SpiStart_94XX (1U << 0) -#define mv_ffc(x) ffz(x) - static inline int mv_ffc64(u64 v) { - int i; - i = mv_ffc((u32)v); - if (i >= 0) - return i; - i = mv_ffc((u32)(v>>32)); - - if (i != 0) - return 32 + i; - - return -1; + u64 x = ~v; + return x ? 
__ffs64(x) : -1; } #define r_reg_set_enable(i) \ diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 1367d8b9..efc6965c 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -73,7 +73,7 @@ extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ ((type == EDGE_DEV) || (type == FANOUT_DEV)) -#define bit(n) ((u32)1 << n) +#define bit(n) ((u64)1 << n) #define for_each_phy(__lseq_mask, __mc, __lseq) \ for ((__mc) = (__lseq_mask), (__lseq) = 0; \ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index a2a1a831..7e78020b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3406,9 +3406,9 @@ qla2x00_do_dpc(void *data) base_vha->host_no)); } - if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { + if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, + &base_vha->dpc_flags)) { qla2x00_update_fcports(base_vha); - clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); } if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index 20008a43..727b207d 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -424,6 +424,7 @@ config COMEDI_ADQ12B config COMEDI_NI_AT_A2150 tristate "NI AT-A2150 ISA card support" + select COMEDI_FC depends on COMEDI_NI_COMMON depends on VIRT_TO_BUS default N diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index d80b90fa..39be6732 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -138,6 +138,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, if (cmd == COMEDI_DEVCONFIG) { rc = do_devconfig_ioctl(dev, (struct comedi_devconfig __user *)arg); + if (rc == 0) + /* Evade comedi_auto_unconfig(). 
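/*
 * [Illustrative sketch, not part of the patch.] The comedi changes around
 * this hunk record the hardware_device pointer in comedi_device_file_info
 * (and clear it after a successful COMEDI_DEVCONFIG) so that
 * comedi_find_board_minor(), added in the hunks that follow, can recover
 * a board minor by scanning the table under a lock instead of relying on
 * dev_get_drvdata(). A condensed user-space version of that lookup, with
 * a pthread mutex standing in for the spinlock and a hypothetical table
 * size.
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_MINORS 16

static const void *hardware_device_table[NUM_MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int find_board_minor(const void *hardware_device)
{
	int minor;

	for (minor = 0; minor < NUM_MINORS; minor++) {
		pthread_mutex_lock(&table_lock);
		if (hardware_device_table[minor] == hardware_device) {
			pthread_mutex_unlock(&table_lock);
			return minor;
		}
		pthread_mutex_unlock(&table_lock);
	}
	return -1;			/* the kernel version returns -ENODEV */
}

int main(void)
{
	int dev;			/* any unique address will do */

	hardware_device_table[3] = &dev;
	printf("minor = %d\n", find_board_minor(&dev));
	return 0;
}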
*/ + dev_file_info->hardware_device = NULL; goto done; } @@ -2207,6 +2210,7 @@ int comedi_alloc_board_minor(struct device *hardware_device) kfree(info); return -ENOMEM; } + info->hardware_device = hardware_device; comedi_device_init(info->device); spin_lock_irqsave(&comedi_file_info_table_lock, flags); for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i) { @@ -2296,6 +2300,23 @@ void comedi_free_board_minor(unsigned minor) } } +int comedi_find_board_minor(struct device *hardware_device) +{ + int minor; + struct comedi_device_file_info *info; + + for (minor = 0; minor < COMEDI_NUM_BOARD_MINORS; minor++) { + spin_lock(&comedi_file_info_table_lock); + info = comedi_file_info_table[minor]; + if (info && info->hardware_device == hardware_device) { + spin_unlock(&comedi_file_info_table_lock); + return minor; + } + spin_unlock(&comedi_file_info_table_lock); + } + return -ENODEV; +} + int comedi_alloc_subdevice_minor(struct comedi_device *dev, struct comedi_subdevice *s) { diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index 68aa9176..5f2745e5 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -237,6 +237,7 @@ struct comedi_device_file_info { struct comedi_device *device; struct comedi_subdevice *read_subdevice; struct comedi_subdevice *write_subdevice; + struct device *hardware_device; }; #ifdef CONFIG_COMEDI_DEBUG diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 6d60e91b..f9b02860 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -819,25 +819,14 @@ static int comedi_auto_config(struct device *hardware_device, int minor; struct comedi_device_file_info *dev_file_info; int retval; - unsigned *private_data = NULL; - if (!comedi_autoconfig) { - dev_set_drvdata(hardware_device, NULL); + if (!comedi_autoconfig) return 0; - } minor = comedi_alloc_board_minor(hardware_device); if (minor < 0) return minor; - private_data = kmalloc(sizeof(unsigned), GFP_KERNEL); - if (private_data == NULL) { - retval = -ENOMEM; - goto cleanup; - } - *private_data = minor; - dev_set_drvdata(hardware_device, private_data); - dev_file_info = comedi_get_device_file_info(minor); memset(&it, 0, sizeof(it)); @@ -850,25 +839,22 @@ static int comedi_auto_config(struct device *hardware_device, retval = comedi_device_attach(dev_file_info->device, &it); mutex_unlock(&dev_file_info->device->mutex); -cleanup: - if (retval < 0) { - kfree(private_data); + if (retval < 0) comedi_free_board_minor(minor); - } return retval; } static void comedi_auto_unconfig(struct device *hardware_device) { - unsigned *minor = (unsigned *)dev_get_drvdata(hardware_device); - if (minor == NULL) - return; - - BUG_ON(*minor >= COMEDI_NUM_BOARD_MINORS); + int minor; - comedi_free_board_minor(*minor); - dev_set_drvdata(hardware_device, NULL); - kfree(minor); + if (hardware_device == NULL) + return; + minor = comedi_find_board_minor(hardware_device); + if (minor < 0) + return; + BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS); + comedi_free_board_minor(minor); } int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name) diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c index a804742b..2567f9ab 100644 --- a/drivers/staging/comedi/drivers/comedi_test.c +++ b/drivers/staging/comedi/drivers/comedi_test.c @@ -461,7 +461,7 @@ static int waveform_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->timer_running = 0; - 
del_timer(&devpriv->timer); + del_timer_sync(&devpriv->timer); return 0; } diff --git a/drivers/staging/comedi/internal.h b/drivers/staging/comedi/internal.h index 434ce343..4208fb4c 100644 --- a/drivers/staging/comedi/internal.h +++ b/drivers/staging/comedi/internal.h @@ -7,6 +7,7 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); int comedi_alloc_board_minor(struct device *hardware_device); void comedi_free_board_minor(unsigned minor); +int comedi_find_board_minor(struct device *hardware_device); void comedi_reset_async_buf(struct comedi_async *async); int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long new_size); diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index 6d88d1a4..af28a62c 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -62,6 +62,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = { {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */ /* Belkin */ {USB_DEVICE(0x050D, 0x945A)}, + /* ISY IWL - Belkin clone */ + {USB_DEVICE(0x050D, 0x11F1)}, /* Corega */ {USB_DEVICE(0x07AA, 0x0047)}, /* D-Link */ diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index c241074a..78431115 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -342,7 +342,7 @@ int synth_init(char *synth_name) mutex_lock(&spk_mutex); /* First, check if we already have it loaded. */ - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++) if (strcmp(synths[i]->name, synth_name) == 0) synth = synths[i]; @@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth) int i; int status = 0; mutex_lock(&spk_mutex); - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++) /* synth_remove() is responsible for rotating the array down */ if (in_synth == synths[i]) { mutex_unlock(&spk_mutex); diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c index 921dae5b..5918ef70 100644 --- a/drivers/staging/vt6656/dpc.c +++ b/drivers/staging/vt6656/dpc.c @@ -1256,7 +1256,7 @@ static BOOL s_bHandleRxEncryption ( PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16); if (byDecMode == KEY_CTL_TKIP) { *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV)); } else { @@ -1367,7 +1367,7 @@ static BOOL s_bHostWepRxEncryption ( PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16); if (byDecMode == KEY_CTL_TKIP) { *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV)); diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index 27bb523c..fd93e837 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -223,7 +223,7 @@ BOOL KeybSetKey( PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -235,7 +235,8 @@ BOOL KeybSetKey( PSKeyItem pKey; unsigned int uKeyIdx; - DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Enter KeybSetKey: %X\n", dwKeyIndex); j = (MAX_KEY_TABLE-1); for (i=0;i<(MAX_KEY_TABLE-1);i++) { @@ -261,7 +262,9 @@ BOOL KeybSetKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4); @@ -302,9 +305,12 @@ BOOL KeybSetKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ", + pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", + pKey->wTSC15_0); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ", + pKey->dwKeyIndex); return (TRUE); } @@ -326,7 +332,9 @@ BOOL KeybSetKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(N)[%X]: %d\n", + pTable->KeyTable[j].dwGTKeyIndex, j); } pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4); @@ -367,9 +375,11 @@ BOOL KeybSetKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ", + pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ", + pKey->dwKeyIndex); return (TRUE); } @@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType, DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n", + pTable->KeyTable[i].dwGTKeyIndex); return (TRUE); } @@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -693,7 +704,10 @@ BOOL KeybSetDefaultKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, + MAX_KEY_TABLE-1); } pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed @@ -744,9 +758,11 @@ BOOL KeybSetDefaultKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n", + pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n", + pKey->dwKeyIndex); return (TRUE); } @@ -772,7 +788,7 @@ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -784,7 +800,8 @@ BOOL KeybSetAllGroupKey( PSKeyItem pKey; unsigned int uKeyIdx; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n", + dwKeyIndex); if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key @@ -801,7 +818,9 @@ BOOL KeybSetAllGroupKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h index f749c7a0..bd35d396 100644 --- a/drivers/staging/vt6656/key.h +++ b/drivers/staging/vt6656/key.h @@ -58,7 +58,7 @@ typedef struct tagSKeyItem { BOOL bKeyValid; - unsigned long uKeyLength; + u32 uKeyLength; BYTE abyKey[MAX_KEY_LEN]; QWORD KeyRSC; DWORD dwTSC47_16; @@ -107,7 +107,7 @@ BOOL KeybSetKey( PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c index 26c19d14..0636d824 100644 --- a/drivers/staging/vt6656/mac.c +++ b/drivers/staging/vt6656/mac.c @@ -262,7 +262,8 @@ BYTE pbyData[24]; dwData1 <<= 16; dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\ + " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl); //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); @@ -279,7 +280,8 @@ BYTE pbyData[24]; dwData2 <<= 8; dwData2 |= *(pbyAddr+0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. 
wOffset: %d, Data: %X\n", + wOffset, dwData2); //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index 3fd0478a..8cf08818 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr; return TRUE; } + if (uCH == 0) + return -EINVAL; + switch (uRATE) { case RATE_1M: case RATE_2M: diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index fe218689..3beb126e 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -377,7 +377,8 @@ s_vFillTxKey ( *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV // Append IV&ExtIV after Mac Header *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n", + *pdwExtIV); } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { pTransmitKey->wTSC15_0++; @@ -1753,7 +1754,8 @@ s_bPacketToWirelessUsb( MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((PBYTE)&dwMIC_Priority, 4); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n", + dwMICKey0, dwMICKey1); /////////////////////////////////////////////////////////////////// @@ -2635,7 +2637,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) { MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((PBYTE)&dwMIC_Priority, 4); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\ + " %X, %X\n", dwMICKey0, dwMICKey1); uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen; @@ -2655,7 +2658,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n", + *pdwMIC_L, *pdwMIC_R); } @@ -3029,7 +3033,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n"); } else { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n", + pTransmitKey->dwKeyIndex); bNeedEncryption = TRUE; } } @@ -3043,7 +3048,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) if (pDevice->bEnableHostWEP) { if ((uNodeIndex != 0) && (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n", + pTransmitKey->dwKeyIndex); bNeedEncryption = TRUE; } } diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h index 8e9450ef..dfbf7471 100644 --- a/drivers/staging/vt6656/ttype.h +++ b/drivers/staging/vt6656/ttype.h @@ -29,6 +29,8 @@ #ifndef __TTYPE_H__ #define __TTYPE_H__ +#include + /******* Common definitions and typedefs ***********************************/ typedef int BOOL; @@ -42,17 +44,17 @@ typedef int BOOL; 
/****** Simple typedefs ***************************************************/ -typedef unsigned char BYTE; // 8-bit -typedef unsigned short WORD; // 16-bit -typedef unsigned long DWORD; // 32-bit +typedef u8 BYTE; +typedef u16 WORD; +typedef u32 DWORD; // QWORD is for those situation that we want // an 8-byte-aligned 8 byte long structure // which is NOT really a floating point number. typedef union tagUQuadWord { struct { - DWORD dwLowDword; - DWORD dwHighDword; + u32 dwLowDword; + u32 dwHighDword; } u; double DoNotUseThisField; } UQuadWord; @@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit /****** Common pointer types ***********************************************/ -typedef unsigned long ULONG_PTR; // 32-bit -typedef unsigned long DWORD_PTR; // 32-bit +typedef u32 ULONG_PTR; +typedef u32 DWORD_PTR; // boolean pointer diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 78ea121b..31fb96a5 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -316,17 +316,19 @@ s_MgrMakeProbeRequest( return pTxPacket; } -void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond) +void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond) { - PSDevice pDevice = (PSDevice)hDeviceContext; + PSDevice pDevice = (PSDevice)hDeviceContext; - init_timer(&pDevice->sTimerCommand); - pDevice->sTimerCommand.data = (unsigned long)pDevice; - pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; - // RUN_AT :1 msec ~= (HZ/1024) - pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10); - add_timer(&pDevice->sTimerCommand); - return; + init_timer(&pDevice->sTimerCommand); + + pDevice->sTimerCommand.data = (unsigned long)pDevice; + pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; + pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000); + + add_timer(&pDevice->sTimerCommand); + + return; } void vRunCommand(void *hDeviceContext) diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h index 46c29590..c359252a 100644 --- a/drivers/staging/vt6656/wpa2.h +++ b/drivers/staging/vt6656/wpa2.h @@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo { } PMKIDInfo, *PPMKIDInfo; typedef struct tagSPMKIDCache { - unsigned long BSSIDInfoCount; - PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE]; + u32 BSSIDInfoCount; + PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE]; } SPMKIDCache, *PSPMKIDCache; diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index a40541c3..087af197 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -468,7 +468,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu) { struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); - transport_deregister_session(sess->se_sess); kfree(sess); } @@ -476,6 +475,7 @@ static void ft_sess_free(struct kref *kref) { struct ft_sess *sess = container_of(kref, struct ft_sess, kref); + transport_deregister_session(sess->se_sess); call_rcu(&sess->rcu, ft_sess_rcu_free); } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index b107339d..84e69ea2 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1509,6 +1509,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */ .driver_info = NO_UNION_NORMAL, }, + { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */ + .driver_info = NO_UNION_NORMAL, + }, { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ .driver_info = NO_UNION_NORMAL, /* reports zero 
length descriptor */ }, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b3f63879..e0ca6276 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2122,7 +2122,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define HUB_ROOT_RESET_TIME 50 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 #define HUB_LONG_RESET_TIME 200 -#define HUB_RESET_TIMEOUT 500 +#define HUB_RESET_TIMEOUT 800 static int hub_port_wait_reset(struct usb_hub *hub, int port1, struct usb_device *udev, unsigned int delay) @@ -2501,7 +2501,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) static int finish_port_resume(struct usb_device *udev) { int status = 0; - u16 devstatus; + u16 devstatus = 0; /* caller owns the udev device lock */ dev_dbg(&udev->dev, "%s\n", @@ -2546,7 +2546,13 @@ static int finish_port_resume(struct usb_device *udev) if (status) { dev_dbg(&udev->dev, "gone after usb resume? status %d\n", status); - } else if (udev->actconfig) { + /* + * There are a few quirky devices which violate the standard + * by claiming to have remote wakeup enabled after a reset, + * which crash if the feature is cleared, hence check for + * udev->reset_resume + */ + } else if (udev->actconfig && !udev->reset_resume) { le16_to_cpus(&devstatus); if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { status = usb_control_msg(udev, diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index d3dcabc1..90977fc0 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -122,10 +122,7 @@ static const char ep0name [] = "ep0"; static const char *const ep_name [] = { ep0name, /* everyone has ep0 */ - /* act like a net2280: high speed, six configurable endpoints */ - "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f", - - /* or like pxa250: fifteen fixed function endpoints */ + /* act like a pxa250: fifteen fixed function endpoints */ "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int", "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int", "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso", @@ -133,6 +130,10 @@ static const char *const ep_name [] = { /* or like sa1100: two fixed function endpoints */ "ep1out-bulk", "ep2in-bulk", + + /* and now some generic EPs so we have enough in multi config */ + "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in", + "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out", }; #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index a44f2d45..a2b20fe8 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1026,6 +1026,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { + if (ep->desc.bInterval == 0) + return 0; return xhci_microframes_to_exponent(udev, ep, ep->desc.bInterval, 0, 15); } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c3770e5c..c855a4af 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -879,6 +879,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, + /* Crucible Devices */ + { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; 
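
The Crucible Comet hookup above is the usual two-step recipe for teaching an existing USB serial driver about new hardware: add a product-ID macro to the IDs header (the ftdi_sio_ids.h hunk that follows) and a matching entry to the driver's device table. Below is a minimal, self-contained sketch of that pattern; FTDI_VID (0x0403) and FTDI_CT_COMET_PID (0x8e08) are the real identifiers used by this series, while the table name and module boilerplate are illustrative only, not taken from the patch.

	#include <linux/module.h>
	#include <linux/usb.h>

	#define FTDI_VID		0x0403	/* FTDI vendor ID */
	#define FTDI_CT_COMET_PID	0x8e08	/* Comet Caller ID decoder */

	static const struct usb_device_id demo_id_table[] = {
		{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, demo_id_table);

Devices that need special handling (as with the Alcatel and ZTE entries added to option.c later in this series) additionally stash a quirk pointer in .driver_info, which the driver consults at probe time.
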
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index aedf65fc..dd6edf86 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1259,3 +1259,9 @@ * ATI command output: Cinterion MC55i */ #define FTDI_CINTERION_MC55I_PID 0xA951 + +/* + * Product: Comet Caller ID decoder + * Manufacturer: Crucible Technologies + */ +#define FTDI_CT_COMET_PID 0x8e08 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 872807ba..f233bbbf 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -288,6 +288,7 @@ static void option_instat_callback(struct urb *urb); #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S_X200 0x0000 #define ALCATEL_PRODUCT_X220_X500D 0x0017 +#define ALCATEL_PRODUCT_L100V 0x011e #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 @@ -429,9 +430,12 @@ static void option_instat_callback(struct urb *urb); #define MEDIATEK_VENDOR_ID 0x0e8d #define MEDIATEK_PRODUCT_DC_1COM 0x00a0 #define MEDIATEK_PRODUCT_DC_4COM 0x00a5 +#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7 #define MEDIATEK_PRODUCT_DC_5COM 0x00a4 #define MEDIATEK_PRODUCT_7208_1COM 0x7101 #define MEDIATEK_PRODUCT_7208_2COM 0x7102 +#define MEDIATEK_PRODUCT_7103_2COM 0x7103 +#define MEDIATEK_PRODUCT_7106_2COM 0x7106 #define MEDIATEK_PRODUCT_FP_1COM 0x0003 #define MEDIATEK_PRODUCT_FP_2COM 0x0023 #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043 @@ -441,6 +445,10 @@ static void option_instat_callback(struct urb *urb); #define CELLIENT_VENDOR_ID 0x2692 #define CELLIENT_PRODUCT_MEN200 0x9005 +/* Hyundai Petatel Inc. products */ +#define PETATEL_VENDOR_ID 0x1ff4 +#define PETATEL_PRODUCT_NP10T 0x600e + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -923,7 +931,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, @@ -1190,6 +1199,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), @@ -1294,7 +1305,12 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 0b2f2dd4..2f902900 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -365,7 +365,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info) loop--; } - writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR); + reg = readl(host->base + LCDC_VDCTRL4); + writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4); clk_disable(host->clk); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 35a852a2..35a1a614 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1197,9 +1197,29 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even * otherwise we might miss an event that happens between the * f_op->poll() call and the new event set registering. */ - epi->event.events = event->events; + epi->event.events = event->events; /* need barrier below */ epi->event.data = event->data; /* protected by mtx */ + /* + * The following barrier has two effects: + * + * 1) Flush epi changes above to other CPUs. This ensures + * we do not miss events from ep_poll_callback if an + * event occurs immediately after we call f_op->poll(). + * We need this because we did not take ep->lock while + * changing epi above (but ep_poll_callback does take + * ep->lock). + * + * 2) We also need to ensure we do not miss _past_ events + * when calling f_op->poll(). This barrier also + * pairs with the barrier in wq_has_sleeper (see + * comments for wq_has_sleeper). + * + * This barrier will now guarantee ep_poll_callback or f_op->poll + * (or both) will notice the readiness of an item. + */ + smp_mb(); + /* * Get current event bits. We can safely use the file* here because * its usage count has been increased by the caller of this function. diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 21eacd7b..4922087b 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -450,8 +450,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, retry: handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + error = PTR_ERR(handle); + goto release_and_out; + } error = ext4_set_acl(handle, inode, type, acl); ext4_journal_stop(handle); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 57694395..028fc859 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2144,13 +2144,14 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, * last index in the block only. 
*/ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path) + struct ext4_ext_path *path, int depth) { int err; ext4_fsblk_t leaf; /* free index block */ - path--; + depth--; + path = path + depth; leaf = ext4_idx_pblock(path->p_idx); if (unlikely(path->p_hdr->eh_entries == 0)) { EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); @@ -2166,6 +2167,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, ext_debug("index is empty, remove it, free block %llu\n", leaf); ext4_free_blocks(handle, inode, NULL, leaf, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); + + while (--depth >= 0) { + if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) + break; + path--; + err = ext4_ext_get_access(handle, inode, path); + if (err) + break; + path->p_idx->ei_block = (path+1)->p_idx->ei_block; + err = ext4_ext_dirty(handle, inode, path); + if (err) + break; + } return err; } @@ -2513,7 +2527,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, /* if this leaf is free, then we should * remove it from index block above */ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) - err = ext4_ext_rm_idx(handle, inode, path + depth); + err = ext4_ext_rm_idx(handle, inode, path, depth); out: return err; @@ -2643,7 +2657,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, /* index is empty, remove it; * handle must be already prepared by the * truncatei_leaf() */ - err = ext4_ext_rm_idx(handle, inode, path + i); + err = ext4_ext_rm_idx(handle, inode, path, i); } /* root level has p_bh == NULL, brelse() eats this */ brelse(path[i].p_bh); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 21f9173e..23369664 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2248,7 +2248,9 @@ static void ext4_orphan_cleanup(struct super_block *sb, __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); + mutex_lock(&inode->i_mutex); ext4_truncate(inode); + mutex_unlock(&inode->i_mutex); nr_truncates++; } else { ext4_msg(sb, KERN_DEBUG, @@ -4482,7 +4484,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } ext4_setup_system_zone(sb); - if (sbi->s_journal == NULL) + if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) ext4_commit_super(sb, 1); #ifdef CONFIG_QUOTA diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 05bbb124..c1332533 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -139,16 +139,14 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) struct gfs2_meta_header *mh; struct gfs2_trans *tr; - lock_buffer(bd->bd_bh); - gfs2_log_lock(sdp); if (!list_empty(&bd->bd_list_tr)) - goto out; + return; tr = current->journal_info; tr->tr_touched = 1; tr->tr_num_buf++; list_add(&bd->bd_list_tr, &tr->tr_list_buf); if (!list_empty(&le->le_list)) - goto out; + return; set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); gfs2_meta_check(sdp, bd->bd_bh); @@ -159,9 +157,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) sdp->sd_log_num_buf++; list_add(&le->le_list, &sdp->sd_log_le_buf); tr->tr_num_buf_new++; -out: - gfs2_log_unlock(sdp); - unlock_buffer(bd->bd_bh); } static void buf_lo_before_commit(struct gfs2_sbd *sdp) @@ -528,11 +523,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) struct address_space *mapping = bd->bd_bh->b_page->mapping; struct gfs2_inode *ip = GFS2_I(mapping->host); - lock_buffer(bd->bd_bh); - 
gfs2_log_lock(sdp); if (tr) { if (!list_empty(&bd->bd_list_tr)) - goto out; + return; tr->tr_touched = 1; if (gfs2_is_jdata(ip)) { tr->tr_num_buf++; @@ -540,7 +533,7 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) } } if (!list_empty(&le->le_list)) - goto out; + return; set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); @@ -552,9 +545,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) } else { list_add_tail(&le->le_list, &sdp->sd_log_le_ordered); } -out: - gfs2_log_unlock(sdp); - unlock_buffer(bd->bd_bh); } static void gfs2_check_magic(struct buffer_head *bh) diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 9ec73a85..e6453c31 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -145,14 +145,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta) struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_bufdata *bd; + lock_buffer(bh); + gfs2_log_lock(sdp); bd = bh->b_private; if (bd) gfs2_assert(sdp, bd->bd_gl == gl); else { + gfs2_log_unlock(sdp); + unlock_buffer(bh); gfs2_attach_bufdata(gl, bh, meta); bd = bh->b_private; + lock_buffer(bh); + gfs2_log_lock(sdp); } lops_add(sdp, &bd->bd_le); + gfs2_log_unlock(sdp); + unlock_buffer(bh); } void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 577fc74a..24eef7b4 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -184,7 +184,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle, write_unlock(&journal->j_state_lock); goto repeat; } - if (!journal->j_running_transaction) { + if (!journal->j_running_transaction && + !journal->j_barrier_count) { jbd2_get_transaction(journal, new_transaction); new_transaction = NULL; } diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index 694aa5b0..e304795b 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -355,14 +355,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, spin_unlock(&c->erase_completion_lock); ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); - if (ret) - return ret; + /* Just lock it again and continue. Nothing much can change because we hold c->alloc_sem anyway. In fact, it's not entirely clear why we hold c->erase_completion_lock in the majority of this function... but that's a question for another (more caffeine-rich) day. */ spin_lock(&c->erase_completion_lock); + if (ret) + return ret; + waste = jeb->free_size; jffs2_link_node_ref(c, jeb, (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 262050f2..957c9747 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -575,6 +575,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, struct udf_inode_info *iinfo = UDF_I(inode); int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; int lastblock = 0; + bool isBeyondEOF; prev_epos.offset = udf_file_entry_alloc_offset(inode); prev_epos.block = iinfo->i_location; @@ -653,7 +654,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, /* Are we beyond EOF? */ if (etype == -1) { int ret; - + isBeyondEOF = 1; if (count) { if (c) laarr[0] = laarr[1]; @@ -696,6 +697,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, endnum = c + 1; lastblock = 1; } else { + isBeyondEOF = 0; endnum = startnum = ((count > 2) ? 
2 : count); /* if the current extent is in position 0, @@ -738,10 +740,13 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, goal, err); if (!newblocknum) { brelse(prev_epos.bh); + brelse(cur_epos.bh); + brelse(next_epos.bh); *err = -ENOSPC; return NULL; } - iinfo->i_lenExtents += inode->i_sb->s_blocksize; + if (isBeyondEOF) + iinfo->i_lenExtents += inode->i_sb->s_blocksize; } /* if the extent the requsted block is located in contains multiple @@ -768,6 +773,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); brelse(prev_epos.bh); + brelse(cur_epos.bh); + brelse(next_epos.bh); newblock = udf_get_pblock(inode->i_sb, newblocknum, iinfo->i_location.partitionReferenceNum, 0); diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 0ee969a5..61a48b56 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -137,6 +137,7 @@ enum { IFLA_AF_SPEC, IFLA_GROUP, /* Group the device belongs to */ IFLA_NET_NS_FD, + IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ __IFLA_MAX }; diff --git a/include/linux/netlink.h b/include/linux/netlink.h index a9dd8955..fdd0188a 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -221,7 +221,8 @@ struct netlink_callback { int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); - int family; + u16 family; + u16 min_dump_alloc; long args[6]; }; @@ -259,7 +260,8 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, int (*dump)(struct sk_buff *skb, struct netlink_callback*), - int (*done)(struct netlink_callback*)); + int (*done)(struct netlink_callback*), + u16 min_dump_alloc); #define NL_NONROOT_RECV 0x1 diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index bbad657a..5415dfb2 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -600,6 +600,9 @@ struct tcamsg { #define TCA_ACT_TAB 1 /* attr type must be >=1 */ #define TCAA_MAX 1 +/* New extended info filters for IFLA_EXT_MASK */ +#define RTEXT_FILTER_VF (1 << 0) + /* End of information exported to user level */ #ifdef __KERNEL__ diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 4093ca78..37029390 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -6,11 +6,14 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); +typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *); extern int __rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func); + rtnl_doit_func, rtnl_dumpit_func, + rtnl_calcit_func); extern void rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func); + rtnl_doit_func, rtnl_dumpit_func, + rtnl_calcit_func); extern int rtnl_unregister(int protocol, int msgtype); extern void rtnl_unregister_all(int protocol); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index b0c7aa40..20dff64b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2926,6 +2926,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) * Splice the empty reader page into the list around the head. 
*/ reader = rb_set_head_page(cpu_buffer); + if (!reader) + goto out; cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); cpu_buffer->reader_page->list.prev = reader->list.prev; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8cc11dda..a9ab45ec 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -920,6 +920,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, count_vm_event(THP_FAULT_FALLBACK); ret = do_huge_pmd_wp_page_fallback(mm, vma, address, pmd, orig_pmd, page, haddr); + if (ret & VM_FAULT_OOM) + split_huge_page(page); put_page(page); goto out; } @@ -927,6 +929,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { put_page(new_page); + split_huge_page(page); put_page(page); ret |= VM_FAULT_OOM; goto out; diff --git a/mm/memory.c b/mm/memory.c index 7292acb9..4da0f8ad 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3470,6 +3470,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(is_vm_hugetlb_page(vma))) return hugetlb_fault(mm, vma, address, flags); +retry: pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) @@ -3483,13 +3484,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, pmd, flags); } else { pmd_t orig_pmd = *pmd; + int ret; + barrier(); if (pmd_trans_huge(orig_pmd)) { if (flags & FAULT_FLAG_WRITE && !pmd_write(orig_pmd) && - !pmd_trans_splitting(orig_pmd)) - return do_huge_pmd_wp_page(mm, vma, address, - pmd, orig_pmd); + !pmd_trans_splitting(orig_pmd)) { + ret = do_huge_pmd_wp_page(mm, vma, address, pmd, + orig_pmd); + /* + * If COW results in an oom, the huge pmd will + * have been split, so retry the fault on the + * pte for a smaller charge. 
+ */ + if (unlikely(ret & VM_FAULT_OOM)) + goto retry; + return ret; + } return 0; } } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a260aaed..74316b03 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5526,7 +5526,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) pfn &= (PAGES_PER_SECTION-1); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #else - pfn = pfn - zone->zone_start_pfn; + pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #endif /* CONFIG_SPARSEMEM */ } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 71861a9c..d372df23 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -219,19 +219,24 @@ int __init br_netlink_init(void) if (err < 0) goto err1; - err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo); + err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, + br_dump_ifinfo, NULL); if (err) goto err2; - err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); + err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, + br_rtm_setlink, NULL, NULL); if (err) goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL); + err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, + br_fdb_add, NULL, NULL); if (err) goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL); + err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, + br_fdb_delete, NULL, NULL); if (err) goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump); + err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, + NULL, br_fdb_dump, NULL); if (err) goto err3; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index f39ef5c6..3231b468 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -740,9 +740,9 @@ static struct pernet_operations fib_rules_net_ops = { static int __init fib_rules_init(void) { int err; - rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL); - rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL); - rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule); + rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL); err = register_pernet_subsys(&fib_rules_net_ops); if (err < 0) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 65c3cbb5..4a74c8cc 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2924,12 +2924,13 @@ EXPORT_SYMBOL(neigh_sysctl_unregister); static int __init neigh_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL); - rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL); - rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info); + rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL); - rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info); - rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL); + rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, + NULL); + rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL); return 0; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ac49ad51..49f281e6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -56,6 +56,7 @@ struct rtnl_link { rtnl_doit_func doit; rtnl_dumpit_func dumpit; + 
rtnl_calcit_func calcit; }; static DEFINE_MUTEX(rtnl_mutex); @@ -144,12 +145,28 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) return tab ? tab[msgindex].dumpit : NULL; } +static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) +{ + struct rtnl_link *tab; + + if (protocol <= RTNL_FAMILY_MAX) + tab = rtnl_msg_handlers[protocol]; + else + tab = NULL; + + if (tab == NULL || tab[msgindex].calcit == NULL) + tab = rtnl_msg_handlers[PF_UNSPEC]; + + return tab ? tab[msgindex].calcit : NULL; +} + /** * __rtnl_register - Register a rtnetlink message type * @protocol: Protocol family or PF_UNSPEC * @msgtype: rtnetlink message type * @doit: Function pointer called for each request message * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message + * @calcit: Function pointer to calc size of dump message * * Registers the specified function pointers (at least one of them has * to be non-NULL) to be called whenever a request message for the @@ -162,7 +179,8 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) * Returns 0 on success or a negative error code. */ int __rtnl_register(int protocol, int msgtype, - rtnl_doit_func doit, rtnl_dumpit_func dumpit) + rtnl_doit_func doit, rtnl_dumpit_func dumpit, + rtnl_calcit_func calcit) { struct rtnl_link *tab; int msgindex; @@ -185,6 +203,9 @@ int __rtnl_register(int protocol, int msgtype, if (dumpit) tab[msgindex].dumpit = dumpit; + if (calcit) + tab[msgindex].calcit = calcit; + return 0; } EXPORT_SYMBOL_GPL(__rtnl_register); @@ -199,9 +220,10 @@ EXPORT_SYMBOL_GPL(__rtnl_register); * of memory implies no sense in continuing. */ void rtnl_register(int protocol, int msgtype, - rtnl_doit_func doit, rtnl_dumpit_func dumpit) + rtnl_doit_func doit, rtnl_dumpit_func dumpit, + rtnl_calcit_func calcit) { - if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0) + if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0) panic("Unable to register rtnetlink message handler, " "protocol = %d, message type = %d\n", protocol, msgtype); @@ -704,10 +726,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) } /* All VF info */ -static inline int rtnl_vfinfo_size(const struct net_device *dev) +static inline int rtnl_vfinfo_size(const struct net_device *dev, + u32 ext_filter_mask) { - if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { - + if (dev->dev.parent && dev_is_pci(dev->dev.parent) && + (ext_filter_mask & RTEXT_FILTER_VF)) { int num_vfs = dev_num_vf(dev->dev.parent); size_t size = nla_total_size(sizeof(struct nlattr)); size += nla_total_size(num_vfs * sizeof(struct nlattr)); @@ -745,7 +768,8 @@ static size_t rtnl_port_size(const struct net_device *dev) return port_self_size; } -static noinline size_t if_nlmsg_size(const struct net_device *dev) +static noinline size_t if_nlmsg_size(const struct net_device *dev, + u32 ext_filter_mask) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ @@ -763,8 +787,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev) + nla_total_size(4) /* IFLA_MASTER */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ - + nla_total_size(4) /* IFLA_NUM_VF */ - + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */ + + nla_total_size(ext_filter_mask + & RTEXT_FILTER_VF ? 
4 : 0) /* IFLA_NUM_VF */ + + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */ @@ -847,7 +872,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev) static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, - unsigned int flags) + unsigned int flags, u32 ext_filter_mask) { struct ifinfomsg *ifm; struct nlmsghdr *nlh; @@ -920,10 +945,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, goto nla_put_failure; copy_rtnl_link_stats64(nla_data(attr), stats); - if (dev->dev.parent) + if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); - if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { + if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent + && (ext_filter_mask & RTEXT_FILTER_VF)) { int i; struct nlattr *vfinfo, *vf; @@ -1010,11 +1036,21 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) struct net_device *dev; struct hlist_head *head; struct hlist_node *node; + struct nlattr *tb[IFLA_MAX+1]; + u32 ext_filter_mask = 0; s_h = cb->args[0]; s_idx = cb->args[1]; rcu_read_lock(); + + if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + ifla_policy) >= 0) { + + if (tb[IFLA_EXT_MASK]) + ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); + } + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &net->dev_index_head[h]; @@ -1024,7 +1060,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0, - NLM_F_MULTI) <= 0) + NLM_F_MULTI, + ext_filter_mask) <= 0) goto out; cont: idx++; @@ -1058,6 +1095,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_VF_PORTS] = { .type = NLA_NESTED }, [IFLA_PORT_SELF] = { .type = NLA_NESTED }, [IFLA_AF_SPEC] = { .type = NLA_NESTED }, + [IFLA_EXT_MASK] = { .type = NLA_U32 }, }; EXPORT_SYMBOL(ifla_policy); @@ -1790,6 +1828,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) struct net_device *dev = NULL; struct sk_buff *nskb; int err; + u32 ext_filter_mask = 0; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); if (err < 0) @@ -1798,6 +1837,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); + if (tb[IFLA_EXT_MASK]) + ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); + ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); @@ -1809,12 +1851,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (dev == NULL) return -ENODEV; - nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); + nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); if (nskb == NULL) return -ENOBUFS; err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, - nlh->nlmsg_seq, 0, 0); + nlh->nlmsg_seq, 0, 0, ext_filter_mask); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size */ WARN_ON(err == -EMSGSIZE); @@ -1825,6 +1867,35 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) return err; } +static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + struct net *net = sock_net(skb->sk); + struct net_device 
*dev; + struct nlattr *tb[IFLA_MAX+1]; + u32 ext_filter_mask = 0; + u16 min_ifinfo_dump_size = 0; + + if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + ifla_policy) >= 0) { + if (tb[IFLA_EXT_MASK]) + ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); + } + + if (!ext_filter_mask) + return NLMSG_GOODSIZE; + /* + * traverse the list of net devices and compute the minimum + * buffer size based upon the filter mask. + */ + list_for_each_entry(dev, &net->dev_base_head, dev_list) { + min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size, + if_nlmsg_size(dev, + ext_filter_mask)); + } + + return min_ifinfo_dump_size; +} + static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) { int idx; @@ -1854,12 +1925,13 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOBUFS; + size_t if_info_size; - skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); + skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL); if (skb == NULL) goto errout; - err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0); + err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -1909,14 +1981,20 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { struct sock *rtnl; rtnl_dumpit_func dumpit; + rtnl_calcit_func calcit; + u16 min_dump_alloc = 0; dumpit = rtnl_get_dumpit(family, type); if (dumpit == NULL) return -EOPNOTSUPP; + calcit = rtnl_get_calcit(family, type); + if (calcit) + min_dump_alloc = calcit(skb, nlh); __rtnl_unlock(); rtnl = net->rtnl; - err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); + err = netlink_dump_start(rtnl, skb, nlh, dumpit, + NULL, min_dump_alloc); rtnl_lock(); return err; } @@ -2026,12 +2104,13 @@ void __init rtnetlink_init(void) netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV); register_netdevice_notifier(&rtnetlink_dev_notifier); - rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo); - rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL); - rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL); - rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL); + rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, + rtnl_dump_ifinfo, rtnl_calcit); + rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all); - rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); + rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); + rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); } diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 3609eaca..ed1bb8c6 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1819,8 +1819,8 @@ static int __init dcbnl_init(void) { INIT_LIST_HEAD(&dcb_app_list); - rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL); - rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL); + rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL); return 0; } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index cf26ac74..3780fd6e 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1414,9 +1414,9 @@ void __init dn_dev_init(void) dn_dev_devices_on(); - rtnl_register(PF_DECnet, 
RTM_NEWADDR, dn_nl_newaddr, NULL); - rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL); - rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr); + rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL); + rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL); + rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL); proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops); diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 1c74ed36..104324d6 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -763,8 +763,8 @@ void __init dn_fib_init(void) register_dnaddr_notifier(&dn_fib_dnaddr_notifier); - rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL); - rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL); + rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, NULL); + rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, NULL); } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index b91b6036..82d62507 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1843,10 +1843,11 @@ void __init dn_route_init(void) proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops); #ifdef CONFIG_DECNET_ROUTER - rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump); + rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, + dn_fib_dump, NULL); #else rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, - dn_cache_dump); + dn_cache_dump, NULL); #endif } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index c48323ad..76db5920 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1844,8 +1844,8 @@ void __init devinet_init(void) rtnl_af_register(&inet_af_ops); - rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL); - rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL); - rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); + rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL); + rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); + rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 22524716..92fc5f69 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1124,9 +1124,9 @@ static struct pernet_operations fib_net_ops = { void __init ip_fib_init(void) { - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL); - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL); - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib); + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); register_pernet_subsys(&fib_net_ops); register_netdevice_notifier(&fib_netdev_notifier); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 3267d389..389a2e6a 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -869,7 +869,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } return netlink_dump_start(idiagnl, skb, nlh, - inet_diag_dump, NULL); + inet_diag_dump, NULL, 0); } return inet_diag_get_exact(skb, nlh); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index ec7d8e7e..dc897145 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2554,7 +2554,8 @@ int __init ip_mr_init(void) goto add_proto_fail; } #endif - rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, 
ipmr_rtm_dumproute); + rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, + NULL, ipmr_rtm_dumproute, NULL); return 0; #ifdef CONFIG_IP_PIMSM_V2 diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 54b5c6be..ebff20a4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3470,7 +3470,7 @@ int __init ip_rt_init(void) xfrm_init(); xfrm4_init(ip_rt_max_size); #endif - rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); #ifdef CONFIG_SYSCTL register_pernet_subsys(&sysctl_route_ops); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a7c19466..d717c7b7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4732,16 +4732,20 @@ int __init addrconf_init(void) if (err < 0) goto errout_af; - err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo); + err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo, + NULL); if (err < 0) goto errout; /* Only the first call to __rtnl_register can fail */ - __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL); - __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL); - __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, inet6_dump_ifaddr); - __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, inet6_dump_ifmcaddr); - __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr); + __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL); + __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL); + __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, + inet6_dump_ifaddr, NULL); + __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, + inet6_dump_ifmcaddr, NULL); + __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, + inet6_dump_ifacaddr, NULL); ipv6_addr_label_rtnl_register(); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index c8993e5a..2d8ddba9 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -592,8 +592,11 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, void __init ipv6_addr_label_rtnl_register(void) { - __rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel, NULL); - __rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel, NULL); - __rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get, ip6addrlbl_dump); + __rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel, + NULL, NULL); + __rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel, + NULL, NULL); + __rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get, + ip6addrlbl_dump, NULL); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 0f9b37a1..320d91d2 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1586,7 +1586,8 @@ int __init fib6_init(void) if (ret) goto out_kmem_cache_create; - ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); + ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib, + NULL); if (ret) goto out_unregister_subsys; out: diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 86e3cc10..def0538e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1356,7 +1356,8 @@ int __init ip6_mr_init(void) goto add_proto_fail; } #endif - rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute); + rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, + ip6mr_rtm_dumproute, NULL); return 0; #ifdef CONFIG_IPV6_PIMSM_V2 add_proto_fail: diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a7ab5613..abba5cbf 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2961,9 +2961,9 
@@ int __init ip6_route_init(void) goto fib6_rules_init; ret = -ENOBUFS; - if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) || - __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) || - __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL)) + if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) || + __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) || + __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL)) goto out_register_late_subsys; ret = register_netdevice_notifier(&ip6_route_dev_notifier); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ca7bf105..4b40a3ba 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -793,7 +793,7 @@ void sta_info_init(struct ieee80211_local *local) void sta_info_stop(struct ieee80211_local *local) { - del_timer(&local->sta_cleanup); + del_timer_sync(&local->sta_cleanup); sta_info_flush(local, NULL); } diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 42aa64b6..ee37ae5d 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1120,7 +1120,7 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb, return netlink_dump_start(ctnl, skb, nlh, ip_set_dump_start, - ip_set_dump_done); + ip_set_dump_done, 0); } /* Add, del and test */ diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c76e7e27..9492cd2d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -970,7 +970,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, if (nlh->nlmsg_flags & NLM_F_DUMP) return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, - ctnetlink_done); + ctnetlink_done, 0); err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); if (err < 0) @@ -1840,7 +1840,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, if (nlh->nlmsg_flags & NLM_F_DUMP) { return netlink_dump_start(ctnl, skb, nlh, ctnetlink_exp_dump_table, - ctnetlink_exp_done); + ctnetlink_exp_done, 0); } err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index cc18a962..f2967333 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1684,13 +1684,10 @@ static int netlink_dump(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); struct netlink_callback *cb; - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct nlmsghdr *nlh; int len, err = -ENOBUFS; - - skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL); - if (!skb) - goto errout; + int alloc_size; mutex_lock(nlk->cb_mutex); @@ -1700,6 +1697,12 @@ static int netlink_dump(struct sock *sk) goto errout_skb; } + alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); + + skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL); + if (!skb) + goto errout; + len = cb->dump(skb, cb); if (len > 0) { @@ -1742,7 +1745,8 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, int (*dump)(struct sk_buff *skb, struct netlink_callback *), - int (*done)(struct netlink_callback *)) + int (*done)(struct netlink_callback *), + u16 min_dump_alloc) { struct netlink_callback *cb; struct sock *sk; @@ -1756,6 +1760,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->dump = dump; cb->done = done; cb->nlh = nlh; + cb->min_dump_alloc = min_dump_alloc; atomic_inc(&skb->users); cb->skb = skb; diff --git a/net/netlink/genetlink.c 
b/net/netlink/genetlink.c index 1781d991..482fa571 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -525,7 +525,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) genl_unlock(); err = netlink_dump_start(net->genl_sock, skb, nlh, - ops->dumpit, ops->done); + ops->dumpit, ops->done, 0); genl_lock(); return err; } diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 438accb7..d61f6761 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -289,15 +289,16 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) int __init phonet_netlink_register(void) { - int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); + int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, + NULL, NULL); if (err) return err; /* Further __rtnl_register() cannot fail */ - __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); - __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); - __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL); - __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL); - __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit); + __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, NULL); + __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, NULL); + __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, NULL); + __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, NULL); + __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, NULL); return 0; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index a6060258..2f64262a 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1115,9 +1115,10 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) static int __init tc_action_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL); - rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL); - rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action); + rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action, + NULL); return 0; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index bb2c523f..9563887f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -610,10 +610,10 @@ EXPORT_SYMBOL(tcf_exts_dump_stats); static int __init tc_filter_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL); - rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, - tc_dump_tfilter); + tc_dump_tfilter, NULL); return 0; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 6b862766..8182aefa 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1792,12 +1792,12 @@ static int __init pktsched_init(void) register_qdisc(&pfifo_head_drop_qdisc_ops); register_qdisc(&mq_qdisc_ops); - rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); - rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); - rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc); - rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL); - rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL); - rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass); + 
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL); return 0; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index c57f97f4..d7824ec3 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -844,16 +844,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) return task; } +/* + * rpc_free_task - release rpc task and perform cleanups + * + * Note that we free up the rpc_task _after_ rpc_release_calldata() + * in order to work around a workqueue dependency issue. + * + * Tejun Heo states: + * "Workqueue currently considers two work items to be the same if they're + * on the same address and won't execute them concurrently - ie. it + * makes a work item which is queued again while being executed wait + * for the previous execution to complete. + * + * If a work function frees the work item, and then waits for an event + * which should be performed by another work item and *that* work item + * recycles the freed work item, it can create a false dependency loop. + * There really is no reliable way to detect this short of verifying + * every memory free." + * + */ static void rpc_free_task(struct rpc_task *task) { - const struct rpc_call_ops *tk_ops = task->tk_ops; - void *calldata = task->tk_calldata; + unsigned short tk_flags = task->tk_flags; + + rpc_release_calldata(task->tk_ops, task->tk_calldata); - if (task->tk_flags & RPC_TASK_DYNAMIC) { + if (tk_flags & RPC_TASK_DYNAMIC) { dprintk("RPC: %5u freeing task\n", task->tk_pid); mempool_free(task, rpc_task_mempool); } - rpc_release_calldata(tk_ops, calldata); } static void rpc_async_release(struct work_struct *work) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ffe5768f..fd3e0a59 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2330,7 +2330,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (link->dump == NULL) return -EINVAL; - return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done); + return netlink_dump_start(net->xfrm.nlsk, skb, nlh, + link->dump, link->done, 0); } err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c index 88eec384..05bdcb28 100644 --- a/sound/arm/pxa2xx-ac97-lib.c +++ b/sound/arm/pxa2xx-ac97-lib.c @@ -146,6 +146,8 @@ static inline void pxa_ac97_warm_pxa27x(void) static inline void pxa_ac97_cold_pxa27x(void) { + unsigned int timeout; + GCR &= GCR_COLD_RST; /* clear everything but nCRST */ GCR &= ~GCR_COLD_RST; /* then assert nCRST */ @@ -155,8 +157,10 @@ static inline void pxa_ac97_cold_pxa27x(void) clk_enable(ac97conf_clk); udelay(5); clk_disable(ac97conf_clk); - GCR = GCR_COLD_RST; - udelay(50); + GCR = GCR_COLD_RST | GCR_WARM_RST; + timeout = 100; /* wait for the codec-ready bit to be set */ + while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--) + mdelay(1); } #endif diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a3b9cbb2..ba03dc20 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -224,9 +224,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) ret = wm2000_read(i2c, 
WM2000_REG_SPEECH_CLARITY); if (wm2000->speech_clarity) - ret &= ~WM2000_SPEECH_CLARITY; - else ret |= WM2000_SPEECH_CLARITY; + else + ret &= ~WM2000_SPEECH_CLARITY; wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret); wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33); From 6d46956d04af9a47a2aabd37344b1fd38cf61aa6 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 08:24:59 -0500 Subject: [PATCH 054/117] Linux 3.0.60 --- Makefile | 2 +- arch/powerpc/platforms/embedded6xx/wii.c | 6 +- arch/s390/include/asm/timex.h | 28 +++++++++ arch/s390/kernel/time.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/sh/include/asm/elf.h | 4 +- arch/x86/kernel/entry_32.S | 1 - arch/x86/kernel/setup.c | 78 ++++++++++++++++++++++++ drivers/block/drbd/drbd_req.c | 1 + drivers/pci/intel-iommu.c | 31 ++++++++++ drivers/staging/vt6656/bssdb.h | 1 - drivers/staging/vt6656/int.h | 1 - drivers/staging/vt6656/iocmd.h | 33 +++++----- drivers/staging/vt6656/iowpa.h | 8 +-- drivers/target/tcm_fc/tfc_sess.c | 12 +++- drivers/tty/serial/ifx6x60.c | 1 + drivers/usb/core/message.c | 53 ++++++++-------- drivers/usb/host/xhci-mem.c | 11 +++- drivers/usb/serial/option.c | 9 ++- fs/ext4/inode.c | 2 + 20 files changed, 224 insertions(+), 62 deletions(-) diff --git a/Makefile b/Makefile index 8b6923f3..2241b917 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 59 +SUBLEVEL = 60 EXTRAVERSION = NAME = Sneaky Weasel diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 1b5dc1a2..daf793b1 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -85,9 +85,11 @@ void __init wii_memory_fixups(void) wii_hole_start = p[0].base + p[0].size; wii_hole_size = p[1].base - wii_hole_start; - pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size); + pr_info("MEM1: <%08llx %08llx>\n", + (unsigned long long) p[0].base, (unsigned long long) p[0].size); pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size); - pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size); + pr_info("MEM2: <%08llx %08llx>\n", + (unsigned long long) p[1].base, (unsigned long long) p[1].size); p[0].size += wii_hole_size + p[1].size; diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 88829a40..a4b8f600 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -126,4 +126,32 @@ static inline unsigned long long get_clock_monotonic(void) return get_clock_xt() - sched_clock_base_cc; } +/** + * tod_to_ns - convert a TOD format value to nanoseconds + * @todval: to be converted TOD format value + * Returns: number of nanoseconds that correspond to the TOD format value + * + * Converting a 64 Bit TOD format value to nanoseconds means that the value + * must be divided by 4.096. In order to achieve that we multiply with 125 + * and divide by 512: + * + * ns = (todval * 125) >> 9; + * + * In order to avoid an overflow with the multiplication we can rewrite this. 
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) + * we end up with + * + * ns = ((2^32 * th + tl) * 125 ) >> 9; + * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); + * + */ +static inline unsigned long long tod_to_ns(unsigned long long todval) +{ + unsigned long long ns; + + ns = ((todval >> 32) << 23) * 125; + ns += ((todval & 0xffffffff) * 125) >> 9; + return ns; +} + #endif diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index dff93306..943ea0e4 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace __kprobes sched_clock(void) { - return (get_clock_monotonic() * 125) >> 9; + return tod_to_ns(get_clock_monotonic()); } /* diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 35c21bf9..a3db4c80 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -358,7 +358,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) return 0; } - sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9; + sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index f38112be..978b7fd6 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -202,9 +202,9 @@ extern void __kernel_vsyscall; if (vdso_enabled) \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ else \ - NEW_AUX_ENT(AT_IGNORE, 0); + NEW_AUX_ENT(AT_IGNORE, 0) #else -#define VSYSCALL_AUX_ENT +#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0) #endif /* CONFIG_VSYSCALL */ #ifdef CONFIG_SH_FPU diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 268b40d3..2df12522 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1078,7 +1078,6 @@ ENTRY(xen_failsafe_callback) lea 16(%esp),%esp CFI_ADJUST_CFA_OFFSET -16 jz 5f - addl $16,%esp jmp iret_exc 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index af19a614..eb9eb8be 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -631,6 +631,81 @@ static __init void reserve_ibft_region(void) static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; +static bool __init snb_gfx_workaround_needed(void) +{ + int i; + u16 vendor, devid; + static const u16 snb_ids[] = { + 0x0102, + 0x0112, + 0x0122, + 0x0106, + 0x0116, + 0x0126, + 0x010a, + }; + + /* Assume no if something weird is going on with PCI */ + if (!early_pci_allowed()) + return false; + + vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID); + if (vendor != 0x8086) + return false; + + devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID); + for (i = 0; i < ARRAY_SIZE(snb_ids); i++) + if (devid == snb_ids[i]) + return true; + + return false; +} + +/* + * Sandy Bridge graphics has trouble with certain ranges, exclude + * them from allocation. + */ +static void __init trim_snb_memory(void) +{ + static const unsigned long bad_pages[] = { + 0x20050000, + 0x20110000, + 0x20130000, + 0x20138000, + 0x40004000, + }; + int i; + + if (!snb_gfx_workaround_needed()) + return; + + printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n"); + + /* + * Reserve all memory below the 1 MB mark that has not + * already been reserved. 
+ */ + memblock_reserve(0, 1<<20); + + for (i = 0; i < ARRAY_SIZE(bad_pages); i++) { + if (memblock_reserve(bad_pages[i], PAGE_SIZE)) + printk(KERN_WARNING "failed to reserve 0x%08lx\n", + bad_pages[i]); + } +} + +/* + * Here we put platform-specific memory range workarounds, i.e. + * memory known to be corrupt or otherwise in need to be reserved on + * specific platforms. + * + * If this gets used more widely it could use a real dispatch mechanism. + */ +static void __init trim_platform_memory_ranges(void) +{ + trim_snb_memory(); +} + static void __init trim_bios_range(void) { /* @@ -651,6 +726,7 @@ static void __init trim_bios_range(void) * take them out. */ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } @@ -929,6 +1005,8 @@ void __init setup_arch(char **cmdline_p) setup_trampolines(); + trim_platform_memory_ranges(); + init_gbpages(); /* max_pfn_mapped is updated here */ diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3424d675..e59f5367 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req const int rw = bio_data_dir(bio); int cpu; cpu = part_stat_lock(); + part_round_stats(cpu, &mdev->vdisk->part0); part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]); part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio)); part_inc_in_flight(&mdev->vdisk->part0, rw); diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 3f9a8912..ae762ecc 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -2289,8 +2289,39 @@ static int domain_add_dev_info(struct dmar_domain *domain, return 0; } +static bool device_has_rmrr(struct pci_dev *dev) +{ + struct dmar_rmrr_unit *rmrr; + int i; + + for_each_rmrr_units(rmrr) { + for (i = 0; i < rmrr->devices_cnt; i++) { + /* + * Return TRUE if this RMRR contains the device that + * is passed in. + */ + if (rmrr->devices[i] == dev) + return true; + } + } + return false; +} + static int iommu_should_identity_map(struct pci_dev *pdev, int startup) { + + /* + * We want to prevent any device associated with an RMRR from + * getting placed into the SI Domain. This is done because + * problems exist when devices are moved in and out of domains + * and their respective RMRR info is lost. We exempt USB devices + * from this process due to their usage of RMRRs that are known + * to not be needed after BIOS hand-off to OS. 
+ */ + if (device_has_rmrr(pdev) && + (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) + return 0; + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return 1; diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h index a8f97ebb..991ce3ee 100644 --- a/drivers/staging/vt6656/bssdb.h +++ b/drivers/staging/vt6656/bssdb.h @@ -92,7 +92,6 @@ typedef struct tagSRSNCapObject { } SRSNCapObject, *PSRSNCapObject; // BSS info(AP) -#pragma pack(1) typedef struct tagKnownBSS { // BSS info BOOL bActive; diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h index 3176c8d0..c731b120 100644 --- a/drivers/staging/vt6656/int.h +++ b/drivers/staging/vt6656/int.h @@ -34,7 +34,6 @@ #include "device.h" /*--------------------- Export Definitions -------------------------*/ -#pragma pack(1) typedef struct tagSINTData { BYTE byTSR0; BYTE byPkt0; diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h index 22710cef..ae6e2d23 100644 --- a/drivers/staging/vt6656/iocmd.h +++ b/drivers/staging/vt6656/iocmd.h @@ -95,13 +95,12 @@ typedef enum tagWZONETYPE { // Ioctl interface structure // Command structure // -#pragma pack(1) typedef struct tagSCmdRequest { u8 name[16]; void *data; u16 wResult; u16 wCmdCode; -} SCmdRequest, *PSCmdRequest; +} __packed SCmdRequest, *PSCmdRequest; // // Scan @@ -111,7 +110,7 @@ typedef struct tagSCmdScan { u8 ssid[SSID_MAXLEN + 2]; -} SCmdScan, *PSCmdScan; +} __packed SCmdScan, *PSCmdScan; // // BSS Join @@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin { BOOL bPSEnable; BOOL bShareKeyAuth; -} SCmdBSSJoin, *PSCmdBSSJoin; +} __packed SCmdBSSJoin, *PSCmdBSSJoin; // // Zonetype Setting @@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet { BOOL bWrite; WZONETYPE ZoneType; -} SCmdZoneTypeSet, *PSCmdZoneTypeSet; +} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet; typedef struct tagSWPAResult { char ifname[100]; @@ -145,7 +144,7 @@ typedef struct tagSWPAResult { u8 key_mgmt; u8 eap_type; BOOL authenticated; -} SWPAResult, *PSWPAResult; +} __packed SWPAResult, *PSWPAResult; typedef struct tagSCmdStartAP { @@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP { BOOL bShareKeyAuth; u8 byBasicRate; -} SCmdStartAP, *PSCmdStartAP; +} __packed SCmdStartAP, *PSCmdStartAP; typedef struct tagSCmdSetWEP { @@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP { BOOL bWepKeyAvailable[WEP_NKEYS]; u32 auWepKeyLength[WEP_NKEYS]; -} SCmdSetWEP, *PSCmdSetWEP; +} __packed SCmdSetWEP, *PSCmdSetWEP; typedef struct tagSBSSIDItem { @@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem { BOOL bWEPOn; u32 uRSSI; -} SBSSIDItem; +} __packed SBSSIDItem; typedef struct tagSBSSIDList { u32 uItem; SBSSIDItem sBSSIDList[0]; -} SBSSIDList, *PSBSSIDList; +} __packed SBSSIDList, *PSBSSIDList; typedef struct tagSNodeItem { @@ -208,7 +207,7 @@ typedef struct tagSNodeItem { u32 uTxAttempts; u16 wFailureRatio; -} SNodeItem; +} __packed SNodeItem; typedef struct tagSNodeList { @@ -216,7 +215,7 @@ typedef struct tagSNodeList { u32 uItem; SNodeItem sNodeList[0]; -} SNodeList, *PSNodeList; +} __packed SNodeList, *PSNodeList; typedef struct tagSCmdLinkStatus { @@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus { u32 uChannel; u32 uLinkRate; -} SCmdLinkStatus, *PSCmdLinkStatus; +} __packed SCmdLinkStatus, *PSCmdLinkStatus; // // 802.11 counter @@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount { u32 ReceivedFragmentCount; u32 MulticastReceivedFrameCount; u32 FCSErrorCount; -} SDot11MIBCount, *PSDot11MIBCount; +} __packed SDot11MIBCount, *PSDot11MIBCount; @@ -355,13 +354,13 @@ 
typedef struct tagSStatMIBCount { u32 ullTxBroadcastBytes[2]; u32 ullTxMulticastBytes[2]; u32 ullTxDirectedBytes[2]; -} SStatMIBCount, *PSStatMIBCount; +} __packed SStatMIBCount, *PSStatMIBCount; typedef struct tagSCmdValue { u32 dwValue; -} SCmdValue, *PSCmdValue; +} __packed SCmdValue, *PSCmdValue; // // hostapd & viawget ioctl related @@ -431,7 +430,7 @@ struct viawget_hostapd_param { u8 ssid[32]; } scan_req; } u; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h index 959c8868..2522ddec 100644 --- a/drivers/staging/vt6656/iowpa.h +++ b/drivers/staging/vt6656/iowpa.h @@ -67,12 +67,11 @@ enum { -#pragma pack(1) typedef struct viawget_wpa_header { u8 type; u16 req_ie_len; u16 resp_ie_len; -} viawget_wpa_header; +} __packed viawget_wpa_header; struct viawget_wpa_param { u32 cmd; @@ -113,9 +112,8 @@ struct viawget_wpa_param { u8 *buf; } scan_results; } u; -}; +} __packed; -#pragma pack(1) struct viawget_scan_result { u8 bssid[6]; u8 ssid[32]; @@ -130,7 +128,7 @@ struct viawget_scan_result { int noise; int level; int maxrate; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 087af197..9a084b8d 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -393,11 +393,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, tport = ft_tport_create(rdata->local_port); if (!tport) - return 0; /* not a target for this local port */ + goto not_target; /* not a target for this local port */ acl = ft_acl_get(tport->tpg, rdata); if (!acl) - return 0; + goto not_target; /* no target for this remote */ if (!rspp) goto fill; @@ -434,12 +434,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, /* * OR in our service parameters with other provider (initiator), if any. - * TBD XXX - indicate RETRY capability? */ fill: fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_RETRY; spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN); return FC_SPP_RESP_ACK; + +not_target: + fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_TARG_FCN; + spp->spp_params = htonl(fcp_parm); + return 0; } /** diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 53155252..5d0d4f61 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -551,6 +551,7 @@ static void ifx_port_shutdown(struct tty_port *port) container_of(port, struct ifx_spi_device, tty_port); mrdy_set_low(ifx_dev); + del_timer(&ifx_dev->spi_timer); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); tasklet_kill(&ifx_dev->io_work_tasklet); } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 275d906e..29bcc46f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1770,30 +1770,6 @@ int usb_set_configuration(struct usb_device *dev, int configuration) goto free_interfaces; } - dev->actconfig = cp; - if (cp) - usb_notify_config_device(dev); - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_CONFIGURATION, 0, configuration, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { - /* All the old state is gone, so what else can we do? - * The device is probably useless now anyway. 
- */ - dev->actconfig = cp = NULL; - } - - if (!cp) { - usb_notify_config_device(dev); - usb_set_device_state(dev, USB_STATE_ADDRESS); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - usb_autosuspend_device(dev); - goto free_interfaces; - } - mutex_unlock(hcd->bandwidth_mutex); - usb_set_device_state(dev, USB_STATE_CONFIGURED); - /* Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ @@ -1838,6 +1814,35 @@ int usb_set_configuration(struct usb_device *dev, int configuration) } kfree(new_interfaces); + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + USB_REQ_SET_CONFIGURATION, 0, configuration, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); + if (ret < 0 && cp) { + /* + * All the old state is gone, so what else can we do? + * The device is probably useless now anyway. + */ + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + for (i = 0; i < nintf; ++i) { + usb_disable_interface(dev, cp->interface[i], true); + put_device(&cp->interface[i]->dev); + cp->interface[i] = NULL; + } + cp = NULL; + } + + dev->actconfig = cp; + mutex_unlock(hcd->bandwidth_mutex); + + if (!cp) { + usb_set_device_state(dev, USB_STATE_ADDRESS); + + /* Leave LPM disabled while the device is unconfigured. */ + usb_autosuspend_device(dev); + return ret; + } + usb_set_device_state(dev, USB_STATE_CONFIGURED); + if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index a2b20fe8..af65322e 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -180,8 +180,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, struct xhci_segment *next; next = xhci_segment_alloc(xhci, flags); - if (!next) + if (!next) { + prev = ring->first_seg; + while (prev) { + next = prev->next; + xhci_segment_free(xhci, prev); + prev = next; + } goto fail; + } xhci_link_segments(xhci, prev, next, link_trbs, isoc); prev = next; @@ -201,7 +208,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, return ring; fail: - xhci_ring_free(xhci, ring); + kfree(ring); return NULL; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f233bbbf..9db3e239 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -449,6 +449,10 @@ static void option_instat_callback(struct urb *urb); #define PETATEL_VENDOR_ID 0x1ff4 #define PETATEL_PRODUCT_NP10T 0x600e +/* TP-LINK Incorporated products */ +#define TPLINK_VENDOR_ID 0x2357 +#define TPLINK_PRODUCT_MA180 0x0201 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -930,7 +934,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, @@ -1311,6 +1316,8 @@ static 
const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 1dbf758c..7e56946a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2199,6 +2199,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) index = mpd->first_page; end = mpd->next_page - 1; + + pagevec_init(&pvec, 0); while (index <= end) { nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); if (nr_pages == 0) From e1423877573834ba24d5cd1d41a48b327e25abaf Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 08:28:41 -0500 Subject: [PATCH 055/117] Linux 3.0.61 --- Makefile | 2 +- arch/x86/include/asm/traps.h | 26 ++++++++ drivers/acpi/processor_idle.c | 3 + drivers/ata/ahci.c | 6 ++ drivers/dma/ioat/dma_v3.c | 2 +- drivers/firmware/dmi_scan.c | 78 +++++++++++++++++----- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21 ++++++ drivers/gpu/drm/i915/i915_reg.h | 3 + drivers/gpu/drm/i915/intel_display.c | 4 ++ drivers/misc/sgi-xp/xpc_main.c | 34 +++++++++- drivers/pci/pcie/aspm.c | 3 + drivers/scsi/sd.c | 13 ++-- drivers/staging/usbip/usbip_common.c | 11 ++- drivers/staging/usbip/usbip_common.h | 2 +- drivers/staging/usbip/vhci_rx.c | 3 +- drivers/tty/serial/8250.c | 2 +- drivers/usb/host/uhci-hcd.c | 15 +++-- include/linux/pci_ids.h | 2 + kernel/trace/ftrace.c | 2 +- 19 files changed, 188 insertions(+), 44 deletions(-) diff --git a/Makefile b/Makefile index 2241b917..6a46e917 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 60 +SUBLEVEL = 61 EXTRAVERSION = NAME = Sneaky Weasel diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 0310da67..1d449037 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_TRAPS_H #define _ASM_X86_TRAPS_H +#include #include #include /* TRAP_TRACE, ... 
*/ @@ -87,4 +88,29 @@ asmlinkage void smp_thermal_interrupt(void); asmlinkage void mce_threshold_interrupt(void); #endif +/* Interrupts/Exceptions */ +enum { + X86_TRAP_DE = 0, /* 0, Divide-by-zero */ + X86_TRAP_DB, /* 1, Debug */ + X86_TRAP_NMI, /* 2, Non-maskable Interrupt */ + X86_TRAP_BP, /* 3, Breakpoint */ + X86_TRAP_OF, /* 4, Overflow */ + X86_TRAP_BR, /* 5, Bound Range Exceeded */ + X86_TRAP_UD, /* 6, Invalid Opcode */ + X86_TRAP_NM, /* 7, Device Not Available */ + X86_TRAP_DF, /* 8, Double Fault */ + X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */ + X86_TRAP_TS, /* 10, Invalid TSS */ + X86_TRAP_NP, /* 11, Segment Not Present */ + X86_TRAP_SS, /* 12, Stack Segment Fault */ + X86_TRAP_GP, /* 13, General Protection Fault */ + X86_TRAP_PF, /* 14, Page Fault */ + X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */ + X86_TRAP_MF, /* 16, x87 Floating-Point Exception */ + X86_TRAP_AC, /* 17, Alignment Check */ + X86_TRAP_MC, /* 18, Machine Check */ + X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */ + X86_TRAP_IRET = 32, /* 32, IRET Exception */ +}; + #endif /* _ASM_X86_TRAPS_H */ diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 431ab11c..65976cb7 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -991,6 +991,9 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) return -EINVAL; } + if (!dev) + return -EINVAL; + dev->cpu = pr->id; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { dev->states[i].name[0] = '\0'; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 83002506..75a8d0f2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -402,6 +402,12 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + /* Asmedia */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d845dc4b..6e339269 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -949,7 +949,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } } - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); /* skip validate if the capability is not present */ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 02a52d13..66b63156 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -16,6 +16,7 @@ */ static char dmi_empty_string[] = " "; +static u16 __initdata dmi_ver; /* * Catch too early calls to dmi_check_system(): */ @@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, return 0; } -static int __init dmi_checksum(const u8 *buf) +static int __init dmi_checksum(const u8 *buf, u8 len) { u8 sum = 0; int a; - for (a = 0; a < 15; a++) + for (a = 0; a < len; a++) sum += buf[a]; return sum == 0; @@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde return; for (i = 0; i < 16 && (is_ff || is_00); i++) { - if(d[i] != 0x00) 
is_ff = 0; - if(d[i] != 0xFF) is_00 = 0; + if (d[i] != 0x00) + is_00 = 0; + if (d[i] != 0xFF) + is_ff = 0; } if (is_ff || is_00) @@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde if (!s) return; - sprintf(s, "%pUB", d); + /* + * As of version 2.6 of the SMBIOS specification, the first 3 fields of + * the UUID are supposed to be little-endian encoded. The specification + * says that this is the defacto standard. + */ + if (dmi_ver >= 0x0206) + sprintf(s, "%pUL", d); + else + sprintf(s, "%pUB", d); dmi_ident[slot] = s; } @@ -404,29 +415,57 @@ static int __init dmi_present(const char __iomem *p) u8 buf[15]; memcpy_fromio(buf, p, 15); - if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { + if (dmi_checksum(buf, 15)) { dmi_num = (buf[13] << 8) | buf[12]; dmi_len = (buf[7] << 8) | buf[6]; dmi_base = (buf[11] << 24) | (buf[10] << 16) | (buf[9] << 8) | buf[8]; - /* - * DMI version 0.0 means that the real version is taken from - * the SMBIOS version, which we don't know at this point. - */ - if (buf[14] != 0) - printk(KERN_INFO "DMI %d.%d present.\n", - buf[14] >> 4, buf[14] & 0xF); - else - printk(KERN_INFO "DMI present.\n"); if (dmi_walk_early(dmi_decode) == 0) { + if (dmi_ver) + pr_info("SMBIOS %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); + else { + dmi_ver = (buf[14] & 0xF0) << 4 | + (buf[14] & 0x0F); + pr_info("Legacy DMI %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); + } dmi_dump_ids(); return 0; } } + dmi_ver = 0; return 1; } +static int __init smbios_present(const char __iomem *p) +{ + u8 buf[32]; + int offset = 0; + + memcpy_fromio(buf, p, 32); + if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) { + dmi_ver = (buf[6] << 8) + buf[7]; + + /* Some BIOS report weird SMBIOS version, fix that up */ + switch (dmi_ver) { + case 0x021F: + case 0x0221: + pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", + dmi_ver & 0xFF, 3); + dmi_ver = 0x0203; + break; + case 0x0233: + pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6); + dmi_ver = 0x0206; + break; + } + offset = 16; + } + return dmi_present(buf + offset); +} + void __init dmi_scan_machine(void) { char __iomem *p, *q; @@ -444,7 +483,7 @@ void __init dmi_scan_machine(void) if (p == NULL) goto error; - rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ + rc = smbios_present(p); dmi_iounmap(p, 32); if (!rc) { dmi_available = 1; @@ -462,7 +501,12 @@ void __init dmi_scan_machine(void) goto error; for (q = p; q < p + 0x10000; q += 16) { - rc = dmi_present(q); + if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0) + rc = smbios_present(q); + else if (memcmp(q, "_DMI_", 5) == 0) + rc = dmi_present(q); + else + continue; if (!rc) { dmi_available = 1; dmi_iounmap(p, 0x10000); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index bc927ae3..490ab6b1 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -655,6 +655,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, total = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; + u64 invalid_offset = (u64)-1; + int j; user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; @@ -665,6 +667,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } + /* As we do not update the known relocation offsets after + * relocating (due to the complexities in lock handling), + * we need to mark them as invalid now so that we force the + * relocation processing next time. 
Just in case the target + * object is evicted and then rebound into its old + * presumed_offset before the next execbuffer - if that + * happened we would make the mistake of assuming that the + * relocations were valid. + */ + for (j = 0; j < exec[i].relocation_count; j++) { + if (copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + } + reloc_offset[i] = total; total += exec[i].relocation_count; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8fa4f7bc..5dc3b6d3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -27,6 +27,8 @@ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. @@ -354,6 +356,7 @@ * the enables for writing to the corresponding low bit. */ #define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) #define _3D_CHICKEN2 0x0208c /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d1dca923..b4f4d12e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7412,6 +7412,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); + /* WaDisableHiZPlanesWhenMSAAEnabled */ + I915_WRITE(_3D_CHICKEN, + _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); + I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 8d082b46..d9718171 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -53,6 +53,10 @@ #include #include "xpc.h" +#ifdef CONFIG_X86_64 +#include +#endif + /* define two XPC debug device structures to be used with dev_dbg() et al */ struct device_driver xpc_dbg_name = { @@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) return NOTIFY_DONE; } +/* Used to only allow one cpu to complete disconnect */ +static unsigned int xpc_die_disconnecting; + /* * Notify other partitions to deactivate from us by first disengaging from all * references to our memory. @@ -1092,6 +1099,9 @@ xpc_die_deactivate(void) long keep_waiting; long wait_to_print; + if (cmpxchg(&xpc_die_disconnecting, 0, 1)) + return; + /* keep xpc_hb_checker thread from doing anything (just in case) */ xpc_exiting = 1; @@ -1159,7 +1169,7 @@ xpc_die_deactivate(void) * about the lack of a heartbeat. */ static int -xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) +xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) { #ifdef CONFIG_IA64 /* !!! 
temporary kludge */ switch (event) { @@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) break; } #else - xpc_die_deactivate(); + struct die_args *die_args = _die_args; + + switch (event) { + case DIE_TRAP: + if (die_args->trapnr == X86_TRAP_DF) + xpc_die_deactivate(); + + if (((die_args->trapnr == X86_TRAP_MF) || + (die_args->trapnr == X86_TRAP_XF)) && + !user_mode_vm(die_args->regs)) + xpc_die_deactivate(); + + break; + case DIE_INT3: + case DIE_DEBUG: + break; + case DIE_OOPS: + case DIE_GPF: + default: + xpc_die_deactivate(); + } #endif return NOTIFY_DONE; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 0ff0182f..9b9305ae 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -790,6 +790,9 @@ void pcie_clear_aspm(struct pci_bus *bus) { struct pci_dev *child; + if (aspm_force) + return; + /* * Clear any ASPM setup that the firmware has carried out on this bus */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7d8b5d8d..515ec6d6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2826,10 +2826,6 @@ static int __init init_sd(void) if (err) goto err_out; - err = scsi_register_driver(&sd_template.gendrv); - if (err) - goto err_out_class; - sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 0, 0, NULL); if (!sd_cdb_cache) { @@ -2843,8 +2839,15 @@ static int __init init_sd(void) goto err_out_cache; } + err = scsi_register_driver(&sd_template.gendrv); + if (err) + goto err_out_driver; + return 0; +err_out_driver: + mempool_destroy(sd_cdb_pool); + err_out_cache: kmem_cache_destroy(sd_cdb_cache); @@ -2867,10 +2870,10 @@ static void __exit exit_sd(void) SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); + scsi_unregister_driver(&sd_template.gendrv); mempool_destroy(sd_cdb_pool); kmem_cache_destroy(sd_cdb_cache); - scsi_unregister_driver(&sd_template.gendrv); class_unregister(&sd_disk_class); for (i = 0; i < SD_MAJORS; i++) diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 433a3b62..1547cf23 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c @@ -761,26 +761,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso); * buffer and iso packets need to be stored and be in propeper endian in urb * before calling this function */ -int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) +void usbip_pad_iso(struct usbip_device *ud, struct urb *urb) { int np = urb->number_of_packets; int i; - int ret; int actualoffset = urb->actual_length; if (!usb_pipeisoc(urb->pipe)) - return 0; + return; /* if no packets or length of data is 0, then nothing to unpack */ if (np == 0 || urb->actual_length == 0) - return 0; + return; /* * if actual_length is transfer_buffer_length then no padding is * present. */ if (urb->actual_length == urb->transfer_buffer_length) - return 0; + return; /* * loop over all packets from last to first (to prevent overwritting @@ -792,8 +791,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) urb->transfer_buffer + actualoffset, urb->iso_frame_desc[i].actual_length); } - - return ret; } EXPORT_SYMBOL_GPL(usbip_pad_iso); diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h index 4a641c55..072743ec 100644 --- a/drivers/staging/usbip/usbip_common.h +++ b/drivers/staging/usbip/usbip_common.h @@ -327,7 +327,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); /* some members of urb must be substituted before. 
*/ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); /* some members of urb must be substituted before. */ -int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); +void usbip_pad_iso(struct usbip_device *ud, struct urb *urb); void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); /* usbip_event.c */ diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index 5c4b5d94..c851433f 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, return; /* restore the padding in iso packets */ - if (usbip_pad_iso(ud, urb) < 0) - return; + usbip_pad_iso(ud, urb); if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c index 762ce722..7f50999e 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250.c @@ -81,7 +81,7 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ #define DEBUG_INTR(fmt...) do { } while (0) #endif -#define PASS_LIMIT 256 +#define PASS_LIMIT 512 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index fba99b12..18cd76b7 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -446,6 +446,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) return IRQ_NONE; uhci_writew(uhci, status, USBSTS); /* Clear it */ + spin_lock(&uhci->lock); + if (unlikely(!uhci->is_initialized)) /* not yet configured */ + goto done; + if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { if (status & USBSTS_HSE) dev_err(uhci_dev(uhci), "host system error, " @@ -454,7 +458,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) dev_err(uhci_dev(uhci), "host controller process " "error, something bad happened!\n"); if (status & USBSTS_HCH) { - spin_lock(&uhci->lock); if (uhci->rh_state >= UHCI_RH_RUNNING) { dev_err(uhci_dev(uhci), "host controller halted, " @@ -472,15 +475,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) * pending unlinks */ mod_timer(&hcd->rh_timer, jiffies); } - spin_unlock(&uhci->lock); } } - if (status & USBSTS_RD) + if (status & USBSTS_RD) { + spin_unlock(&uhci->lock); usb_hcd_poll_rh_status(hcd); - else { - spin_lock(&uhci->lock); + } else { uhci_scan_schedule(uhci); + done: spin_unlock(&uhci->lock); } @@ -658,9 +661,9 @@ static int uhci_start(struct usb_hcd *hcd) */ mb(); + spin_lock_irq(&uhci->lock); configure_hc(uhci); uhci->is_initialized = 1; - spin_lock_irq(&uhci->lock); start_rh(uhci); spin_unlock_irq(&uhci->lock); return 0; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f269c03c..4c1a75fa 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2390,6 +2390,8 @@ #define PCI_VENDOR_ID_AZWAVE 0x1a3b +#define PCI_VENDOR_ID_ASMEDIA 0x1b21 + #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f88ea18d..e96eee3a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3460,7 +3460,7 @@ static int ftrace_module_notify(struct notifier_block *self, struct notifier_block ftrace_module_nb = { .notifier_call = ftrace_module_notify, - .priority = 0, + .priority = INT_MAX, /* Run before anything that can use kprobes */ }; extern unsigned long __start_mcount_loc[]; From 3cbb39bac7d0d115698bedd90bc836f7bfb9479e Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 29 Jan 2013 09:40:56 -0500 Subject: 
[PATCH 056/117] Update version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6a46e917..4029ee93 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 0 SUBLEVEL = 61 -EXTRAVERSION = +EXTRAVERSION = -Ermagerd-13.01.30 NAME = Sneaky Weasel # *DOCUMENTATION* From 4d17f53df03cfaf274c94e8dae3cf0c2e3d8d035 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Mon, 19 Mar 2012 17:11:57 -0700 Subject: [PATCH 057/117] lib/memcopy: use glibc version The kernel's memcpy and memmove are very inefficient. But the glibc version is quite fast; in some cases it is 10 times faster than the kernel version. So I introduce some of glibc's memory copy macros and functions to improve the kernel version's performance. The strategy of the memory functions is: 1. Copy bytes until the destination pointer is aligned. 2. Copy words in unrolled loops. If the source and destination are not aligned in the same way, use word memory operations, but shift and merge two read words before writing. 3. Copy the few remaining bytes. Signed-off-by: Miao Xie --- include/linux/memcopy.h | 226 ++++++++++++++++++++++ lib/Makefile | 3 +- lib/memcopy.c | 403 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 631 insertions(+), 1 deletion(-) create mode 100644 include/linux/memcopy.h create mode 100644 lib/memcopy.c diff --git a/include/linux/memcopy.h b/include/linux/memcopy.h new file mode 100644 index 00000000..a7b15739 --- /dev/null +++ b/include/linux/memcopy.h @@ -0,0 +1,226 @@ +/* + * memcopy.h -- definitions for memory copy functions. Generic C version. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2.1 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * The code is derived from the GNU C Library. + * Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc. + */ +#ifndef _LINUX_MEMCOPY_H_ +#define _LINUX_MEMCOPY_H_ + +/* + * The strategy of the memory functions is: + * + * 1. Copy bytes until the destination pointer is aligned. + * + * 2. Copy words in unrolled loops. If the source and destination + * are not aligned in the same way, use word memory operations, + * but shift and merge two read words before writing. + * + * 3. Copy the few remaining bytes. + * + * This is fast on processors that have at least 10 registers for + * allocation by GCC, and that can access memory at reg+const in one + * instruction.
+ */ + +#include +#include +#include + +/* + * The macros defined in this file are: + * + * BYTE_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy) + * + * BYTE_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy) + * + * WORD_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_remaining, nbytes_to_copy) + * + * WORD_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_remaining, nbytes_to_copy) + * + * MERGE(old_word, sh_1, new_word, sh_2) + * + * MEM_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy) + * + * MEM_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy) + */ + +#define OP_T_THRESHOLD 16 + +/* + * Type to use for aligned memory operations. + * This should normally be the biggest type supported by a single load + * and store. + */ +#define op_t unsigned long int +#define OPSIZ (sizeof(op_t)) + +/* Type to use for unaligned operations. */ +typedef unsigned char byte; + +#ifndef MERGE +# ifdef __LITTLE_ENDIAN +# define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2))) +# elif defined(__BIG_ENDIAN) +# define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2))) +# else +# error "Macro MERGE() hasn't defined!" +# endif +#endif + +/* + * Copy exactly NBYTES bytes from SRC_BP to DST_BP, + * without any assumptions about alignment of the pointers. + */ +#ifndef BYTE_COPY_FWD +#define BYTE_COPY_FWD(dst_bp, src_bp, nbytes) \ +do { \ + size_t __nbytes = (nbytes); \ + while (__nbytes > 0) { \ + byte __x = ((byte *) src_bp)[0]; \ + src_bp += 1; \ + __nbytes -= 1; \ + ((byte *) dst_bp)[0] = __x; \ + dst_bp += 1; \ + } \ +} while (0) +#endif + +/* + * Copy exactly NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR, + * beginning at the bytes right before the pointers and continuing towards + * smaller addresses. Don't assume anything about alignment of the + * pointers. + */ +#ifndef BYTE_COPY_BWD +#define BYTE_COPY_BWD(dst_ep, src_ep, nbytes) \ +do { \ + size_t __nbytes = (nbytes); \ + while (__nbytes > 0) { \ + byte __x; \ + src_ep -= 1; \ + __x = ((byte *) src_ep)[0]; \ + dst_ep -= 1; \ + __nbytes -= 1; \ + ((byte *) dst_ep)[0] = __x; \ + } \ +} while (0) +#endif +/* + * Copy *up to* NBYTES bytes from SRC_BP to DST_BP, with + * the assumption that DST_BP is aligned on an OPSIZ multiple. If + * not all bytes could be easily copied, store remaining number of bytes + * in NBYTES_LEFT, otherwise store 0. + */ +extern void _wordcopy_fwd_aligned(long int, long int, size_t); +extern void _wordcopy_fwd_dest_aligned(long int, long int, size_t); +#ifndef WORD_COPY_FWD +#define WORD_COPY_FWD(dst_bp, src_bp, nbytes_left, nbytes) \ +do { \ + if (src_bp % OPSIZ == 0) \ + _wordcopy_fwd_aligned (dst_bp, src_bp, (nbytes) / OPSIZ); \ + else \ + _wordcopy_fwd_dest_aligned (dst_bp, src_bp, (nbytes) / OPSIZ);\ + \ + src_bp += (nbytes) & -OPSIZ; \ + dst_bp += (nbytes) & -OPSIZ; \ + (nbytes_left) = (nbytes) % OPSIZ; \ +} while (0) +#endif + +/* + * Copy *up to* NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR, + * beginning at the words (of type op_t) right before the pointers and + * continuing towards smaller addresses. May take advantage of that + * DST_END_PTR is aligned on an OPSIZ multiple. If not all bytes could be + * easily copied, store remaining number of bytes in NBYTES_REMAINING, + * otherwise store 0. 
+ */ +extern void _wordcopy_bwd_aligned(long int, long int, size_t); +extern void _wordcopy_bwd_dest_aligned(long int, long int, size_t); +#ifndef WORD_COPY_BWD +#define WORD_COPY_BWD(dst_ep, src_ep, nbytes_left, nbytes) \ +do { \ + if (src_ep % OPSIZ == 0) \ + _wordcopy_bwd_aligned (dst_ep, src_ep, (nbytes) / OPSIZ); \ + else \ + _wordcopy_bwd_dest_aligned (dst_ep, src_ep, (nbytes) / OPSIZ);\ + \ + src_ep -= (nbytes) & -OPSIZ; \ + dst_ep -= (nbytes) & -OPSIZ; \ + (nbytes_left) = (nbytes) % OPSIZ; \ +} while (0) +#endif + +/* Copy memory from the beginning to the end */ +#ifndef MEM_COPY_FWD +static __always_inline void mem_copy_fwd(unsigned long dstp, + unsigned long srcp, + size_t count) +{ + /* If there not too few bytes to copy, use word copy. */ + if (count >= OP_T_THRESHOLD) { + /* Copy just a few bytes to make dstp aligned. */ + count -= (-dstp) % OPSIZ; + BYTE_COPY_FWD(dstp, srcp, (-dstp) % OPSIZ); + + /* + * Copy from srcp to dstp taking advantage of the known + * alignment of dstp. Number if bytes remaining is put in + * the third argument. + */ + WORD_COPY_FWD(dstp, srcp, count, count); + + /* Fall out and copy the tail. */ + } + + /* There are just a few bytes to copy. Use byte memory operations. */ + BYTE_COPY_FWD(dstp, srcp, count); +} +#endif + +/* Copy memory from the end to the beginning. */ +#ifndef MEM_COPY_BWD +static __always_inline void mem_copy_bwd(unsigned long dstp, + unsigned long srcp, + size_t count) +{ + srcp += count; + dstp += count; + + /* If there not too few bytes to copy, use word copy. */ + if (count >= OP_T_THRESHOLD) { + /* Copy just a few bytes to make dstp aligned. */ + count -= dstp % OPSIZ; + BYTE_COPY_BWD(dstp, srcp, dstp % OPSIZ); + + /* + * Copy from srcp to dstp taking advantage of the known + * alignment of dstp. Number if bytes remaining is put in + * the third argument. + */ + WORD_COPY_BWD(dstp, srcp, count, count); + + /* Fall out and copy the tail. */ + } + + /* There are just a few bytes to copy. Use byte memory operations. */ + BYTE_COPY_BWD (dstp, srcp, count); +} +#endif + +#endif diff --git a/lib/Makefile b/lib/Makefile index 4a93f551..d6f14e51 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -12,7 +12,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o prio_heap.o ratelimit.o show_mem.o \ - is_single_threaded.o plist.o decompress.o find_next_bit.o memory_alloc.o + is_single_threaded.o plist.o decompress.o find_next_bit.o memory_alloc.o \ + memcopy.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o diff --git a/lib/memcopy.c b/lib/memcopy.c new file mode 100644 index 00000000..d2d3376f --- /dev/null +++ b/lib/memcopy.c @@ -0,0 +1,403 @@ +/* + * memcopy.c -- subroutines for memory copy functions. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2.1 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * The code is derived from the GNU C Library. + * Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc. + */ + +/* BE VERY CAREFUL IF YOU CHANGE THIS CODE...! */ + +#include + +/* + * _wordcopy_fwd_aligned -- Copy block beginning at SRCP to block beginning + * at DSTP with LEN `op_t' words (not LEN bytes!). + * Both SRCP and DSTP should be aligned for memory operations on `op_t's. + */ +void _wordcopy_fwd_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1; + + switch (len % 8) { + case 2: + a0 = ((op_t *) srcp)[0]; + srcp -= 6 * OPSIZ; + dstp -= 7 * OPSIZ; + len += 6; + goto do1; + case 3: + a1 = ((op_t *) srcp)[0]; + srcp -= 5 * OPSIZ; + dstp -= 6 * OPSIZ; + len += 5; + goto do2; + case 4: + a0 = ((op_t *) srcp)[0]; + srcp -= 4 * OPSIZ; + dstp -= 5 * OPSIZ; + len += 4; + goto do3; + case 5: + a1 = ((op_t *) srcp)[0]; + srcp -= 3 * OPSIZ; + dstp -= 4 * OPSIZ; + len += 3; + goto do4; + case 6: + a0 = ((op_t *) srcp)[0]; + srcp -= 2 * OPSIZ; + dstp -= 3 * OPSIZ; + len += 2; + goto do5; + case 7: + a1 = ((op_t *) srcp)[0]; + srcp -= 1 * OPSIZ; + dstp -= 2 * OPSIZ; + len += 1; + goto do6; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + a0 = ((op_t *) srcp)[0]; + srcp -= 0 * OPSIZ; + dstp -= 1 * OPSIZ; + goto do7; + case 1: + a1 = ((op_t *) srcp)[0]; + srcp -=-1 * OPSIZ; + dstp -= 0 * OPSIZ; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do8; /* No-op. */ + } + + do { +do8: + a0 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = a1; +do7: + a1 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = a0; +do6: + a0 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = a1; +do5: + a1 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = a0; +do4: + a0 = ((op_t *) srcp)[4]; + ((op_t *) dstp)[4] = a1; +do3: + a1 = ((op_t *) srcp)[5]; + ((op_t *) dstp)[5] = a0; +do2: + a0 = ((op_t *) srcp)[6]; + ((op_t *) dstp)[6] = a1; +do1: + a1 = ((op_t *) srcp)[7]; + ((op_t *) dstp)[7] = a0; + + srcp += 8 * OPSIZ; + dstp += 8 * OPSIZ; + len -= 8; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[0] = a1; +} + +/* + * _wordcopy_fwd_dest_aligned -- Copy block beginning at SRCP to block + * beginning at DSTP with LEN `op_t' words (not LEN bytes!). DSTP should + * be aligned for memory operations on `op_t's, but SRCP must *not* be aligned. + */ + +void _wordcopy_fwd_dest_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1, a2, a3; + int sh_1, sh_2; + + /* + * Calculate how to shift a word read at the memory operation aligned + * srcp to make it aligned for copy. + */ + sh_1 = 8 * (srcp % OPSIZ); + sh_2 = 8 * OPSIZ - sh_1; + + /* + * Make SRCP aligned by rounding it down to the beginning of the `op_t' + * it points in the middle of. + */ + srcp &= -OPSIZ; + + switch (len % 4) { + case 2: + a1 = ((op_t *) srcp)[0]; + a2 = ((op_t *) srcp)[1]; + srcp -= 1 * OPSIZ; + dstp -= 3 * OPSIZ; + len += 2; + goto do1; + case 3: + a0 = ((op_t *) srcp)[0]; + a1 = ((op_t *) srcp)[1]; + srcp -= 0 * OPSIZ; + dstp -= 2 * OPSIZ; + len += 1; + goto do2; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + a3 = ((op_t *) srcp)[0]; + a0 = ((op_t *) srcp)[1]; + srcp -=-1 * OPSIZ; + dstp -= 1 * OPSIZ; + len += 0; + goto do3; + case 1: + a2 = ((op_t *) srcp)[0]; + a3 = ((op_t *) srcp)[1]; + srcp -=-2 * OPSIZ; + dstp -= 0 * OPSIZ; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do4; /* No-op. 
*/ + } + + do { +do4: + a0 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2); +do3: + a1 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = MERGE (a3, sh_1, a0, sh_2); +do2: + a2 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = MERGE (a0, sh_1, a1, sh_2); +do1: + a3 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = MERGE (a1, sh_1, a2, sh_2); + + srcp += 4 * OPSIZ; + dstp += 4 * OPSIZ; + len -= 4; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2); +} + +/* + * _wordcopy_bwd_aligned -- Copy block finishing right before + * SRCP to block finishing right before DSTP with LEN `op_t' words (not LEN + * bytes!). Both SRCP and DSTP should be aligned for memory operations + * on `op_t's. + */ +void _wordcopy_bwd_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1; + + switch (len % 8) { + case 2: + srcp -= 2 * OPSIZ; + dstp -= 1 * OPSIZ; + a0 = ((op_t *) srcp)[1]; + len += 6; + goto do1; + case 3: + srcp -= 3 * OPSIZ; + dstp -= 2 * OPSIZ; + a1 = ((op_t *) srcp)[2]; + len += 5; + goto do2; + case 4: + srcp -= 4 * OPSIZ; + dstp -= 3 * OPSIZ; + a0 = ((op_t *) srcp)[3]; + len += 4; + goto do3; + case 5: + srcp -= 5 * OPSIZ; + dstp -= 4 * OPSIZ; + a1 = ((op_t *) srcp)[4]; + len += 3; + goto do4; + case 6: + srcp -= 6 * OPSIZ; + dstp -= 5 * OPSIZ; + a0 = ((op_t *) srcp)[5]; + len += 2; + goto do5; + case 7: + srcp -= 7 * OPSIZ; + dstp -= 6 * OPSIZ; + a1 = ((op_t *) srcp)[6]; + len += 1; + goto do6; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + srcp -= 8 * OPSIZ; + dstp -= 7 * OPSIZ; + a0 = ((op_t *) srcp)[7]; + goto do7; + case 1: + srcp -= 9 * OPSIZ; + dstp -= 8 * OPSIZ; + a1 = ((op_t *) srcp)[8]; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do8; /* No-op. */ + } + + do { +do8: + a0 = ((op_t *) srcp)[7]; + ((op_t *) dstp)[7] = a1; +do7: + a1 = ((op_t *) srcp)[6]; + ((op_t *) dstp)[6] = a0; +do6: + a0 = ((op_t *) srcp)[5]; + ((op_t *) dstp)[5] = a1; +do5: + a1 = ((op_t *) srcp)[4]; + ((op_t *) dstp)[4] = a0; +do4: + a0 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = a1; +do3: + a1 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = a0; +do2: + a0 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = a1; +do1: + a1 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = a0; + + srcp -= 8 * OPSIZ; + dstp -= 8 * OPSIZ; + len -= 8; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[7] = a1; +} + +/* + * _wordcopy_bwd_dest_aligned -- Copy block finishing right before SRCP to + * block finishing right before DSTP with LEN `op_t' words (not LEN bytes!). + * DSTP should be aligned for memory operations on `op_t', but SRCP must *not* + * be aligned. + */ +void _wordcopy_bwd_dest_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1, a2, a3; + int sh_1, sh_2; + + /* + * Calculate how to shift a word read at the memory operation aligned + * srcp to make it aligned for copy. + */ + + sh_1 = 8 * (srcp % OPSIZ); + sh_2 = 8 * OPSIZ - sh_1; + + /* + * Make srcp aligned by rounding it down to the beginning of the op_t + * it points in the middle of. 
+ */ + srcp &= -OPSIZ; + srcp += OPSIZ; + + switch (len % 4) { + case 2: + srcp -= 3 * OPSIZ; + dstp -= 1 * OPSIZ; + a2 = ((op_t *) srcp)[2]; + a1 = ((op_t *) srcp)[1]; + len += 2; + goto do1; + case 3: + srcp -= 4 * OPSIZ; + dstp -= 2 * OPSIZ; + a3 = ((op_t *) srcp)[3]; + a2 = ((op_t *) srcp)[2]; + len += 1; + goto do2; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + srcp -= 5 * OPSIZ; + dstp -= 3 * OPSIZ; + a0 = ((op_t *) srcp)[4]; + a3 = ((op_t *) srcp)[3]; + goto do3; + case 1: + srcp -= 6 * OPSIZ; + dstp -= 4 * OPSIZ; + a1 = ((op_t *) srcp)[5]; + a0 = ((op_t *) srcp)[4]; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do4; /* No-op. */ + } + + do { +do4: + a3 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2); +do3: + a2 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = MERGE (a3, sh_1, a0, sh_2); +do2: + a1 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = MERGE (a2, sh_1, a3, sh_2); +do1: + a0 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = MERGE (a1, sh_1, a2, sh_2); + + srcp -= 4 * OPSIZ; + dstp -= 4 * OPSIZ; + len -= 4; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2); +} + From 29631e92da8713c912ec26e823e2bb8fc6a77414 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Mon, 19 Mar 2012 17:14:04 -0700 Subject: [PATCH 058/117] lib/string: use glibc version the performance of memcpy and memmove of the general version is very inefficient, this patch improved them. Signed-off-by: Miao Xie --- lib/string.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/lib/string.c b/lib/string.c index 01fad9b2..ed166883 100644 --- a/lib/string.c +++ b/lib/string.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifndef __HAVE_ARCH_STRNICMP /** @@ -596,11 +597,11 @@ EXPORT_SYMBOL(memset); */ void *memcpy(void *dest, const void *src, size_t count) { - char *tmp = dest; - const char *s = src; + unsigned long dstp = (unsigned long)dest; + unsigned long srcp = (unsigned long)src; - while (count--) - *tmp++ = *s++; + /* Copy from the beginning to the end */ + mem_copy_fwd(dstp, srcp, count); return dest; } EXPORT_SYMBOL(memcpy); @@ -617,21 +618,15 @@ EXPORT_SYMBOL(memcpy); */ void *memmove(void *dest, const void *src, size_t count) { - char *tmp; - const char *s; + unsigned long dstp = (unsigned long)dest; + unsigned long srcp = (unsigned long)src; - if (dest <= src) { - tmp = dest; - s = src; - while (count--) - *tmp++ = *s++; + if (dest - src >= count) { + /* Copy from the beginning to the end */ + mem_copy_fwd(dstp, srcp, count); } else { - tmp = dest; - tmp += count; - s = src; - s += count; - while (count--) - *--tmp = *--s; + /* Copy from the end to the beginning */ + mem_copy_bwd(dstp, srcp, count); } return dest; } From 2b7d4f7ee2e0c4dd0088bbadd129645b70a79761 Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Fri, 13 Apr 2012 23:26:42 +0200 Subject: [PATCH 059/117] [patch] iosched: prevent aliased requests from starving other I/O http://lkml.indiana.edu/hypermail/linux/kernel/1106.0/00548.html Hi, Jens, If you recall, I posted an RFC patch for this back in July of last year: http://lkml.org/lkml/2010/7/13/279 The basic problem is that a process can issue a never-ending stream of async direct I/Os to the same sector on a device, thus starving out other I/O in the system (due to the way the alias handling works in both cfq and deadline). 
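The starvation falls out of how the sort trees are keyed: requests are ordered by starting sector, so back-to-back I/Os to the same sector compare equal, and the old elv_rb_add() refused to insert the duplicate and handed it straight back for immediate dispatch. The sketch below contrasts the two tie-breaking rules using an ordinary binary search tree in plain C; it is only an illustration, not the kernel's rb-tree code, and the type and helper names are invented for this example.

#include <stdio.h>

struct node {
	unsigned long long sector;	/* sort key, like blk_rq_pos() */
	struct node *left, *right;
};

/*
 * Old rule: an equal key is rejected and returned as an "alias", which
 * the caller then dispatches immediately -- this is what lets a stream
 * of same-sector async I/O starve everything else in the queue.
 */
static struct node *insert_old(struct node **root, struct node *rq)
{
	struct node **p = root;

	while (*p) {
		if (rq->sector < (*p)->sector)
			p = &(*p)->left;
		else if (rq->sector > (*p)->sector)
			p = &(*p)->right;
		else
			return *p;	/* alias: caller must dispatch it */
	}
	*p = rq;
	return NULL;
}

/*
 * New rule (what the diff below switches to): ties simply go to the
 * right, so aliases sit in the tree like any other request and are
 * served under the normal scheduling policy.
 */
static void insert_new(struct node **root, struct node *rq)
{
	struct node **p = root;

	while (*p) {
		if (rq->sector < (*p)->sector)
			p = &(*p)->left;
		else			/* '>=' sends duplicates right */
			p = &(*p)->right;
	}
	*p = rq;
}

int main(void)
{
	struct node a = { 100 }, b = { 100 };	/* same sector: an "alias" */
	struct node *root = NULL;

	insert_new(&root, &a);
	insert_new(&root, &b);
	printf("new rule keeps the alias queued: %d\n", root->right == &b);

	struct node c = { 100 }, d = { 100 };
	struct node *old_root = NULL;

	insert_old(&old_root, &c);
	printf("old rule bounces it to dispatch: %d\n",
	       insert_old(&old_root, &d) == &c);
	return 0;
}
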
The solution I proposed back then was to start dispatching from the fifo after a certain number of aliases had been dispatched. Vivek asked why we had to treat aliases differently at all, and I never had a good answer. So, I put together a simple patch which allows aliases to be added to the rb tree (it adds them to the right, though that doesn't matter as the order isn't guaranteed anyway). I think this is the preferred solution, as it doesn't break up time slices in CFQ or batches in deadline. I've tested it, and it does solve the starvation issue. Let me know what you think. Cheers, Jeff --- block/cfq-iosched.c | 9 ++------- block/deadline-iosched.c | 4 +--- block/elevator.c | 7 ++----- include/linux/elevator.h | 2 +- 4 files changed, 6 insertions(+), 16 deletions(-) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index fc4c0a69..3627bd0b 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1503,16 +1503,11 @@ static void cfq_add_rq_rb(struct request *rq) { struct cfq_queue *cfqq = RQ_CFQQ(rq); struct cfq_data *cfqd = cfqq->cfqd; - struct request *__alias, *prev; + struct request *prev; cfqq->queued[rq_is_sync(rq)]++; - /* - * looks a little odd, but the first insert might return an alias. - * if that happens, put the alias on the dispatch list - */ - while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) - cfq_dispatch_insert(cfqd->queue, __alias); + elv_rb_add(&cfqq->sort_list, rq); if (!cfq_cfqq_on_rr(cfqq)) cfq_add_cfqq_rr(cfqd, cfqq); diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 5139c0ea..c644137d 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -77,10 +77,8 @@ static void deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) { struct rb_root *root = deadline_rb_root(dd, rq); - struct request *__alias; - while (unlikely(__alias = elv_rb_add(root, rq))) - deadline_move_request(dd, __alias); + elv_rb_add(root, rq); } static inline void diff --git a/block/elevator.c b/block/elevator.c index b0b38ce0..a3b64bc7 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -353,7 +353,7 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) * RB-tree support functions for inserting/lookup/removal of requests * in a sorted RB tree. */ -struct request *elv_rb_add(struct rb_root *root, struct request *rq) +void elv_rb_add(struct rb_root *root, struct request *rq) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -365,15 +365,12 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq) if (blk_rq_pos(rq) < blk_rq_pos(__rq)) p = &(*p)->rb_left; - else if (blk_rq_pos(rq) > blk_rq_pos(__rq)) + else if (blk_rq_pos(rq) >= blk_rq_pos(__rq)) p = &(*p)->rb_right; - else - return __rq; } rb_link_node(&rq->rb_node, parent, p); rb_insert_color(&rq->rb_node, root); - return NULL; } EXPORT_SYMBOL(elv_rb_add); diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 21a8ebf2..d800d514 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -146,7 +146,7 @@ extern struct request *elv_rb_latter_request(struct request_queue *, struct requ /* * rb support functions. 
*/ -extern struct request *elv_rb_add(struct rb_root *, struct request *); +extern void elv_rb_add(struct rb_root *, struct request *); extern void elv_rb_del(struct rb_root *, struct request *); extern struct request *elv_rb_find(struct rb_root *, sector_t); From 7ec58c1d04d948c886847ff9cfa58e142ff33ffb Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Sat, 12 Jan 2013 05:38:37 -0800 Subject: [PATCH 060/117] Optimized Async I/O --- fs/aio.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/aio.c b/fs/aio.c index 278ed7dc..99bb7446 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1618,7 +1618,6 @@ long do_io_submit(aio_context_t ctx_id, long nr, struct kioctx *ctx; long ret = 0; int i; - struct blk_plug plug; if (unlikely(nr < 0)) return -EINVAL; @@ -1635,8 +1634,6 @@ long do_io_submit(aio_context_t ctx_id, long nr, return -EINVAL; } - blk_start_plug(&plug); - /* * AKPM: should this return a partial result if some of the IOs were * successfully submitted? @@ -1659,7 +1656,6 @@ long do_io_submit(aio_context_t ctx_id, long nr, if (ret) break; } - blk_finish_plug(&plug); put_ioctx(ctx); return i ? i : ret; From db5f1576ffee5735f1718ea8ce340aca15624598 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 4 Feb 2013 13:11:30 -0500 Subject: [PATCH 061/117] Lower VFS cache pressure VFS cache pressure defines how readily the kernel will drop caches to recovery memory. We lower it here to allow more RAM to be used for caches, which should increase performance of frequently used apps. The kernel will still drop the caches when we need the memory. We don't make it too low, because otherwise, performance could suffer due to insufficient free memory. --- fs/dcache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/dcache.c b/fs/dcache.c index 8b64f383..4afa8686 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -76,7 +76,7 @@ * dentry1->d_lock * dentry2->d_lock */ -int sysctl_vfs_cache_pressure __read_mostly = 100; +int sysctl_vfs_cache_pressure __read_mostly = 20; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); From ec4591cb9e5ac05eabcb6eb6de6e75f8f272d07e Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 4 Feb 2013 13:20:27 -0500 Subject: [PATCH 062/117] Increase dirty ratio Delay writing of data to SD for as long as possible to improve performance --- mm/page-writeback.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 955fe35d..fd24adf1 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -61,7 +61,7 @@ static inline long sync_writeback_pages(unsigned long dirtied) /* * Start background writeback (via writeback threads) at this percentage */ -int dirty_background_ratio = 10; +int dirty_background_ratio = 70; /* * dirty_background_bytes starts at 0 (disabled) so that it is a function of @@ -78,7 +78,7 @@ int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ -int vm_dirty_ratio = 20; +int vm_dirty_ratio = 90; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of From 530703afdcc8b915b45f31dec47fd3a4d256f440 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Mon, 4 Feb 2013 13:24:18 -0500 Subject: [PATCH 063/117] Reduce swappiness When using swap, don't swap until necessary. 
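Patches 061 through 063 only change compiled-in defaults (vfs_cache_pressure 100 -> 20, dirty_background_ratio 10 -> 70, vm_dirty_ratio 20 -> 90, vm_swappiness 60 -> 10); the kernel exports the same knobs through procfs, so the values can be tried on a running device before being baked into the image. A minimal userspace sketch, assuming a root shell and the standard /proc/sys/vm layout:

#include <stdio.h>

/* Write one integer to a sysctl file; returns 0 on success. */
static int write_tunable(const char *path, int value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", value);
	return fclose(f);
}

int main(void)
{
	/* The defaults this series compiles into the kernel. */
	write_tunable("/proc/sys/vm/swappiness", 10);
	write_tunable("/proc/sys/vm/vfs_cache_pressure", 20);
	write_tunable("/proc/sys/vm/dirty_ratio", 90);
	write_tunable("/proc/sys/vm/dirty_background_ratio", 70);
	return 0;
}

The same effect can be had from an init script with sysctl or echo; the point is only that these are runtime tunables, so the trade-off (more cached data kept in RAM, writeback deferred longer, swap touched later) can be measured before committing to new defaults.
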
--- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index d80b5322..3d3870ca 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -148,7 +148,7 @@ struct scan_control { /* * From 0 .. 100. Higher means more swappy. */ -int vm_swappiness = 60; +int vm_swappiness = 10; long vm_total_pages; /* The total number of pages which the VM controls */ static LIST_HEAD(shrinker_list); From fd08dbf667a861277bcf74d89d4973519efc5aa5 Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Sun, 30 Dec 2012 09:55:54 -0800 Subject: [PATCH 064/117] Compile with neon --- arch/arm/vfp/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile index 6de73aab..ec624f01 100644 --- a/arch/arm/vfp/Makefile +++ b/arch/arm/vfp/Makefile @@ -7,7 +7,7 @@ # ccflags-y := -DDEBUG # asflags-y := -DDEBUG -KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp) +KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=neon) LDFLAGS +=--no-warn-mismatch obj-y += vfp.o From b8a4b6591dc9bf47e2d2ef691528dbbc444c3dea Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Thu, 3 Jan 2013 21:53:15 -0800 Subject: [PATCH 065/117] Added optimized ARM RWSEM algorithm --- arch/arm/Kconfig | 3 +- arch/arm/configs/vigor_aosp_defconfig | 3 +- arch/arm/include/asm/rwsem.h | 138 ++++++++++++++++++++++++++ 3 files changed, 141 insertions(+), 3 deletions(-) create mode 100644 arch/arm/include/asm/rwsem.h diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d97e48d7..11c09e29 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -139,10 +139,9 @@ config ARM_TICKET_LOCKS config RWSEM_GENERIC_SPINLOCK bool - default y config RWSEM_XCHGADD_ALGORITHM - bool + def_bool y config ARCH_HAS_ILOG2_U32 bool diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 9f3e167f..11670f8d 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -17,7 +17,8 @@ CONFIG_HARDIRQS_SW_RESEND=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_LOCKBREAK=y CONFIG_ARM_TICKET_LOCKS=y -CONFIG_RWSEM_GENERIC_SPINLOCK=y +#CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_ARCH_HAS_CPUFREQ=y CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y CONFIG_GENERIC_HWEIGHT=y diff --git a/arch/arm/include/asm/rwsem.h b/arch/arm/include/asm/rwsem.h new file mode 100644 index 00000000..2066674d --- /dev/null +++ b/arch/arm/include/asm/rwsem.h @@ -0,0 +1,138 @@ +/* rwsem.h: R/W semaphores implemented using ARM atomic functions. + * + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#ifndef _ASM_ARM_RWSEM_H +#define _ASM_ARM_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ +#include +#include + +#define RWSEM_UNLOCKED_VALUE 0x00000000 +#define RWSEM_ACTIVE_BIAS 0x00000001 +#define RWSEM_ACTIVE_MASK 0x0000ffff +#define RWSEM_WAITING_BIAS (-0x00010000) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (atomic_inc_return((atomic_t *)(&sem->count)) < 0) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + int tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)); + if (tmp != RWSEM_ACTIVE_WRITE_BIAS) + rwsem_down_write_failed(sem); +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + int tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_dec_return((atomic_t *)(&sem->count)); + if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)) < 0) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +{ + atomic_add(delta, (atomic_t *)(&sem->count)); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +{ + __down_write(sem); +} + +/* + * implement exchange and add functionality + */ +static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +{ + return atomic_add_return(delta, (atomic_t *)(&sem->count)); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_ARM_RWSEM_H */ From fc429fb63e1ab1455971ba2271d6eac4ddff3424 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 5 Feb 2013 07:31:42 -0500 Subject: [PATCH 066/117] Update version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4029ee93..a9460814 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 0 SUBLEVEL = 61 -EXTRAVERSION = -Ermagerd-13.01.30 +EXTRAVERSION = -Ermagerd-13.02.05 NAME = Sneaky Weasel # *DOCUMENTATION* From fefff1381e23e39f242139ef71300a80384da946 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 12 Feb 2013 11:27:23 -0500 Subject: [PATCH 067/117] Linux 3.0.62 --- Makefile | 2 +- arch/arm/mm/dma-mapping.c | 18 ++++++++++-------- arch/x86/kernel/msr.c | 3 +++ arch/x86/kernel/setup.c | 2 ++ arch/x86/platform/efi/efi_64.c | 22 +++++++++++++++++----- drivers/edac/edac_pci_sysfs.c | 2 +- drivers/net/can/c_can/c_can.c | 4 ++-- 
drivers/net/can/pch_can.c | 2 +- drivers/net/can/ti_hecc.c | 4 ++-- drivers/net/wireless/ath/ath9k/beacon.c | 1 + drivers/net/wireless/ath/ath9k/htc_hst.c | 2 ++ fs/cifs/cifs_dfs_ref.c | 2 ++ kernel/smp.c | 13 ++++++++++++- net/bluetooth/hci_event.c | 2 +- net/bluetooth/hidp/core.c | 2 +- sound/usb/mixer.c | 17 ++++++++++++----- 16 files changed, 70 insertions(+), 28 deletions(-) diff --git a/Makefile b/Makefile index a9460814..4462eff9 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 61 +SUBLEVEL = 62 EXTRAVERSION = -Ermagerd-13.02.05 NAME = Sneaky Weasel diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 340c2d06..875114ff 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -472,25 +472,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) { + unsigned long pfn; + size_t left = size; + + pfn = page_to_pfn(page) + offset / PAGE_SIZE; + offset %= PAGE_SIZE; + /* * A single sg entry may refer to multiple physically contiguous * pages. But we still need to process highmem pages individually. * If highmem is not configured then the bulk of this loop gets * optimized out. */ - size_t left = size; do { size_t len = left; void *vaddr; + page = pfn_to_page(pfn); + if (PageHighMem(page)) { - if (len + offset > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset / PAGE_SIZE; - offset %= PAGE_SIZE; - } + if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; - } vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; @@ -507,7 +509,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, op(vaddr, len, dir); } offset = 0; - page++; + pfn++; left -= len; } while (left); } diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 12fcbe2c..f7d1a649 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -175,6 +175,9 @@ static int msr_open(struct inode *inode, struct file *file) unsigned int cpu; struct cpuinfo_x86 *c; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + cpu = iminor(file->f_path.dentry->d_inode); if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index eb9eb8be..6c4e9ffc 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -633,6 +633,7 @@ static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; static bool __init snb_gfx_workaround_needed(void) { +#ifdef CONFIG_PCI int i; u16 vendor, devid; static const u16 snb_ids[] = { @@ -657,6 +658,7 @@ static bool __init snb_gfx_workaround_needed(void) for (i = 0; i < ARRAY_SIZE(snb_ids); i++) if (devid == snb_ids[i]) return true; +#endif return false; } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac3aa54e..0fba86da 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -38,7 +38,7 @@ #include #include -static pgd_t save_pgd __initdata; +static pgd_t *save_pgd __initdata; static unsigned long efi_flags __initdata; static void __init early_code_mapping_set_exec(int executable) @@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable) void __init efi_call_phys_prelog(void) { unsigned long vaddress; + int pgd; + int n_pgds; early_code_mapping_set_exec(1); local_irq_save(efi_flags); - vaddress = (unsigned long)__va(0x0UL); - save_pgd = *pgd_offset_k(0x0UL); - set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); + 
+ n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); + + for (pgd = 0; pgd < n_pgds; pgd++) { + save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); + } __flush_tlb_all(); } @@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void) /* * After the lock is released, the original page table is restored. */ - set_pgd(pgd_offset_k(0x0UL), save_pgd); + int pgd; + int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); + for (pgd = 0; pgd < n_pgds; pgd++) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); __flush_tlb_all(); local_irq_restore(efi_flags); early_code_mapping_set_exec(0); diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 495198ad..8cc8676f 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -257,7 +257,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj, struct edac_pci_dev_attribute *edac_pci_dev; edac_pci_dev = (struct edac_pci_dev_attribute *)attr; - if (edac_pci_dev->show) + if (edac_pci_dev->store) return edac_pci_dev->store(edac_pci_dev->value, buffer, count); return -EIO; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 1bf80328..61958684 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -916,7 +916,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | + cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL); break; case LEC_BIT1_ERROR: @@ -929,7 +929,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL); break; default: diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index d11fbb2b..b508a638 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -559,7 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) stats->rx_errors++; break; case PCH_CRC_ERR: - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; priv->can.can_stats.bus_error++; stats->rx_errors++; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 6ea2c090..10b23947 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -734,12 +734,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } if (err_status & HECC_CANES_CRCE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; } if (err_status & HECC_CANES_ACKE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); - cf->data[2] |= CAN_ERR_PROT_LOC_ACK | + cf->data[3] |= CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL; } } diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index d4d8cece..b109c470 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -159,6 +159,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); bf->bf_buf_addr = 0; + bf->bf_mpdu = NULL; } /* Get a new beacon from mac80211 */ 
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 1b90ed87..4f7843ae 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, skb, htc_hdr->endpoint_id, txok); + } else { + kfree_skb(skb); } } diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 8d8f28c9..51feb1af 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -225,6 +225,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, compose_mount_options_err: kfree(mountdata); mountdata = ERR_PTR(rc); + kfree(*devname); + *devname = NULL; goto compose_mount_options_out; } diff --git a/kernel/smp.c b/kernel/smp.c index fb67dfa8..38d9e033 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -31,6 +31,7 @@ struct call_function_data { struct call_single_data csd; atomic_t refs; cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); @@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return notifier_from_errno(-ENOMEM); + if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, + cpu_to_node(cpu))) + return notifier_from_errno(-ENOMEM); break; #ifdef CONFIG_HOTPLUG_CPU @@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: case CPU_DEAD_FROZEN: free_cpumask_var(cfd->cpumask); + free_cpumask_var(cfd->cpumask_ipi); break; #endif }; @@ -524,6 +529,12 @@ void smp_call_function_many(const struct cpumask *mask, return; } + /* + * After we put an entry into the list, data->cpumask + * may be cleared again when another CPU sends another IPI for + * a SMP function call, so data->cpumask will be zero. 
+ */ + cpumask_copy(data->cpumask_ipi, data->cpumask); raw_spin_lock_irqsave(&call_function.lock, flags); /* * Place entry at the _HEAD_ of the list, so that any cpu still @@ -547,7 +558,7 @@ void smp_call_function_many(const struct cpumask *mask, smp_mb(); /* Send a message to all CPUs in the map */ - arch_send_call_function_ipi_mask(data->cpumask); + arch_send_call_function_ipi_mask(data->cpumask_ipi); /* Optionally wait for the CPUs to complete */ if (wait) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 61430005..10687f4e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2292,7 +2292,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk if (ev->opcode != HCI_OP_NOP) del_timer(&hdev->cmd_timer); - if (ev->ncmd) { + if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) tasklet_schedule(&hdev->cmd_task); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index c8a54028..4d533e26 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -803,7 +803,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid->version = req->version; hid->country = req->country; - strncpy(hid->name, req->name, 128); + strncpy(hid->name, req->name, sizeof(req->name) - 1); strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 1d369e2b..4d68f90b 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1238,16 +1238,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void } channels = (hdr->bLength - 7) / csize - 1; bmaControls = hdr->bmaControls; + if (hdr->bLength < 7 + csize) { + snd_printk(KERN_ERR "usbaudio: unit %u: " + "invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } } else { struct uac2_feature_unit_descriptor *ftr = _ftr; csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; - } - - if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) { - snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid); - return -EINVAL; + if (hdr->bLength < 6 + csize) { + snd_printk(KERN_ERR "usbaudio: unit %u: " + "invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } } /* parse the source unit */ From fa6b803f955b8ae746422df4133656bf6d220404 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 12 Feb 2013 11:35:05 -0500 Subject: [PATCH 068/117] Linux 3.0.63 --- Makefile | 2 +- arch/x86/ia32/ia32entry.S | 4 +- drivers/gpu/drm/radeon/evergreen.c | 2 + drivers/gpu/drm/radeon/radeon_combios.c | 8 + drivers/gpu/drm/radeon/radeon_display.c | 4 +- drivers/rtc/rtc-isl1208.c | 3 + drivers/usb/host/ehci-sched.c | 2 +- drivers/usb/host/xhci-ring.c | 6 +- drivers/usb/serial/ftdi_sio.c | 2 + drivers/usb/serial/ftdi_sio_ids.h | 9 +- drivers/usb/serial/option.c | 13 + drivers/usb/serial/qcserial.c | 1 + drivers/usb/storage/initializers.c | 76 +++++- drivers/usb/storage/initializers.h | 4 +- drivers/usb/storage/unusual_devs.h | 329 +----------------------- drivers/usb/storage/usb.c | 12 + drivers/usb/storage/usual-tables.c | 15 ++ fs/nilfs2/ioctl.c | 5 +- 18 files changed, 158 insertions(+), 339 deletions(-) diff --git a/Makefile b/Makefile index 4462eff9..324fd047 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 62 +SUBLEVEL = 63 
EXTRAVERSION = -Ermagerd-13.02.05 NAME = Sneaky Weasel diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index c1870ddd..26af1e31 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -208,7 +208,7 @@ sysexit_from_sys_call: testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) jnz ia32_ret_from_sys_call TRACE_IRQS_ON - sti + ENABLE_INTERRUPTS(CLBR_NONE) movl %eax,%esi /* second arg, syscall return value */ cmpl $0,%eax /* is it < 0? */ setl %al /* 1 if so, 0 if not */ @@ -218,7 +218,7 @@ sysexit_from_sys_call: GET_THREAD_INFO(%r10) movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - cli + DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %edi,TI_flags(%r10) jz \exit diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 314e2172..a75d290b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1065,6 +1065,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav WREG32(EVERGREEN_D5VGA_CONTROL, 0); WREG32(EVERGREEN_D6VGA_CONTROL, 0); } + /* wait for the MC to settle */ + udelay(100); } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 859df6b5..2157e770 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2338,6 +2338,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) 1), ATOM_DEVICE_CRT1_SUPPORT); } + /* RV100 board with external TDMS bit mis-set. + * Actually uses internal TMDS, clear the bit. + */ + if (dev->pdev->device == 0x5159 && + dev->pdev->subsystem_vendor == 0x1014 && + dev->pdev->subsystem_device == 0x029A) { + tmp &= ~(1 << 4); + } if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ed085ce9..0896faed 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1158,8 +1158,10 @@ radeon_user_framebuffer_create(struct drm_device *dev, } radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); - if (radeon_fb == NULL) + if (radeon_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(-ENOMEM); + } radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index da8beb8c..627b66aa 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; + struct rtc_device *rtc = i2c_get_clientdata(client); int handled = 0, sr, err; /* @@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data) if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 2542ca01..bd727626 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -215,7 +215,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 
125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1a382815..cb436fe1 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2506,6 +2506,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, (trb_comp_code != COMP_STALL && trb_comp_code != COMP_BABBLE)) xhci_urb_free_priv(xhci, urb_priv); + else + kfree(urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && @@ -3613,9 +3615,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td = urb_priv->td[i]; for (j = 0; j < trbs_per_td; j++) { u32 remainder = 0; - field = TRB_TBC(burst_count) | TRB_TLBPC(residue); + field = 0; if (first_trb) { + field = TRB_TBC(burst_count) | + TRB_TLBPC(residue); /* Queue the isoc TRB */ field |= TRB_TYPE(TRB_ISOC); /* Assume URB_ISO_ASAP is set */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c855a4af..149198f6 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -588,6 +588,7 @@ static struct usb_device_id id_table_combined [] = { /* * ELV devices: */ + { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, @@ -674,6 +675,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index dd6edf86..97e0a6bb 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -147,6 +147,11 @@ #define XSENS_CONVERTER_6_PID 0xD38E #define XSENS_CONVERTER_7_PID 0xD38F +/** + * Zolix (www.zolix.com.cb) product ids + */ +#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ + /* * NDI (www.ndigital.com) product ids */ @@ -204,7 +209,7 @@ /* * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). - * All of these devices use FTDI's vendor ID (0x0403). + * Almost all of these devices use FTDI's vendor ID (0x0403). * Further IDs taken from ELV Windows .inf file. * * The previously included PID for the UO 100 module was incorrect. @@ -212,6 +217,8 @@ * * Armin Laeuger originally sent the PID for the UM 100 module. 
*/ +#define FTDI_ELV_VID 0x1B1F /* ELV AG */ +#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9db3e239..52cd8141 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_CC864_DUAL 0x1005 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 +#define TELIT_PRODUCT_LE920 0x1200 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb); #define TPLINK_VENDOR_ID 0x2357 #define TPLINK_PRODUCT_MA180 0x0201 +/* Changhong products */ +#define CHANGHONG_VENDOR_ID 0x2077 +#define CHANGHONG_PRODUCT_CH690 0x7001 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = { .reserved = BIT(3) | BIT(4), }; +static const struct option_blacklist_info telit_le920_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(5), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 7cd2c269..03d5f932 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -54,6 +54,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ + {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ /* Gobi 2000 devices */ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 105d9001..16b0bf05 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us) return 0; } -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us) +/* This places the HUAWEI usb dongles in multi-port mode */ +static 
int usb_stor_huawei_feature_init(struct us_data *us) { int result; @@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us) US_DEBUGP("Huawei mode set result is %d\n", result); return 0; } + +/* + * It will send a scsi switch command called rewind' to huawei dongle. + * When the dongle receives this command at the first time, + * it will reboot immediately. After rebooted, it will ignore this command. + * So it is unnecessary to read its response. + */ +static int usb_stor_huawei_scsi_init(struct us_data *us) +{ + int result = 0; + int act_len = 0; + struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; + char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); + bcbw->Tag = 0; + bcbw->DataTransferLength = 0; + bcbw->Flags = bcbw->Lun = 0; + bcbw->Length = sizeof(rewind_cmd); + memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); + memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); + + result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, + US_BULK_CB_WRAP_LEN, &act_len); + US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); + return result; +} + +/* + * It tries to find the supported Huawei USB dongles. + * In Huawei, they assign the following product IDs + * for all of their mobile broadband dongles, + * including the new dongles in the future. + * So if the product ID is not included in this list, + * it means it is not Huawei's mobile broadband dongles. + */ +static int usb_stor_huawei_dongles_pid(struct us_data *us) +{ + struct usb_interface_descriptor *idesc; + int idProduct; + + idesc = &us->pusb_intf->cur_altsetting->desc; + idProduct = us->pusb_dev->descriptor.idProduct; + /* The first port is CDROM, + * means the dongle in the single port mode, + * and a switch command is required to be sent. 
*/ + if (idesc && idesc->bInterfaceNumber == 0) { + if ((idProduct == 0x1001) + || (idProduct == 0x1003) + || (idProduct == 0x1004) + || (idProduct >= 0x1401 && idProduct <= 0x1500) + || (idProduct >= 0x1505 && idProduct <= 0x1600) + || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { + return 1; + } + } + return 0; +} + +int usb_stor_huawei_init(struct us_data *us) +{ + int result = 0; + + if (usb_stor_huawei_dongles_pid(us)) { + if (us->pusb_dev->descriptor.idProduct >= 0x1446) + result = usb_stor_huawei_scsi_init(us); + else + result = usb_stor_huawei_feature_init(us); + } + return result; +} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327fb..5376d4fc 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us); * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us); +/* This places the HUAWEI usb dongles in multi-port mode */ +int usb_stor_huawei_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index fa8a1b2c..12640efc 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1515,335 +1515,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, /* Reported by fangxiaozhi * This brings the HUAWEI data card devices into multi-port mode */ -UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, +UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, "HUAWEI MOBILE", "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, - "HUAWEI MOBILE", - 
"Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, - "HUAWEI MOBILE", - 
"Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, - "HUAWEI MOBILE", - 
"Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 0), /* Reported by Vilius Bilinkevicius i_sb, argv, kbufs); + } nilfs_remove_all_gcinodes(nilfs); clear_nilfs_gc_running(nilfs); From 182f765af05517a121e7c7fd91e6febd62db18ac Mon Sep 17 00:00:00 2001 From: Dennis Date: Thu, 6 Sep 2012 06:39:59 -0400 Subject: [PATCH 069/117] Use the git implementation of SHA-1. From MikeC84. --- arch/arm/kernel/armksyms.c | 3 - arch/arm/lib/Makefile | 2 +- include/linux/cryptohash.h | 2 +- lib/sha1.c | 211 +++++++++++++++++++++++++++---------- 4 files changed, 160 insertions(+), 58 deletions(-) diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 263eaaf3..15f151e0 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -108,9 +108,6 @@ EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); #endif - /* crypto hash */ -EXPORT_SYMBOL(sha_transform); - /* gcc lib functions */ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 59ff42dd..cf73a7f7 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -12,7 +12,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ strchr.o strrchr.o \ testchangebit.o testclearbit.o testsetbit.o \ ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ - ucmpdi2.o lib1funcs.o div64.o sha1.o \ + ucmpdi2.o lib1funcs.o div64.o \ io-readsb.o io-writesb.o io-readsl.o io-writesl.o mmu-y := clear_user.o copy_page.o getuser.o putuser.o diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h index d2984fbe..2cd9f1cf 100644 --- a/include/linux/cryptohash.h +++ b/include/linux/cryptohash.h @@ -3,7 +3,7 @@ #define SHA_DIGEST_WORDS 5 #define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) -#define SHA_WORKSPACE_WORDS 80 +#define SHA_WORKSPACE_WORDS 16 void sha_init(__u32 *buf); void sha_transform(__u32 *digest, const char *data, __u32 *W); diff --git a/lib/sha1.c b/lib/sha1.c index 4c45fd50..61d730df 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -1,31 +1,72 @@ /* - * SHA transform algorithm, originally taken from code written by - * Peter Gutmann, and placed in the public domain. + * SHA1 routine optimized to do word accesses rather than byte accesses, + * and to avoid unnecessary copies into the context array. + * + * This was based on the git SHA1 implementation. */ #include #include -#include +#include +#include + +/* + * If you have 32 registers or more, the compiler can (and should) + * try to change the array[] accesses into registers. However, on + * machines with less than ~25 registers, that won't really work, + * and at least gcc will make an unholy mess of it. 
+ * + * So to avoid that mess which just slows things down, we force + * the stores to memory to actually happen (we might be better off + * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as + * suggested by Artur Skawina - that will also make gcc unable to + * try to do the silly "optimize away loads" part because it won't + * see what the value will be). + * + * Ben Herrenschmidt reports that on PPC, the C version comes close + * to the optimized asm with this (ie on PPC you don't want that + * 'volatile', since there are lots of registers). + * + * On ARM we get the best code generation by forcing a full memory barrier + * between each SHA_ROUND, otherwise gcc happily get wild with spilling and + * the stack frame size simply explode and performance goes down the drain. + */ + +#ifdef CONFIG_X86 + #define setW(x, val) (*(volatile __u32 *)&W(x) = (val)) +#elif defined(CONFIG_ARM) + #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0) +#else + #define setW(x, val) (W(x) = (val)) +#endif -/* The SHA f()-functions. */ +/* This "rolls" over the 512-bit array */ +#define W(x) (array[(x)&15]) -#define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ -#define f2(x,y,z) (x ^ y ^ z) /* XOR */ -#define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ +/* + * Where do we get the source from? The first 16 iterations get it from + * the input data, the next mix it from the 512-bit array. + */ +#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t) +#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1) -/* The SHA Mysterious Constants */ +#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ + __u32 TEMP = input(t); setW(t, TEMP); \ + E += TEMP + rol32(A,5) + (fn) + (constant); \ + B = ror32(B, 2); } while (0) -#define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ -#define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ -#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ -#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ +#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) +#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) +#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E ) +#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E ) +#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) /** * sha_transform - single block SHA1 transform * * @digest: 160 bit digest to update * @data: 512 bits of data to hash - * @W: 80 words of workspace (see note) + * @array: 16 words of workspace (see note) * * This function generates a SHA1 digest for a single 512-bit block. * Be warned, it does not handle padding and message digest, do not @@ -36,47 +77,111 @@ * to clear the workspace. This is left to the caller to avoid * unnecessary clears between chained hashing operations. 
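/*
 * Illustrative sketch, not part of the patch being quoted: the note above
 * stresses that sha_transform() does no padding or finalization.  A
 * hypothetical caller hashing a short message (under 56 bytes, so it fits a
 * single 512-bit block) would have to pad it itself, roughly like this.
 * Only sha_init(), sha_transform(), SHA_MESSAGE_BYTES and
 * SHA_WORKSPACE_WORDS are real interfaces from <linux/cryptohash.h>; the
 * helper name and structure are made up for illustration.
 */
static void example_sha1_one_block(const u8 *msg, size_t len, __u32 digest[5])
{
	__u32 workspace[SHA_WORKSPACE_WORDS];	/* 16 words after this series */
	u8 block[SHA_MESSAGE_BYTES];		/* one 64-byte block */

	if (WARN_ON(len >= 56))			/* keep the sketch to one block */
		return;

	memset(block, 0, sizeof(block));
	memcpy(block, msg, len);
	block[len] = 0x80;			/* mandatory terminator bit */
	block[62] = (len * 8) >> 8;		/* 64-bit big-endian bit count; */
	block[63] = (len * 8) & 0xff;		/* upper bytes stay zero here   */

	sha_init(digest);
	sha_transform(digest, (const char *)block, workspace);
	memset(workspace, 0, sizeof(workspace));/* caller clears, per the note */
}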
*/ -void sha_transform(__u32 *digest, const char *in, __u32 *W) +void sha_transform(__u32 *digest, const char *data, __u32 *array) { - __u32 a, b, c, d, e, t, i; - - for (i = 0; i < 16; i++) - W[i] = be32_to_cpu(((const __be32 *)in)[i]); - - for (i = 0; i < 64; i++) - W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); - - a = digest[0]; - b = digest[1]; - c = digest[2]; - d = digest[3]; - e = digest[4]; - - for (i = 0; i < 20; i++) { - t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - for (; i < 40; i ++) { - t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - for (; i < 60; i ++) { - t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - for (; i < 80; i ++) { - t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - digest[0] += a; - digest[1] += b; - digest[2] += c; - digest[3] += d; - digest[4] += e; + __u32 A, B, C, D, E; + + A = digest[0]; + B = digest[1]; + C = digest[2]; + D = digest[3]; + E = digest[4]; + + /* Round 1 - iterations 0-16 take their input from 'data' */ + T_0_15( 0, A, B, C, D, E); + T_0_15( 1, E, A, B, C, D); + T_0_15( 2, D, E, A, B, C); + T_0_15( 3, C, D, E, A, B); + T_0_15( 4, B, C, D, E, A); + T_0_15( 5, A, B, C, D, E); + T_0_15( 6, E, A, B, C, D); + T_0_15( 7, D, E, A, B, C); + T_0_15( 8, C, D, E, A, B); + T_0_15( 9, B, C, D, E, A); + T_0_15(10, A, B, C, D, E); + T_0_15(11, E, A, B, C, D); + T_0_15(12, D, E, A, B, C); + T_0_15(13, C, D, E, A, B); + T_0_15(14, B, C, D, E, A); + T_0_15(15, A, B, C, D, E); + + /* Round 1 - tail. Input from 512-bit mixing array */ + T_16_19(16, E, A, B, C, D); + T_16_19(17, D, E, A, B, C); + T_16_19(18, C, D, E, A, B); + T_16_19(19, B, C, D, E, A); + + /* Round 2 */ + T_20_39(20, A, B, C, D, E); + T_20_39(21, E, A, B, C, D); + T_20_39(22, D, E, A, B, C); + T_20_39(23, C, D, E, A, B); + T_20_39(24, B, C, D, E, A); + T_20_39(25, A, B, C, D, E); + T_20_39(26, E, A, B, C, D); + T_20_39(27, D, E, A, B, C); + T_20_39(28, C, D, E, A, B); + T_20_39(29, B, C, D, E, A); + T_20_39(30, A, B, C, D, E); + T_20_39(31, E, A, B, C, D); + T_20_39(32, D, E, A, B, C); + T_20_39(33, C, D, E, A, B); + T_20_39(34, B, C, D, E, A); + T_20_39(35, A, B, C, D, E); + T_20_39(36, E, A, B, C, D); + T_20_39(37, D, E, A, B, C); + T_20_39(38, C, D, E, A, B); + T_20_39(39, B, C, D, E, A); + + /* Round 3 */ + T_40_59(40, A, B, C, D, E); + T_40_59(41, E, A, B, C, D); + T_40_59(42, D, E, A, B, C); + T_40_59(43, C, D, E, A, B); + T_40_59(44, B, C, D, E, A); + T_40_59(45, A, B, C, D, E); + T_40_59(46, E, A, B, C, D); + T_40_59(47, D, E, A, B, C); + T_40_59(48, C, D, E, A, B); + T_40_59(49, B, C, D, E, A); + T_40_59(50, A, B, C, D, E); + T_40_59(51, E, A, B, C, D); + T_40_59(52, D, E, A, B, C); + T_40_59(53, C, D, E, A, B); + T_40_59(54, B, C, D, E, A); + T_40_59(55, A, B, C, D, E); + T_40_59(56, E, A, B, C, D); + T_40_59(57, D, E, A, B, C); + T_40_59(58, C, D, E, A, B); + T_40_59(59, B, C, D, E, A); + + /* Round 4 */ + T_60_79(60, A, B, C, D, E); + T_60_79(61, E, A, B, C, D); + T_60_79(62, D, E, A, B, C); + T_60_79(63, C, D, E, A, B); + T_60_79(64, B, C, D, E, A); + T_60_79(65, A, B, C, D, E); + T_60_79(66, E, A, B, C, D); + T_60_79(67, D, E, A, B, C); + T_60_79(68, C, D, E, A, B); + T_60_79(69, B, C, D, E, A); + T_60_79(70, A, B, C, D, E); + T_60_79(71, E, A, B, C, D); + T_60_79(72, D, E, A, B, C); + T_60_79(73, C, D, E, A, B); + T_60_79(74, B, C, D, E, A); + T_60_79(75, 
A, B, C, D, E); + T_60_79(76, E, A, B, C, D); + T_60_79(77, D, E, A, B, C); + T_60_79(78, C, D, E, A, B); + T_60_79(79, B, C, D, E, A); + + digest[0] += A; + digest[1] += B; + digest[2] += C; + digest[3] += D; + digest[4] += E; } EXPORT_SYMBOL(sha_transform); From 6e8bde32657d7c56995c7020731e41c1acafaff3 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Sat, 17 Dec 2011 17:43:27 -0800 Subject: [PATCH 070/117] Remove ARM sha1 routines Since commit 8827a4a6b397b07a341ba7fd36ffb91a7835207d ("lib/sha1: use the git implementation of SHA-1"), the ARM SHA1 routines no longer work. The reason? They depended on the larger 320-byte workspace, and now the sha1 workspace is just 16 words (64 bytes). So the assembly version would overwrite the stack randomly. The optimized asm version is also probably slower than the new improved C version, so there's no reason to keep it around. At least that was the case in git, where what appears to be the same assembly language version was removed two years ago because the optimized C BLK_SHA1 code was faster. Reported-and-tested-by: Joachim Eastwood Cc: Andreas Schwab Cc: Nicolas Pitre Signed-off-by: Linus Torvalds --- arch/arm/lib/sha1.S | 211 -------------------------------------------- 1 file changed, 211 deletions(-) delete mode 100644 arch/arm/lib/sha1.S diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S deleted file mode 100644 index eb0edb80..00000000 --- a/arch/arm/lib/sha1.S +++ /dev/null @@ -1,211 +0,0 @@ -/* - * linux/arch/arm/lib/sha1.S - * - * SHA transform optimized for ARM - * - * Copyright: (C) 2005 by Nicolas Pitre - * Created: September 17, 2005 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * The reference implementation for this code is linux/lib/sha1.c - */ - -#include - - .text - - -/* - * void sha_transform(__u32 *digest, const char *in, __u32 *W) - * - * Note: the "in" ptr may be unaligned. - */ - -ENTRY(sha_transform) - - stmfd sp!, {r4 - r8, lr} - - @ for (i = 0; i < 16; i++) - @ W[i] = be32_to_cpu(in[i]); - -#ifdef __ARMEB__ - mov r4, r0 - mov r0, r2 - mov r2, #64 - bl memcpy - mov r2, r0 - mov r0, r4 -#else - mov r3, r2 - mov lr, #16 -1: ldrb r4, [r1], #1 - ldrb r5, [r1], #1 - ldrb r6, [r1], #1 - ldrb r7, [r1], #1 - subs lr, lr, #1 - orr r5, r5, r4, lsl #8 - orr r6, r6, r5, lsl #8 - orr r7, r7, r6, lsl #8 - str r7, [r3], #4 - bne 1b -#endif - - @ for (i = 0; i < 64; i++) - @ W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31); - - sub r3, r2, #4 - mov lr, #64 -2: ldr r4, [r3, #4]! - subs lr, lr, #1 - ldr r5, [r3, #8] - ldr r6, [r3, #32] - ldr r7, [r3, #52] - eor r4, r4, r5 - eor r4, r4, r6 - eor r4, r4, r7 - mov r4, r4, ror #31 - str r4, [r3, #64] - bne 2b - - /* - * The SHA functions are: - * - * f1(B,C,D) = (D ^ (B & (C ^ D))) - * f2(B,C,D) = (B ^ C ^ D) - * f3(B,C,D) = ((B & C) | (D & (B | C))) - * - * Then the sub-blocks are processed as follows: - * - * A' = ror(A, 27) + f(B,C,D) + E + K + *W++ - * B' = A - * C' = ror(B, 2) - * D' = C - * E' = D - * - * We therefore unroll each loop 5 times to avoid register shuffling. - * Also the ror for C (and also D and E which are successivelyderived - * from it) is applied in place to cut on an additional mov insn for - * each round. 
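/*
 * Editorial worked illustration of the commit message above: the deleted
 * assembly expands the schedule in place, writing W[16]..W[79], i.e. it
 * needs an 80-word (320-byte) workspace, while after the previous patch
 * callers allocate only SHA_WORKSPACE_WORDS == 16 words (64 bytes), so each
 * call would scribble (80 - 16) * 4 = 256 bytes past the on-stack buffer.
 * A hypothetical compile-time guard in a C wrapper around the asm, e.g.
 *
 *	BUILD_BUG_ON(SHA_WORKSPACE_WORDS < 80);
 *
 * would have turned that silent stack corruption into a build failure the
 * moment the header shrank the workspace.
 */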
- */ - - .macro sha_f1, A, B, C, D, E - ldr r3, [r2], #4 - eor ip, \C, \D - add \E, r1, \E, ror #2 - and ip, \B, ip, ror #2 - add \E, \E, \A, ror #27 - eor ip, ip, \D, ror #2 - add \E, \E, r3 - add \E, \E, ip - .endm - - .macro sha_f2, A, B, C, D, E - ldr r3, [r2], #4 - add \E, r1, \E, ror #2 - eor ip, \B, \C, ror #2 - add \E, \E, \A, ror #27 - eor ip, ip, \D, ror #2 - add \E, \E, r3 - add \E, \E, ip - .endm - - .macro sha_f3, A, B, C, D, E - ldr r3, [r2], #4 - add \E, r1, \E, ror #2 - orr ip, \B, \C, ror #2 - add \E, \E, \A, ror #27 - and ip, ip, \D, ror #2 - add \E, \E, r3 - and r3, \B, \C, ror #2 - orr ip, ip, r3 - add \E, \E, ip - .endm - - ldmia r0, {r4 - r8} - - mov lr, #4 - ldr r1, .L_sha_K + 0 - - /* adjust initial values */ - mov r6, r6, ror #30 - mov r7, r7, ror #30 - mov r8, r8, ror #30 - -3: subs lr, lr, #1 - sha_f1 r4, r5, r6, r7, r8 - sha_f1 r8, r4, r5, r6, r7 - sha_f1 r7, r8, r4, r5, r6 - sha_f1 r6, r7, r8, r4, r5 - sha_f1 r5, r6, r7, r8, r4 - bne 3b - - ldr r1, .L_sha_K + 4 - mov lr, #4 - -4: subs lr, lr, #1 - sha_f2 r4, r5, r6, r7, r8 - sha_f2 r8, r4, r5, r6, r7 - sha_f2 r7, r8, r4, r5, r6 - sha_f2 r6, r7, r8, r4, r5 - sha_f2 r5, r6, r7, r8, r4 - bne 4b - - ldr r1, .L_sha_K + 8 - mov lr, #4 - -5: subs lr, lr, #1 - sha_f3 r4, r5, r6, r7, r8 - sha_f3 r8, r4, r5, r6, r7 - sha_f3 r7, r8, r4, r5, r6 - sha_f3 r6, r7, r8, r4, r5 - sha_f3 r5, r6, r7, r8, r4 - bne 5b - - ldr r1, .L_sha_K + 12 - mov lr, #4 - -6: subs lr, lr, #1 - sha_f2 r4, r5, r6, r7, r8 - sha_f2 r8, r4, r5, r6, r7 - sha_f2 r7, r8, r4, r5, r6 - sha_f2 r6, r7, r8, r4, r5 - sha_f2 r5, r6, r7, r8, r4 - bne 6b - - ldmia r0, {r1, r2, r3, ip, lr} - add r4, r1, r4 - add r5, r2, r5 - add r6, r3, r6, ror #2 - add r7, ip, r7, ror #2 - add r8, lr, r8, ror #2 - stmia r0, {r4 - r8} - - ldmfd sp!, {r4 - r8, pc} - -ENDPROC(sha_transform) - - .align 2 -.L_sha_K: - .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 - - -/* - * void sha_init(__u32 *buf) - */ - - .align 2 -.L_sha_initial_digest: - .word 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 - -ENTRY(sha_init) - - str lr, [sp, #-4]! - adr r1, .L_sha_initial_digest - ldmia r1, {r1, r2, r3, ip, lr} - stmia r0, {r1, r2, r3, ip, lr} - ldr pc, [sp], #4 - -ENDPROC(sha_init) From 9b7e286b0820fad7c7bc902f3fcd502bc8e18b8d Mon Sep 17 00:00:00 2001 From: Dennis Boyce Date: Fri, 21 Sep 2012 09:00:00 -0400 Subject: [PATCH 071/117] Added Fugeswap. --- drivers/staging/android/lowmemorykiller.c | 27 ++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index f578bd88..30f9603c 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -44,6 +44,11 @@ #include #include +#ifdef CONFIG_SWAP +#include +#include +#endif + #define DEBUG_LEVEL_DEATHPENDING 6 static uint32_t lowmem_debug_level = 2; @@ -77,7 +82,9 @@ static int lowmem_minfile_size = 6; static unsigned long lowmem_deathpending_timeout; extern int compact_nodes(int); static uint32_t lowmem_check_filepages = 0; - +#ifdef CONFIG_SWAP +static int fudgeswap = 512; +#endif #define lowmem_print(level, x...) 
\ do { \ if (lowmem_debug_level >= (level)) { \ @@ -143,6 +150,20 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) } } + #ifdef CONFIG_SWAP + if(fudgeswap != 0){ + struct sysinfo si; + si_swapinfo(&si); + + if(si.freeswap > 0){ + if(fudgeswap > si.freeswap) + other_file += si.freeswap; + else + other_file += fudgeswap; + } + } + #endif + if (lowmem_adj_size < array_size) array_size = lowmem_adj_size; if (lowmem_minfree_size < array_size) @@ -261,6 +282,10 @@ module_param_named(check_filepages , lowmem_check_filepages, uint, module_param_array_named(minfile, lowmem_minfile, uint, &lowmem_minfile_size, S_IRUGO | S_IWUSR); +#ifdef CONFIG_SWAP +module_param_named(fudgeswap, fudgeswap, int, S_IRUGO | S_IWUSR); +#endif + module_init(lowmem_init); module_exit(lowmem_exit); From 78b416084ba50362711d9103e00863dd2d90f091 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 12 Feb 2013 12:59:22 -0500 Subject: [PATCH 072/117] Increase default SD readahead --- include/linux/mm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 2009b318..4d4bd4fb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1472,7 +1472,7 @@ int write_one_page(struct page *page, int wait); void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ -#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MAX_READAHEAD 1024 /* kbytes */ #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ int force_page_cache_readahead(struct address_space *mapping, struct file *filp, From 09855c9ec6ffb1a85f70f4dd05b58f4327970de6 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 12 Feb 2013 13:05:33 -0500 Subject: [PATCH 073/117] Disable GENTLE_FAIR_SLEEPERS --- kernel/sched_features.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 1e7066d7..69956b40 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -3,7 +3,7 @@ * them to run sooner, but does not allow tons of sleepers to * rip the spread apart. */ -SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) +SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 0) /* * Place new tasks ahead so that they do not starve already running From 079a8acc3265de841ba48f218ab6fabae780d4df Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 12 Feb 2013 13:15:57 -0500 Subject: [PATCH 074/117] Reduce CPU transition latency --- arch/arm/plat-omap/cpu-omap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c index da4f68db..ff7af088 100644 --- a/arch/arm/plat-omap/cpu-omap.c +++ b/arch/arm/plat-omap/cpu-omap.c @@ -127,7 +127,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) } /* FIXME: what's the actual transition time? */ - policy->cpuinfo.transition_latency = 300 * 1000; + policy->cpuinfo.transition_latency = 30 * 1000; return 0; } From a5de03eb9be0feef74706ff19059d1207ebc2ef7 Mon Sep 17 00:00:00 2001 From: Mike Date: Sat, 1 Sep 2012 16:14:22 -0700 Subject: [PATCH 075/117] mm: optimize SLUB --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slub.c b/mm/slub.c index 721a1fa0..30d95739 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2244,7 +2244,7 @@ EXPORT_SYMBOL(kmem_cache_free); * take the list_lock. 
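/*
 * Editorial worked example; it refers back to the lowmemorykiller
 * "fudgeswap" hunk above, not to the SLUB change it sits next to.  With the
 * default fudgeswap=512 and, say, 20000 free swap pages, other_file is
 * inflated by min(512, 20000) = 512 pages (2 MB with 4 KB pages), so the
 * killer acts as if that much extra file cache were reclaimable and
 * triggers slightly later; fudgeswap=0 disables the bias, and a value
 * larger than the remaining free swap is clamped to it.
 */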
*/ static int slub_min_order; -static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; +static int slub_max_order; static int slub_min_objects; /* From 786892eddcb3fef9f401c9ce8ceb5c13c8205a61 Mon Sep 17 00:00:00 2001 From: franciscofranco Date: Thu, 8 Dec 2011 04:54:25 +0000 Subject: [PATCH 076/117] Add dynamic writeback feature from 3.1 --- fs/block_dev.c | 23 +- fs/fs-writeback.c | 376 ++++++++++++++++--------------- fs/inode.c | 5 +- fs/internal.h | 3 + fs/super.c | 37 ++- include/linux/backing-dev.h | 10 +- include/linux/writeback.h | 43 +++- include/trace/events/writeback.h | 183 +++++++++++++-- mm/backing-dev.c | 114 ++++++---- mm/page-writeback.c | 276 +++++++++++++++++++---- 10 files changed, 762 insertions(+), 308 deletions(-) diff --git a/fs/block_dev.c b/fs/block_dev.c index 3e218296..7454bb79 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -44,24 +44,28 @@ inline struct block_device *I_BDEV(struct inode *inode) { return &BDEV_I(inode)->bdev; } - EXPORT_SYMBOL(I_BDEV); /* - * move the inode from it's current bdi to the a new bdi. if the inode is dirty - * we need to move it onto the dirty list of @dst so that the inode is always - * on the right list. + * Move the inode from its current bdi to a new bdi. If the inode is dirty we + * need to move it onto the dirty list of @dst so that the inode is always on + * the right list. */ static void bdev_inode_switch_bdi(struct inode *inode, struct backing_dev_info *dst) { - spin_lock(&inode_wb_list_lock); + struct backing_dev_info *old = inode->i_data.backing_dev_info; + + if (unlikely(dst == old)) /* deadlock avoidance */ + return; + bdi_lock_two(&old->wb, &dst->wb); spin_lock(&inode->i_lock); inode->i_data.backing_dev_info = dst; if (inode->i_state & I_DIRTY) list_move(&inode->i_wb_list, &dst->wb.b_dirty); spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&old->wb.list_lock); + spin_unlock(&dst->wb.list_lock); } sector_t blkdev_max_block(struct block_device *bdev) @@ -1430,11 +1434,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); - /* ->release can cause the old bdi to disappear, - * so must switch it out first - */ - bdev_inode_switch_bdi(bdev->bd_inode, - &default_backing_dev_info); } if (bdev->bd_contains == bdev) { if (disk->fops->release) @@ -1446,6 +1445,8 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) disk_put_part(bdev->bd_part); bdev->bd_part = NULL; bdev->bd_disk = NULL; + bdev_inode_switch_bdi(bdev->bd_inode, + &default_backing_dev_info); if (bdev != bdev->bd_contains) victim = bdev->bd_contains; bdev->bd_contains = NULL; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 49c8c984..04cf3b91 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -35,6 +35,7 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; + unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -181,12 +182,13 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi) */ void inode_wb_list_del(struct inode *inode) { - spin_lock(&inode_wb_list_lock); + struct backing_dev_info *bdi = inode_to_bdi(inode); + + spin_lock(&bdi->wb.list_lock); list_del_init(&inode->i_wb_list); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&bdi->wb.list_lock); } - /* * Redirty an inode: set its when-it-was dirtied timestamp and move it to the * furthest end of its superblock's dirty-inode list. 
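/*
 * Editorial sketch of the locking idiom this patch depends on.  Replacing
 * the global inode_wb_list_lock with a per-bdi wb.list_lock means the bdi
 * switch above can need two list locks at once; taking them in a fixed
 * order by address, as the bdi_lock_two() helper added to mm/backing-dev.c
 * later in this same patch does, rules out an ABBA deadlock, and the
 * "dst == old" early return covers the remaining self-deadlock case where
 * both would be the same lock.  In outline:
 */
static inline void example_lock_pair(spinlock_t *a, spinlock_t *b)
{
	/* Every path agrees on the order, so no CPU can hold one lock while
	 * spinning on the other lock held by its peer. */
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}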
@@ -196,11 +198,9 @@ void inode_wb_list_del(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode) +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) { - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; - - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -214,11 +214,9 @@ static void redirty_tail(struct inode *inode) /* * requeue inode for re-scanning after bdi->b_io list is exhausted. */ -static void requeue_io(struct inode *inode) +static void requeue_io(struct inode *inode, struct bdi_writeback *wb) { - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; - - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); list_move(&inode->i_wb_list, &wb->b_more_io); } @@ -226,7 +224,7 @@ static void inode_sync_complete(struct inode *inode) { /* * Prevent speculative execution through - * spin_unlock(&inode_wb_list_lock); + * spin_unlock(&wb->list_lock); */ smp_mb(); @@ -251,15 +249,16 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) /* * Move expired dirty inodes from @delaying_queue to @dispatch_queue. */ -static void move_expired_inodes(struct list_head *delaying_queue, +static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - unsigned long *older_than_this) + unsigned long *older_than_this) { LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; struct inode *inode; int do_sb_sort = 0; + int moved = 0; while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); @@ -270,12 +269,13 @@ static void move_expired_inodes(struct list_head *delaying_queue, do_sb_sort = 1; sb = inode->i_sb; list_move(&inode->i_wb_list, &tmp); + moved++; } /* just one sb in list, splice to dispatch_queue and we're done */ if (!do_sb_sort) { list_splice(&tmp, dispatch_queue); - return; + goto out; } /* Move inodes from one superblock together */ @@ -287,6 +287,8 @@ static void move_expired_inodes(struct list_head *delaying_queue, list_move(&inode->i_wb_list, dispatch_queue); } } +out: + return moved; } /* @@ -302,9 +304,11 @@ static void move_expired_inodes(struct list_head *delaying_queue, */ static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this) { - assert_spin_locked(&inode_wb_list_lock); + int moved; + assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); + trace_writeback_queue_io(wb, older_than_this, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -317,7 +321,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc) /* * Wait for writeback on an inode to complete. 
*/ -static void inode_wait_for_writeback(struct inode *inode) +static void inode_wait_for_writeback(struct inode *inode, + struct bdi_writeback *wb) { DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); wait_queue_head_t *wqh; @@ -325,15 +330,15 @@ static void inode_wait_for_writeback(struct inode *inode) wqh = bit_waitqueue(&inode->i_state, __I_SYNC); while (inode->i_state & I_SYNC) { spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE); - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); } } /* - * Write out an inode's dirty pages. Called under inode_wb_list_lock and + * Write out an inode's dirty pages. Called under wb->list_lock and * inode->i_lock. Either the caller has an active reference on the inode or * the inode has I_WILL_FREE set. * @@ -344,13 +349,15 @@ static void inode_wait_for_writeback(struct inode *inode) * livelocks, etc. */ static int -writeback_single_inode(struct inode *inode, struct writeback_control *wbc) +writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, + struct writeback_control *wbc) { struct address_space *mapping = inode->i_mapping; + long nr_to_write = wbc->nr_to_write; unsigned dirty; int ret; - assert_spin_locked(&inode_wb_list_lock); + assert_spin_locked(&wb->list_lock); assert_spin_locked(&inode->i_lock); if (!atomic_read(&inode->i_count)) @@ -368,14 +375,16 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * completed a full scan of b_io. */ if (wbc->sync_mode != WB_SYNC_ALL) { - requeue_io(inode); + requeue_io(inode, wb); + trace_writeback_single_inode_requeue(inode, wbc, + nr_to_write); return 0; } /* * It's a data-integrity sync. We must wait. */ - inode_wait_for_writeback(inode); + inode_wait_for_writeback(inode, wb); } BUG_ON(inode->i_state & I_SYNC); @@ -384,7 +393,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) inode->i_state |= I_SYNC; inode->i_state &= ~I_DIRTY_PAGES; spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); ret = do_writepages(mapping, wbc); @@ -415,7 +424,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) ret = err; } - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); inode->i_state &= ~I_SYNC; if (!(inode->i_state & I_FREEING)) { @@ -438,7 +447,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) /* * slice used up: queue for next turn */ - requeue_io(inode); + requeue_io(inode, wb); } else { /* * Writeback blocked by something other than @@ -447,7 +456,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode); + redirty_tail(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -456,7 +465,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * submission or metadata updates after data IO * completion. */ - redirty_tail(inode); + redirty_tail(inode, wb); } else { /* * The inode is clean. At this point we either have @@ -467,33 +476,39 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) } } inode_sync_complete(inode); + trace_writeback_single_inode(inode, wbc, nr_to_write); return ret; } -/* - * For background writeback the caller does not have the sb pinned - * before calling writeback. 
So make sure that we do pin it, so it doesn't - * go away while we are writing inodes from it. - */ -static bool pin_sb_for_writeback(struct super_block *sb) +static long writeback_chunk_size(struct backing_dev_info *bdi, + struct wb_writeback_work *work) { - spin_lock(&sb_lock); - if (list_empty(&sb->s_instances)) { - spin_unlock(&sb_lock); - return false; - } - - sb->s_count++; - spin_unlock(&sb_lock); + long pages; - if (down_read_trylock(&sb->s_umount)) { - if (sb->s_root) - return true; - up_read(&sb->s_umount); + /* + * WB_SYNC_ALL mode does livelock avoidance by syncing dirty + * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX + * here avoids calling into writeback_inodes_wb() more than once. + * + * The intended call sequence for WB_SYNC_ALL writeback is: + * + * wb_writeback() + * writeback_sb_inodes() <== called only once + * write_cache_pages() <== called once for each inode + * (quickly) tag currently dirty pages + * (maybe slowly) sync all tagged pages + */ + if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) + pages = LONG_MAX; + else { + pages = min(bdi->avg_write_bandwidth / 2, + global_dirty_limit / DIRTY_SCOPE); + pages = min(pages, work->nr_pages); + pages = round_down(pages + MIN_WRITEBACK_PAGES, + MIN_WRITEBACK_PAGES); } - put_super(sb); - return false; + return pages; } /* @@ -503,24 +518,36 @@ static bool pin_sb_for_writeback(struct super_block *sb) * inodes. Otherwise write only ones which go sequentially * in reverse order. * - * Return 1, if the caller writeback routine should be - * interrupted. Otherwise return 0. + * Return the number of pages and/or inodes written. */ -static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, - struct writeback_control *wbc, bool only_this_sb) +static long writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct wb_writeback_work *work) { + struct writeback_control wbc = { + .sync_mode = work->sync_mode, + .tagged_writepages = work->tagged_writepages, + .for_kupdate = work->for_kupdate, + .for_background = work->for_background, + .range_cyclic = work->range_cyclic, + .range_start = 0, + .range_end = LLONG_MAX, + }; + unsigned long start_time = jiffies; + long write_chunk; + long wrote = 0; /* count both pages and inodes */ + while (!list_empty(&wb->b_io)) { - long pages_skipped; struct inode *inode = wb_inode(wb->b_io.prev); if (inode->i_sb != sb) { - if (only_this_sb) { + if (work->sb) { /* * We only want to write back data for this * superblock, move all inodes not belonging * to it back onto the dirty list. */ - redirty_tail(inode); + redirty_tail(inode, wb); continue; } @@ -529,7 +556,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, * Bounce back to the caller to unpin this and * pin the next superblock. */ - return 0; + break; } /* @@ -540,95 +567,96 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); - requeue_io(inode); + redirty_tail(inode, wb); continue; } - - /* - * Was this inode dirtied after sync_sb_inodes was called? - * This keeps sync from extra jobs and livelock. 
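/*
 * Editorial worked example for writeback_chunk_size() above, under assumed
 * numbers: with 4 KB pages, an estimated avg_write_bandwidth of 25600
 * pages/s (~100 MB/s) and a global_dirty_limit of 40000 pages, a
 * WB_SYNC_NONE, untagged work item gets
 *
 *	pages = min(25600 / 2, 40000 / DIRTY_SCOPE) = min(12800, 5000) = 5000
 *	pages = min(pages, work->nr_pages)          (assume nr_pages is large)
 *	pages = round_down(5000 + MIN_WRITEBACK_PAGES, MIN_WRITEBACK_PAGES)
 *	      = round_down(6024, 1024) = 5120
 *
 * i.e. roughly 20 MB per chunk and never below the 4 MB floor
 * (MIN_WRITEBACK_PAGES is 1024 pages with 4 KB pages), while WB_SYNC_ALL
 * and tagged_writepages keep LONG_MAX to avoid the livelock described in
 * the comment above.
 */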
- */ - if (inode_dirtied_after(inode, wbc->wb_start)) { - spin_unlock(&inode->i_lock); - return 1; - } - __iget(inode); + write_chunk = writeback_chunk_size(wb->bdi, work); + wbc.nr_to_write = write_chunk; + wbc.pages_skipped = 0; - pages_skipped = wbc->pages_skipped; - writeback_single_inode(inode, wbc); - if (wbc->pages_skipped != pages_skipped) { + writeback_single_inode(inode, wb, &wbc); + + work->nr_pages -= write_chunk - wbc.nr_to_write; + wrote += write_chunk - wbc.nr_to_write; + if (!(inode->i_state & I_DIRTY)) + wrote++; + if (wbc.pages_skipped) { /* * writeback is not making progress due to locked * buffers. Skip this inode for now. */ - redirty_tail(inode); + redirty_tail(inode, wb); } spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); iput(inode); cond_resched(); - spin_lock(&inode_wb_list_lock); - if (wbc->nr_to_write <= 0) { - wbc->more_io = 1; - return 1; + spin_lock(&wb->list_lock); + /* + * bail out to wb_writeback() often enough to check + * background threshold and other termination conditions. + */ + if (wrote) { + if (time_is_before_jiffies(start_time + HZ / 10UL)) + break; + if (work->nr_pages <= 0) + break; } - if (!list_empty(&wb->b_more_io)) - wbc->more_io = 1; } - /* b_io is empty */ - return 1; + return wrote; } -void writeback_inodes_wb(struct bdi_writeback *wb, - struct writeback_control *wbc) +static long __writeback_inodes_wb(struct bdi_writeback *wb, + struct wb_writeback_work *work) { - int ret = 0; - - if (!wbc->wb_start) - wbc->wb_start = jiffies; /* livelock avoidance */ - spin_lock(&inode_wb_list_lock); - if (!wbc->for_kupdate || list_empty(&wb->b_io)) - queue_io(wb, wbc->older_than_this); + unsigned long start_time = jiffies; + long wrote = 0; while (!list_empty(&wb->b_io)) { struct inode *inode = wb_inode(wb->b_io.prev); struct super_block *sb = inode->i_sb; - if (!pin_sb_for_writeback(sb)) { - requeue_io(inode); + if (!grab_super_passive(sb)) { + /* + * grab_super_passive() may fail consistently due to + * s_umount being grabbed by someone else. Don't use + * requeue_io() to avoid busy retrying the inode/sb. + */ + redirty_tail(inode, wb); continue; } - ret = writeback_sb_inodes(sb, wb, wbc, false); + wrote += writeback_sb_inodes(sb, wb, work); drop_super(sb); - if (ret) - break; + /* refer to the same tests at the end of writeback_sb_inodes */ + if (wrote) { + if (time_is_before_jiffies(start_time + HZ / 10UL)) + break; + if (work->nr_pages <= 0) + break; + } } - spin_unlock(&inode_wb_list_lock); /* Leave any unwritten inodes on b_io */ + return wrote; } -static void __writeback_inodes_sb(struct super_block *sb, - struct bdi_writeback *wb, struct writeback_control *wbc) +long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages) { - WARN_ON(!rwsem_is_locked(&sb->s_umount)); + struct wb_writeback_work work = { + .nr_pages = nr_pages, + .sync_mode = WB_SYNC_NONE, + .range_cyclic = 1, + }; - spin_lock(&inode_wb_list_lock); - if (!wbc->for_kupdate || list_empty(&wb->b_io)) - queue_io(wb, wbc->older_than_this); - writeback_sb_inodes(sb, wb, wbc, true); - spin_unlock(&inode_wb_list_lock); -} + spin_lock(&wb->list_lock); + if (list_empty(&wb->b_io)) + queue_io(wb, NULL); + __writeback_inodes_wb(wb, &work); + spin_unlock(&wb->list_lock); -/* - * The maximum number of pages to writeout in a single bdi flush/kupdate - * operation. We do this so we don't hold I_SYNC against an inode for - * enormous amounts of time, which would block a userspace task which has - * been forced to throttle against that inode. 
Also, the code reevaluates - * the dirty each time it has written this many pages. - */ -#define MAX_WRITEBACK_PAGES 1024 + return nr_pages - work.nr_pages; +} static inline bool over_bground_thresh(void) { @@ -640,6 +668,16 @@ static inline bool over_bground_thresh(void) global_page_state(NR_UNSTABLE_NFS) > background_thresh); } +/* + * Called under wb->list_lock. If there are multiple wb per bdi, + * only the flusher working on the first wb should do it. + */ +static void wb_update_bandwidth(struct bdi_writeback *wb, + unsigned long start_time) +{ + __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time); +} + /* * Explicit flushing or periodic writeback of "old" data. * @@ -658,46 +696,16 @@ static inline bool over_bground_thresh(void) static long wb_writeback(struct bdi_writeback *wb, struct wb_writeback_work *work) { - struct writeback_control wbc = { - .sync_mode = work->sync_mode, - .tagged_writepages = work->tagged_writepages, - .older_than_this = NULL, - .for_kupdate = work->for_kupdate, - .for_background = work->for_background, - .range_cyclic = work->range_cyclic, - }; + unsigned long wb_start = jiffies; + long nr_pages = work->nr_pages; unsigned long oldest_jif; - long wrote = 0; - long write_chunk = MAX_WRITEBACK_PAGES; struct inode *inode; + long progress; - if (wbc.for_kupdate) { - wbc.older_than_this = &oldest_jif; - oldest_jif = jiffies - - msecs_to_jiffies(dirty_expire_interval * 10); - } - if (!wbc.range_cyclic) { - wbc.range_start = 0; - wbc.range_end = LLONG_MAX; - } + oldest_jif = jiffies; + work->older_than_this = &oldest_jif; - /* - * WB_SYNC_ALL mode does livelock avoidance by syncing dirty - * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX - * here avoids calling into writeback_inodes_wb() more than once. - * - * The intended call sequence for WB_SYNC_ALL writeback is: - * - * wb_writeback() - * __writeback_inodes_sb() <== called only once - * write_cache_pages() <== called once for each inode - * (quickly) tag currently dirty pages - * (maybe slowly) sync all tagged pages - */ - if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages) - write_chunk = LONG_MAX; - - wbc.wb_start = jiffies; /* livelock avoidance */ + spin_lock(&wb->list_lock); for (;;) { /* * Stop writeback when nr_pages has been consumed @@ -722,52 +730,54 @@ static long wb_writeback(struct bdi_writeback *wb, if (work->for_background && !over_bground_thresh()) break; - wbc.more_io = 0; - wbc.nr_to_write = write_chunk; - wbc.pages_skipped = 0; + if (work->for_kupdate) { + oldest_jif = jiffies - + msecs_to_jiffies(dirty_expire_interval * 10); + work->older_than_this = &oldest_jif; + } - trace_wbc_writeback_start(&wbc, wb->bdi); + trace_writeback_start(wb->bdi, work); + if (list_empty(&wb->b_io)) + queue_io(wb, work->older_than_this); if (work->sb) - __writeback_inodes_sb(work->sb, wb, &wbc); + progress = writeback_sb_inodes(work->sb, wb, work); else - writeback_inodes_wb(wb, &wbc); - trace_wbc_writeback_written(&wbc, wb->bdi); + progress = __writeback_inodes_wb(wb, work); + trace_writeback_written(wb->bdi, work); - work->nr_pages -= write_chunk - wbc.nr_to_write; - wrote += write_chunk - wbc.nr_to_write; + wb_update_bandwidth(wb, wb_start); /* - * If we consumed everything, see if we have more + * Did we write something? Try for more + * + * Dirty inodes are moved to b_io for writeback in batches. + * The completion of the current batch does not necessarily + * mean the overall work is done. So we keep looping as long + * as made some progress on cleaning pages or inodes. 
*/ - if (wbc.nr_to_write <= 0) + if (progress) continue; /* - * Didn't write everything and we don't have more IO, bail + * No more inodes for IO, bail */ - if (!wbc.more_io) + if (list_empty(&wb->b_more_io)) break; - /* - * Did we write something? Try for more - */ - if (wbc.nr_to_write < write_chunk) - continue; /* * Nothing written. Wait for some inode to * become available for writeback. Otherwise * we'll just busyloop. */ - spin_lock(&inode_wb_list_lock); if (!list_empty(&wb->b_more_io)) { + trace_writeback_wait(wb->bdi, work); inode = wb_inode(wb->b_more_io.prev); - trace_wbc_writeback_wait(&wbc, wb->bdi); spin_lock(&inode->i_lock); - inode_wait_for_writeback(inode); + inode_wait_for_writeback(inode, wb); spin_unlock(&inode->i_lock); } - spin_unlock(&inode_wb_list_lock); } + spin_unlock(&wb->list_lock); - return wrote; + return nr_pages - work->nr_pages; } /* @@ -1047,7 +1057,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) if ((inode->i_state & flags) == flags) return; - if (unlikely(block_dump > 1)) + if (unlikely(block_dump)) block_dump___mark_inode_dirty(inode); spin_lock(&inode->i_lock); @@ -1098,10 +1108,10 @@ void __mark_inode_dirty(struct inode *inode, int flags) } spin_unlock(&inode->i_lock); - spin_lock(&inode_wb_list_lock); + spin_lock(&bdi->wb.list_lock); inode->dirtied_when = jiffies; list_move(&inode->i_wb_list, &bdi->wb.b_dirty); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&bdi->wb.list_lock); if (wakeup_bdi) bdi_wakeup_thread_delayed(bdi); @@ -1303,6 +1313,7 @@ EXPORT_SYMBOL(sync_inodes_sb); */ int write_inode_now(struct inode *inode, int sync) { + struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; int ret; struct writeback_control wbc = { .nr_to_write = LONG_MAX, @@ -1315,11 +1326,11 @@ int write_inode_now(struct inode *inode, int sync) wbc.nr_to_write = 0; might_sleep(); - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); - ret = writeback_single_inode(inode, &wbc); + ret = writeback_single_inode(inode, wb, &wbc); spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); if (sync) inode_sync_wait(inode); return ret; @@ -1339,13 +1350,14 @@ EXPORT_SYMBOL(write_inode_now); */ int sync_inode(struct inode *inode, struct writeback_control *wbc) { + struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; int ret; - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); - ret = writeback_single_inode(inode, wbc); + ret = writeback_single_inode(inode, wb, wbc); spin_unlock(&inode->i_lock); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); return ret; } EXPORT_SYMBOL(sync_inode); diff --git a/fs/inode.c b/fs/inode.c index 43566d17..09f334bf 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -37,7 +37,7 @@ * inode_lru, inode->i_lru * inode_sb_list_lock protects: * sb->s_inodes, inode->i_sb_list - * inode_wb_list_lock protects: + * bdi->wb.list_lock protects: * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list * inode_hash_lock protects: * inode_hashtable, inode->i_hash @@ -48,7 +48,7 @@ * inode->i_lock * inode_lru_lock * - * inode_wb_list_lock + * bdi->wb.list_lock * inode->i_lock * * inode_hash_lock @@ -68,7 +68,6 @@ static LIST_HEAD(inode_lru); static DEFINE_SPINLOCK(inode_lru_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); -__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock); /* * iprune_sem provides exclusion between the icache shrinking and the diff --git a/fs/internal.h b/fs/internal.h index 
b29c46e4..c905f59d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -97,6 +97,7 @@ extern struct file *get_empty_filp(void); * super.c */ extern int do_remount_sb(struct super_block *, int, void *, int); +extern bool grab_super_passive(struct super_block *sb); extern void __put_super(struct super_block *sb); extern void put_super(struct super_block *sb); extern struct dentry *mount_fs(struct file_system_type *, @@ -135,3 +136,5 @@ extern void inode_wb_list_del(struct inode *inode); extern int get_nr_dirty_inodes(void); extern void evict_inodes(struct super_block *); extern int invalidate_inodes(struct super_block *, bool); + +extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); diff --git a/fs/super.c b/fs/super.c index a687cf2a..83288df2 100644 --- a/fs/super.c +++ b/fs/super.c @@ -240,18 +240,49 @@ static int grab_super(struct super_block *s) __releases(sb_lock) return 0; } +/* + * grab_super_passive - acquire a passive reference + * @s: reference we are trying to grab + * + * Tries to acquire a passive reference. This is used in places where we + * cannot take an active reference but we need to ensure that the + * superblock does not go away while we are working on it. It returns + * false if a reference was not gained, and returns true with the s_umount + * lock held in read mode if a reference is gained. On successful return, + * the caller must drop the s_umount lock and the passive reference when + * done. + */ +bool grab_super_passive(struct super_block *sb) +{ + spin_lock(&sb_lock); + if (list_empty(&sb->s_instances)) { + spin_unlock(&sb_lock); + return false; + } + + sb->s_count++; + spin_unlock(&sb_lock); + + if (down_read_trylock(&sb->s_umount)) { + if (sb->s_root) + return true; + up_read(&sb->s_umount); + } + + put_super(sb); + return false; +} + /* * Superblock locking. We really ought to get rid of these two. 
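/*
 * Editorial usage sketch for grab_super_passive() above; the shape mirrors
 * how __writeback_inodes_wb() in this same patch uses it and is an
 * illustration only, not an additional caller:
 */
static void example_passive_visit(struct super_block *sb)
{
	if (!grab_super_passive(sb))
		return;		/* sb is going away or s_umount is contended */

	/* s_umount is held for read here, so sb->s_root cannot vanish */
	/* ... inspect or write back the superblock's inodes ... */

	drop_super(sb);		/* drops s_umount and the passive s_count ref */
}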
*/ void lock_super(struct super_block * sb) { - get_fs_excl(); mutex_lock(&sb->s_lock); } void unlock_super(struct super_block * sb) { - put_fs_excl(); mutex_unlock(&sb->s_lock); } @@ -822,7 +853,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, } else { char b[BDEVNAME_SIZE]; - s->s_flags = flags | MS_NOSEC; + s->s_flags = flags; s->s_mode = mode; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(bdev)); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 96f4094b..3b2f9cb8 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -16,7 +16,7 @@ #include #include #include -#include +#include struct page; struct device; @@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int); enum bdi_stat_item { BDI_RECLAIMABLE, BDI_WRITEBACK, + BDI_WRITTEN, NR_BDI_STAT_ITEMS }; @@ -57,6 +58,7 @@ struct bdi_writeback { struct list_head b_dirty; /* dirty inodes */ struct list_head b_io; /* parked for writeback */ struct list_head b_more_io; /* parked for more writeback */ + spinlock_t list_lock; /* protects the b_* lists */ }; struct backing_dev_info { @@ -71,6 +73,11 @@ struct backing_dev_info { struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS]; + unsigned long bw_time_stamp; /* last time write bw is updated */ + unsigned long written_stamp; /* pages written at bw_time_stamp */ + unsigned long write_bandwidth; /* the estimated write bandwidth */ + unsigned long avg_write_bandwidth; /* further smoothed write bw */ + struct prop_local_percpu completions; int dirty_exceeded; @@ -106,6 +113,7 @@ int bdi_writeback_thread(void *data); int bdi_has_dirty_io(struct backing_dev_info *bdi); void bdi_arm_supers_timer(void); void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); +void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); extern spinlock_t bdi_lock; extern struct list_head bdi_list; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 3f6542ca..2b8963ff 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -7,9 +7,28 @@ #include #include -struct backing_dev_info; +/* + * The 1/4 region under the global dirty thresh is for smooth dirty throttling: + * + * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) + * + * Further beyond, all dirtier tasks will enter a loop waiting (possibly long + * time) for the dirty pages to drop, unless written enough pages. + * + * The global dirty threshold is normally equal to the global dirty limit, + * except when the system suddenly allocates a lot of anonymous memory and + * knocks down the global dirty threshold quickly, in which case the global + * dirty limit will follow down slowly to prevent livelocking all dirtier tasks. + */ +#define DIRTY_SCOPE 8 +#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) -extern spinlock_t inode_wb_list_lock; +/* + * 4MB minimal write chunk size + */ +#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10)) + +struct backing_dev_info; /* * fs/fs-writeback.c @@ -26,11 +45,6 @@ enum writeback_sync_modes { */ struct writeback_control { enum writeback_sync_modes sync_mode; - unsigned long *older_than_this; /* If !NULL, only write back inodes - older than this */ - unsigned long wb_start; /* Time writeback_inodes_wb was - called. 
This is needed to avoid - extra jobs and livelock */ long nr_to_write; /* Write this many pages, and decrement this for each page written */ long pages_skipped; /* Pages which were not written */ @@ -43,14 +57,11 @@ struct writeback_control { loff_t range_start; loff_t range_end; - unsigned nonblocking:1; /* Don't get stuck on request queues */ - unsigned encountered_congestion:1; /* An output: a queue is full */ unsigned for_kupdate:1; /* A kupdate writeback */ unsigned for_background:1; /* A background writeback */ unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ - unsigned more_io:1; /* more io to be dispatched */ }; /* @@ -63,8 +74,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr); int writeback_inodes_sb_if_idle(struct super_block *); int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr); void sync_inodes_sb(struct super_block *); -void writeback_inodes_wb(struct bdi_writeback *wb, - struct writeback_control *wbc); +long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages); long wb_do_writeback(struct bdi_writeback *wb, int force_wait); void wakeup_flusher_threads(long nr_pages); @@ -95,6 +105,8 @@ static inline void laptop_sync_completion(void) { } #endif void throttle_vm_writeout(gfp_t gfp_mask); +extern unsigned long global_dirty_limit; + /* These are exported to sysctl. */ extern int dirty_background_ratio; extern unsigned long dirty_background_bytes; @@ -129,6 +141,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty); +void __bdi_update_bandwidth(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + unsigned long start_time); + void page_writeback_init(void); void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 9b60c6fc..69d2056d 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -8,6 +8,19 @@ #include #include +#define show_inode_state(state) \ + __print_flags(state, "|", \ + {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \ + {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \ + {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \ + {I_NEW, "I_NEW"}, \ + {I_WILL_FREE, "I_WILL_FREE"}, \ + {I_FREEING, "I_FREEING"}, \ + {I_CLEAR, "I_CLEAR"}, \ + {I_SYNC, "I_SYNC"}, \ + {I_REFERENCED, "I_REFERENCED"} \ + ) + struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_work_class, @@ -52,6 +65,9 @@ DEFINE_EVENT(writeback_work_class, name, \ DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread); DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); +DEFINE_WRITEBACK_WORK_EVENT(writeback_start); +DEFINE_WRITEBACK_WORK_EVENT(writeback_written); +DEFINE_WRITEBACK_WORK_EVENT(writeback_wait); TRACE_EVENT(writeback_pages_written, TP_PROTO(long pages_written), @@ -91,6 +107,30 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register); DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); DEFINE_WRITEBACK_EVENT(writeback_thread_start); DEFINE_WRITEBACK_EVENT(writeback_thread_stop); +DEFINE_WRITEBACK_EVENT(balance_dirty_start); +DEFINE_WRITEBACK_EVENT(balance_dirty_wait); + +TRACE_EVENT(balance_dirty_written, + + TP_PROTO(struct backing_dev_info *bdi, int written), + + 
TP_ARGS(bdi, written), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(int, written) + ), + + TP_fast_assign( + strncpy(__entry->name, dev_name(bdi->dev), 32); + __entry->written = written; + ), + + TP_printk("bdi %s written %d", + __entry->name, + __entry->written + ) +); DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), @@ -104,8 +144,6 @@ DECLARE_EVENT_CLASS(wbc_class, __field(int, for_background) __field(int, for_reclaim) __field(int, range_cyclic) - __field(int, more_io) - __field(unsigned long, older_than_this) __field(long, range_start) __field(long, range_end) ), @@ -119,15 +157,12 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->for_background = wbc->for_background; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; - __entry->more_io = wbc->more_io; - __entry->older_than_this = wbc->older_than_this ? - *wbc->older_than_this : 0; __entry->range_start = (long)wbc->range_start; __entry->range_end = (long)wbc->range_end; ), TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " - "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx " + "bgrd=%d reclm=%d cyclic=%d " "start=0x%lx end=0x%lx", __entry->name, __entry->nr_to_write, @@ -137,8 +172,6 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->for_background, __entry->for_reclaim, __entry->range_cyclic, - __entry->more_io, - __entry->older_than_this, __entry->range_start, __entry->range_end) ) @@ -147,14 +180,79 @@ DECLARE_EVENT_CLASS(wbc_class, DEFINE_EVENT(wbc_class, name, \ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \ TP_ARGS(wbc, bdi)) -DEFINE_WBC_EVENT(wbc_writeback_start); -DEFINE_WBC_EVENT(wbc_writeback_written); -DEFINE_WBC_EVENT(wbc_writeback_wait); -DEFINE_WBC_EVENT(wbc_balance_dirty_start); -DEFINE_WBC_EVENT(wbc_balance_dirty_written); -DEFINE_WBC_EVENT(wbc_balance_dirty_wait); DEFINE_WBC_EVENT(wbc_writepage); +TRACE_EVENT(writeback_queue_io, + TP_PROTO(struct bdi_writeback *wb, + unsigned long *older_than_this, + int moved), + TP_ARGS(wb, older_than_this, moved), + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, older) + __field(long, age) + __field(int, moved) + ), + TP_fast_assign( + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + __entry->older = older_than_this ? *older_than_this : 0; + __entry->age = older_than_this ? 
+ (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->moved = moved; + ), + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d", + __entry->name, + __entry->older, /* older_than_this in jiffies */ + __entry->age, /* older_than_this in relative milliseconds */ + __entry->moved) +); + +TRACE_EVENT(global_dirty_state, + + TP_PROTO(unsigned long background_thresh, + unsigned long dirty_thresh + ), + + TP_ARGS(background_thresh, + dirty_thresh + ), + + TP_STRUCT__entry( + __field(unsigned long, nr_dirty) + __field(unsigned long, nr_writeback) + __field(unsigned long, nr_unstable) + __field(unsigned long, background_thresh) + __field(unsigned long, dirty_thresh) + __field(unsigned long, dirty_limit) + __field(unsigned long, nr_dirtied) + __field(unsigned long, nr_written) + ), + + TP_fast_assign( + __entry->nr_dirty = global_page_state(NR_FILE_DIRTY); + __entry->nr_writeback = global_page_state(NR_WRITEBACK); + __entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS); + __entry->nr_dirtied = global_page_state(NR_DIRTIED); + __entry->nr_written = global_page_state(NR_WRITTEN); + __entry->background_thresh = background_thresh; + __entry->dirty_thresh = dirty_thresh; + __entry->dirty_limit = global_dirty_limit; + ), + + TP_printk("dirty=%lu writeback=%lu unstable=%lu " + "bg_thresh=%lu thresh=%lu limit=%lu " + "dirtied=%lu written=%lu", + __entry->nr_dirty, + __entry->nr_writeback, + __entry->nr_unstable, + __entry->background_thresh, + __entry->dirty_thresh, + __entry->dirty_limit, + __entry->nr_dirtied, + __entry->nr_written + ) +); + DECLARE_EVENT_CLASS(writeback_congest_waited_template, TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), @@ -190,6 +288,63 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested, TP_ARGS(usec_timeout, usec_delayed) ); +DECLARE_EVENT_CLASS(writeback_single_inode_template, + + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write + ), + + TP_ARGS(inode, wbc, nr_to_write), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned long, state) + __field(unsigned long, dirtied_when) + __field(unsigned long, writeback_index) + __field(long, nr_to_write) + __field(unsigned long, wrote) + ), + + TP_fast_assign( + strncpy(__entry->name, + dev_name(inode->i_mapping->backing_dev_info->dev), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; + __entry->writeback_index = inode->i_mapping->writeback_index; + __entry->nr_to_write = nr_to_write; + __entry->wrote = nr_to_write - wbc->nr_to_write; + ), + + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " + "index=%lu to_write=%ld wrote=%lu", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), + __entry->dirtied_when, + (jiffies - __entry->dirtied_when) / HZ, + __entry->writeback_index, + __entry->nr_to_write, + __entry->wrote + ) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue, + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write), + TP_ARGS(inode, wbc, nr_to_write) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, + TP_PROTO(struct inode *inode, + struct writeback_control *wbc, + unsigned long nr_to_write), + TP_ARGS(inode, wbc, nr_to_write) +); + #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ diff --git a/mm/backing-dev.c b/mm/backing-dev.c index b3b122f4..253b071b 100644 --- 
a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -45,6 +45,17 @@ static struct timer_list sync_supers_timer; static int bdi_sync_supers(void *); static void sync_supers_timer_fn(unsigned long); +void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) +{ + if (wb1 < wb2) { + spin_lock(&wb1->list_lock); + spin_lock_nested(&wb2->list_lock, 1); + } else { + spin_lock(&wb2->list_lock); + spin_lock_nested(&wb1->list_lock, 1); + } +} + #ifdef CONFIG_DEBUG_FS #include #include @@ -67,34 +78,42 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) struct inode *inode; nr_dirty = nr_io = nr_more_io = 0; - spin_lock(&inode_wb_list_lock); + spin_lock(&wb->list_lock); list_for_each_entry(inode, &wb->b_dirty, i_wb_list) nr_dirty++; list_for_each_entry(inode, &wb->b_io, i_wb_list) nr_io++; list_for_each_entry(inode, &wb->b_more_io, i_wb_list) nr_more_io++; - spin_unlock(&inode_wb_list_lock); + spin_unlock(&wb->list_lock); global_dirty_limits(&background_thresh, &dirty_thresh); bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); #define K(x) ((x) << (PAGE_SHIFT - 10)) seq_printf(m, - "BdiWriteback: %8lu kB\n" - "BdiReclaimable: %8lu kB\n" - "BdiDirtyThresh: %8lu kB\n" - "DirtyThresh: %8lu kB\n" - "BackgroundThresh: %8lu kB\n" - "b_dirty: %8lu\n" - "b_io: %8lu\n" - "b_more_io: %8lu\n" - "bdi_list: %8u\n" - "state: %8lx\n", + "BdiWriteback: %10lu kB\n" + "BdiReclaimable: %10lu kB\n" + "BdiDirtyThresh: %10lu kB\n" + "DirtyThresh: %10lu kB\n" + "BackgroundThresh: %10lu kB\n" + "BdiWritten: %10lu kB\n" + "BdiWriteBandwidth: %10lu kBps\n" + "b_dirty: %10lu\n" + "b_io: %10lu\n" + "b_more_io: %10lu\n" + "bdi_list: %10u\n" + "state: %10lx\n", (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)), - K(bdi_thresh), K(dirty_thresh), - K(background_thresh), nr_dirty, nr_io, nr_more_io, + K(bdi_thresh), + K(dirty_thresh), + K(background_thresh), + (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)), + (unsigned long) K(bdi->write_bandwidth), + nr_dirty, + nr_io, + nr_more_io, !list_empty(&bdi->bdi_list), bdi->state); #undef K @@ -249,18 +268,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi) return wb_has_dirty_io(&bdi->wb); } -static void bdi_flush_io(struct backing_dev_info *bdi) -{ - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .older_than_this = NULL, - .range_cyclic = 1, - .nr_to_write = 1024, - }; - - writeback_inodes_wb(&bdi->wb, &wbc); -} - /* * kupdated() used to do this. We cannot do it from the bdi_forker_thread() * or we risk deadlocking on ->s_umount. The longer term solution would be @@ -352,6 +359,17 @@ static unsigned long bdi_longest_inactive(void) return max(5UL * 60 * HZ, interval); } +/* + * Clear pending bit and wakeup anybody waiting for flusher thread creation or + * shutdown + */ +static void bdi_clear_pending(struct backing_dev_info *bdi) +{ + clear_bit(BDI_pending, &bdi->state); + smp_mb__after_clear_bit(); + wake_up_bit(&bdi->state, BDI_pending); +} + static int bdi_forker_thread(void *ptr) { struct bdi_writeback *me = ptr; @@ -383,6 +401,13 @@ static int bdi_forker_thread(void *ptr) } spin_lock_bh(&bdi_lock); + /* + * In the following loop we are going to check whether we have + * some work to do without any synchronization with tasks + * waking us up to do work for them. So we have to set task + * state already here so that we don't miss wakeups coming + * after we verify some condition. 
+ */ set_current_state(TASK_INTERRUPTIBLE); list_for_each_entry(bdi, &bdi_list, bdi_list) { @@ -446,9 +471,10 @@ static int bdi_forker_thread(void *ptr) if (IS_ERR(task)) { /* * If thread creation fails, force writeout of - * the bdi from the thread. + * the bdi from the thread. Hopefully 1024 is + * large enough for efficient IO. */ - bdi_flush_io(bdi); + writeback_inodes_wb(&bdi->wb, 1024); } else { /* * The spinlock makes sure we do not lose @@ -461,11 +487,13 @@ static int bdi_forker_thread(void *ptr) spin_unlock_bh(&bdi->wb_lock); wake_up_process(task); } + bdi_clear_pending(bdi); break; case KILL_THREAD: __set_current_state(TASK_RUNNING); kthread_stop(task); + bdi_clear_pending(bdi); break; case NO_ACTION: @@ -481,16 +509,8 @@ static int bdi_forker_thread(void *ptr) else schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10)); try_to_freeze(); - /* Back to the main loop */ - continue; + break; } - - /* - * Clear pending bit and wakeup anybody waiting to tear us down. - */ - clear_bit(BDI_pending, &bdi->state); - smp_mb__after_clear_bit(); - wake_up_bit(&bdi->state, BDI_pending); } return 0; @@ -505,7 +525,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi) list_del_rcu(&bdi->bdi_list); spin_unlock_bh(&bdi_lock); - synchronize_rcu(); + synchronize_rcu_expedited(); } int bdi_register(struct backing_dev_info *bdi, struct device *parent, @@ -629,9 +649,15 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) INIT_LIST_HEAD(&wb->b_dirty); INIT_LIST_HEAD(&wb->b_io); INIT_LIST_HEAD(&wb->b_more_io); + spin_lock_init(&wb->list_lock); setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi); } +/* + * Initial write bandwidth: 100 MB/s + */ +#define INIT_BW (100 << (20 - PAGE_SHIFT)) + int bdi_init(struct backing_dev_info *bdi) { int i, err; @@ -654,6 +680,13 @@ int bdi_init(struct backing_dev_info *bdi) } bdi->dirty_exceeded = 0; + + bdi->bw_time_stamp = jiffies; + bdi->written_stamp = 0; + + bdi->write_bandwidth = INIT_BW; + bdi->avg_write_bandwidth = INIT_BW; + err = prop_local_init_percpu(&bdi->completions); if (err) { @@ -677,11 +710,12 @@ void bdi_destroy(struct backing_dev_info *bdi) if (bdi_has_dirty_io(bdi)) { struct bdi_writeback *dst = &default_backing_dev_info.wb; - spin_lock(&inode_wb_list_lock); + bdi_lock_two(&bdi->wb, dst); list_splice(&bdi->wb.b_dirty, &dst->b_dirty); list_splice(&bdi->wb.b_io, &dst->b_io); list_splice(&bdi->wb.b_more_io, &dst->b_more_io); - spin_unlock(&inode_wb_list_lock); + spin_unlock(&bdi->wb.list_lock); + spin_unlock(&dst->list_lock); } bdi_unregister(bdi); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index fd24adf1..34f0c673 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -36,6 +36,16 @@ #include #include +/* + * Sleep at most 200ms at a time in balance_dirty_pages(). + */ +#define MAX_PAUSE max(HZ/5, 1) + +/* + * Estimate write bandwidth at 200ms intervals. + */ +#define BANDWIDTH_INTERVAL max(HZ/5, 1) + /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. @@ -111,6 +121,7 @@ EXPORT_SYMBOL(laptop_mode); /* End of sysctl-exported parameters */ +unsigned long global_dirty_limit; /* * Scale the writeback cache size proportional to the relative writeout speeds. 
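For a sense of scale, the constants introduced in this hunk are easy to evaluate numerically. A minimal stand-alone C sketch follows, assuming HZ=100 and 4 KiB pages (PAGE_SHIFT=12) purely for illustration; neither value is set by this patch, and the program only mirrors the arithmetic of MAX_PAUSE, BANDWIDTH_INTERVAL and the INIT_BW default added in backing-dev.c above.

#include <stdio.h>

#define HZ         100               /* assumed tick rate, illustration only */
#define PAGE_SHIFT 12                /* assumed 4 KiB pages, illustration only */

int main(void)
{
    /* same arithmetic as MAX_PAUSE / BANDWIDTH_INTERVAL: max(HZ/5, 1) jiffies */
    unsigned long interval = HZ / 5 > 1 ? HZ / 5 : 1;
    /* same arithmetic as INIT_BW: 100 MB/s expressed in pages per second */
    unsigned long init_bw  = 100UL << (20 - PAGE_SHIFT);

    printf("pause/estimation interval: %lu jiffies (~%lu ms)\n",
           interval, interval * 1000 / HZ);
    printf("initial write bandwidth:   %lu pages/s\n", init_bw);
    printf("pages expected per interval at the initial estimate: %lu\n",
           init_bw * interval / HZ);
    return 0;
}

At those assumed values this prints a 200 ms interval, an initial estimate of 25600 pages/s, and roughly 5120 pages expected per estimation interval, which is the scale of writeback the bandwidth estimator works with between updates.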
@@ -219,6 +230,7 @@ int dirty_bytes_handler(struct ctl_table *table, int write, */ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi) { + __inc_bdi_stat(bdi, BDI_WRITTEN); __prop_inc_percpu_max(&vm_completions, &bdi->completions, bdi->max_prop_frac); } @@ -244,13 +256,8 @@ void task_dirty_inc(struct task_struct *tsk) static void bdi_writeout_fraction(struct backing_dev_info *bdi, long *numerator, long *denominator) { - if (bdi_cap_writeback_dirty(bdi)) { - prop_fraction_percpu(&vm_completions, &bdi->completions, + prop_fraction_percpu(&vm_completions, &bdi->completions, numerator, denominator); - } else { - *numerator = 0; - *denominator = 1; - } } static inline void task_dirties_fraction(struct task_struct *tsk, @@ -274,12 +281,13 @@ static inline void task_dirties_fraction(struct task_struct *tsk, * effectively curb the growth of dirty pages. Light dirtiers with high enough * dirty threshold may never get throttled. */ +#define TASK_LIMIT_FRACTION 8 static unsigned long task_dirty_limit(struct task_struct *tsk, unsigned long bdi_dirty) { long numerator, denominator; unsigned long dirty = bdi_dirty; - u64 inv = dirty >> 3; + u64 inv = dirty / TASK_LIMIT_FRACTION; task_dirties_fraction(tsk, &numerator, &denominator); inv *= numerator; @@ -290,6 +298,12 @@ static unsigned long task_dirty_limit(struct task_struct *tsk, return max(dirty, bdi_dirty/2); } +/* Minimum limit for any task */ +static unsigned long task_min_dirty_limit(unsigned long bdi_dirty) +{ + return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION; +} + /* * */ @@ -397,6 +411,11 @@ unsigned long determine_dirtyable_memory(void) return x + 1; /* Ensure that we never return 0 */ } +static unsigned long hard_dirty_limit(unsigned long thresh) +{ + return max(thresh, global_dirty_limit); +} + /* * global_dirty_limits - background-writeback and dirty-throttling thresholds * @@ -435,12 +454,20 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) } *pbackground = background; *pdirty = dirty; + trace_global_dirty_state(background, dirty); } -/* +/** * bdi_dirty_limit - @bdi's share of dirty throttling threshold + * @bdi: the backing_dev_info to query + * @dirty: global dirty limit in pages * - * Allocate high/low dirty limits to fast/slow devices, in order to prevent + * Returns @bdi's dirty limit in pages. The term "dirty" in the context of + * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. + * And the "limit" in the name is not seriously taken as hard limit in + * balance_dirty_pages(). 
+ * + * It allocates high/low dirty limits to fast/slow devices, in order to prevent * - starving fast devices * - piling up dirty pages (that will take long time to sync) on slow devices * @@ -468,6 +495,153 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty) return bdi_dirty; } +static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, + unsigned long elapsed, + unsigned long written) +{ + const unsigned long period = roundup_pow_of_two(3 * HZ); + unsigned long avg = bdi->avg_write_bandwidth; + unsigned long old = bdi->write_bandwidth; + u64 bw; + + /* + * bw = written * HZ / elapsed + * + * bw * elapsed + write_bandwidth * (period - elapsed) + * write_bandwidth = --------------------------------------------------- + * period + */ + bw = written - bdi->written_stamp; + bw *= HZ; + if (unlikely(elapsed > period)) { + do_div(bw, elapsed); + avg = bw; + goto out; + } + bw += (u64)bdi->write_bandwidth * (period - elapsed); + bw >>= ilog2(period); + + /* + * one more level of smoothing, for filtering out sudden spikes + */ + if (avg > old && old >= (unsigned long)bw) + avg -= (avg - old) >> 3; + + if (avg < old && old <= (unsigned long)bw) + avg += (old - avg) >> 3; + +out: + bdi->write_bandwidth = bw; + bdi->avg_write_bandwidth = avg; +} + +/* + * The global dirtyable memory and dirty threshold could be suddenly knocked + * down by a large amount (eg. on the startup of KVM in a swapless system). + * This may throw the system into deep dirty exceeded state and throttle + * heavy/light dirtiers alike. To retain good responsiveness, maintain + * global_dirty_limit for tracking slowly down to the knocked down dirty + * threshold. + */ +static void update_dirty_limit(unsigned long thresh, unsigned long dirty) +{ + unsigned long limit = global_dirty_limit; + + /* + * Follow up in one step. + */ + if (limit < thresh) { + limit = thresh; + goto update; + } + + /* + * Follow down slowly. Use the higher one as the target, because thresh + * may drop below dirty. This is exactly the reason to introduce + * global_dirty_limit which is guaranteed to lie above the dirty pages. + */ + thresh = max(thresh, dirty); + if (limit > thresh) { + limit -= (limit - thresh) >> 5; + goto update; + } + return; +update: + global_dirty_limit = limit; +} + +static void global_update_bandwidth(unsigned long thresh, + unsigned long dirty, + unsigned long now) +{ + static DEFINE_SPINLOCK(dirty_lock); + static unsigned long update_time; + + /* + * check locklessly first to optimize away locking for the most time + */ + if (time_before(now, update_time + BANDWIDTH_INTERVAL)) + return; + + spin_lock(&dirty_lock); + if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) { + update_dirty_limit(thresh, dirty); + update_time = now; + } + spin_unlock(&dirty_lock); +} + +void __bdi_update_bandwidth(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + unsigned long start_time) +{ + unsigned long now = jiffies; + unsigned long elapsed = now - bdi->bw_time_stamp; + unsigned long written; + + /* + * rate-limit, only update once every 200ms. + */ + if (elapsed < BANDWIDTH_INTERVAL) + return; + + written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]); + + /* + * Skip quiet periods when disk bandwidth is under-utilized. 
+ * (at least 1s idle time between two flusher runs) + */ + if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time)) + goto snapshot; + + if (thresh) + global_update_bandwidth(thresh, dirty, now); + + bdi_update_write_bandwidth(bdi, elapsed, written); + +snapshot: + bdi->written_stamp = written; + bdi->bw_time_stamp = now; +} + +static void bdi_update_bandwidth(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + unsigned long start_time) +{ + if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) + return; + spin_lock(&bdi->wb.list_lock); + __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty, + start_time); + spin_unlock(&bdi->wb.list_lock); +} + /* * balance_dirty_pages() must be called by processes which are generating dirty * data. It looks at the number of dirty pages in the machine and will force @@ -478,27 +652,25 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty) static void balance_dirty_pages(struct address_space *mapping, unsigned long write_chunk) { - long nr_reclaimable, bdi_nr_reclaimable; - long nr_writeback, bdi_nr_writeback; + unsigned long nr_reclaimable, bdi_nr_reclaimable; + unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ + unsigned long bdi_dirty; unsigned long background_thresh; unsigned long dirty_thresh; unsigned long bdi_thresh; + unsigned long task_bdi_thresh; + unsigned long min_task_bdi_thresh; unsigned long pages_written = 0; unsigned long pause = 1; bool dirty_exceeded = false; + bool clear_dirty_exceeded = true; struct backing_dev_info *bdi = mapping->backing_dev_info; + unsigned long start_time = jiffies; for (;;) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .older_than_this = NULL, - .nr_to_write = write_chunk, - .range_cyclic = 1, - }; - nr_reclaimable = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); - nr_writeback = global_page_state(NR_WRITEBACK); + nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); global_dirty_limits(&background_thresh, &dirty_thresh); @@ -507,12 +679,12 @@ static void balance_dirty_pages(struct address_space *mapping, * catch-up. This avoids (excessively) small writeouts * when the bdi limits are ramping up. */ - if (nr_reclaimable + nr_writeback <= - (background_thresh + dirty_thresh) / 2) + if (nr_dirty <= (background_thresh + dirty_thresh) / 2) break; bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); - bdi_thresh = task_dirty_limit(current, bdi_thresh); + min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh); + task_bdi_thresh = task_dirty_limit(current, bdi_thresh); /* * In order to avoid the stacked BDI deadlock we need @@ -524,12 +696,14 @@ static void balance_dirty_pages(struct address_space *mapping, * actually dirty; with m+n sitting in the percpu * deltas. */ - if (bdi_thresh < 2*bdi_stat_error(bdi)) { + if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) { bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); - bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK); + bdi_dirty = bdi_nr_reclaimable + + bdi_stat_sum(bdi, BDI_WRITEBACK); } else { bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); - bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK); + bdi_dirty = bdi_nr_reclaimable + + bdi_stat(bdi, BDI_WRITEBACK); } /* @@ -538,9 +712,10 @@ static void balance_dirty_pages(struct address_space *mapping, * bdi or process from holding back light ones; The latter is * the last resort safeguard. 
*/ - dirty_exceeded = - (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh) - || (nr_reclaimable + nr_writeback > dirty_thresh); + dirty_exceeded = (bdi_dirty > task_bdi_thresh) || + (nr_dirty > dirty_thresh); + clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) && + (nr_dirty <= dirty_thresh); if (!dirty_exceeded) break; @@ -548,6 +723,9 @@ static void balance_dirty_pages(struct address_space *mapping, if (!bdi->dirty_exceeded) bdi->dirty_exceeded = 1; + bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty, + bdi_thresh, bdi_dirty, start_time); + /* Note: nr_reclaimable denotes nr_dirty + nr_unstable. * Unstable writes are a feature of certain networked * filesystems (i.e. NFS) in which data may have been @@ -557,17 +735,29 @@ static void balance_dirty_pages(struct address_space *mapping, * threshold otherwise wait until the disk writes catch * up. */ - trace_wbc_balance_dirty_start(&wbc, bdi); - if (bdi_nr_reclaimable > bdi_thresh) { - writeback_inodes_wb(&bdi->wb, &wbc); - pages_written += write_chunk - wbc.nr_to_write; - trace_wbc_balance_dirty_written(&wbc, bdi); + trace_balance_dirty_start(bdi); + if (bdi_nr_reclaimable > task_bdi_thresh) { + pages_written += writeback_inodes_wb(&bdi->wb, + write_chunk); + trace_balance_dirty_written(bdi, pages_written); if (pages_written >= write_chunk) break; /* We've done our duty */ } - trace_wbc_balance_dirty_wait(&wbc, bdi); __set_current_state(TASK_UNINTERRUPTIBLE); io_schedule_timeout(pause); + trace_balance_dirty_wait(bdi); + + dirty_thresh = hard_dirty_limit(dirty_thresh); + /* + * max-pause area. If dirty exceeded but still within this + * area, no need to sleep for more than 200ms: (a) 8 pages per + * 200ms is typically more than enough to curb heavy dirtiers; + * (b) the pause time limit makes the dirtiers more responsive. 
+ */ + if (nr_dirty < dirty_thresh && + bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && + time_after(jiffies, start_time + MAX_PAUSE)) + break; /* * Increase the delay for each loop, up to our previous @@ -578,7 +768,8 @@ static void balance_dirty_pages(struct address_space *mapping, pause = HZ / 10; } - if (!dirty_exceeded && bdi->dirty_exceeded) + /* Clear dirty_exceeded flag only when no task can exceed the limit */ + if (clear_dirty_exceeded && bdi->dirty_exceeded) bdi->dirty_exceeded = 0; if (writeback_in_progress(bdi)) @@ -626,9 +817,13 @@ static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0; void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied) { + struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long ratelimit; unsigned long *p; + if (!bdi_cap_account_dirty(bdi)) + return; + ratelimit = ratelimit_pages; if (mapping->backing_dev_info->dirty_exceeded) ratelimit = 8; @@ -1141,7 +1336,6 @@ EXPORT_SYMBOL(account_page_dirtied); void account_page_writeback(struct page *page) { inc_zone_page_state(page, NR_WRITEBACK); - inc_zone_page_state(page, NR_WRITTEN); } EXPORT_SYMBOL(account_page_writeback); @@ -1358,8 +1552,10 @@ int test_clear_page_writeback(struct page *page) } else { ret = TestClearPageWriteback(page); } - if (ret) + if (ret) { dec_zone_page_state(page, NR_WRITEBACK); + inc_zone_page_state(page, NR_WRITTEN); + } return ret; } @@ -1405,10 +1601,6 @@ EXPORT_SYMBOL(test_set_page_writeback); */ int mapping_tagged(struct address_space *mapping, int tag) { - int ret; - rcu_read_lock(); - ret = radix_tree_tagged(&mapping->page_tree, tag); - rcu_read_unlock(); - return ret; + return radix_tree_tagged(&mapping->page_tree, tag); } EXPORT_SYMBOL(mapping_tagged); From c9f518aad923868b37dff054595832a4aded4fa1 Mon Sep 17 00:00:00 2001 From: franciscofranco Date: Thu, 14 Jun 2012 14:31:01 +0100 Subject: [PATCH 077/117] Makefile: add special arch flags --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 324fd047..3d59efd7 100644 --- a/Makefile +++ b/Makefile @@ -376,7 +376,10 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -fno-delete-null-pointer-checks + -fno-delete-null-pointer-checks \ + -mtune=cortex-a9 \ + -march=armv7-a \ + -mfpu=neon KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := KBUILD_AFLAGS := -D__ASSEMBLY__ From 50626c049d63aa49771188db22c2d8718fb62bda Mon Sep 17 00:00:00 2001 From: franciscofranco Date: Tue, 26 Jun 2012 06:28:57 +0100 Subject: [PATCH 078/117] Makefile: add -ffast-math cflag --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 3d59efd7..3ed80a0f 100644 --- a/Makefile +++ b/Makefile @@ -377,6 +377,7 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -Werror-implicit-function-declaration \ -Wno-format-security \ -fno-delete-null-pointer-checks \ + -ffast-math \ -mtune=cortex-a9 \ -march=armv7-a \ -mfpu=neon From c3665d9eefe4fe56482976068d2646c033a18083 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 4 Sep 2012 00:27:55 +0900 Subject: [PATCH 079/117] ARM: clean up idle handlers Let's factor out the need_resched() check instead of having it duplicated in every pm_idle implementations to avoid inconsistencies (omap2_pm_idle is missing it already). The forceful re-enablement of IRQs after pm_idle has returned can go. 
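The resulting structure is the familiar optional-hook pattern: a platform may install its own idle routine, and the core falls back to the architectural default when none is registered. A minimal stand-alone C sketch of that pattern is shown below, with hypothetical names; it is not the kernel's actual symbols and omits the real IRQ handling.

#include <stdio.h>

/* Optional platform hook; NULL means "use the generic default". */
static void (*platform_idle)(void);

static void generic_wait_for_interrupt(void)
{
    puts("generic: wait for interrupt");
}

static void default_idle_demo(void)
{
    if (platform_idle)
        platform_idle();              /* platform-specific low-power entry */
    else
        generic_wait_for_interrupt(); /* architectural default */
    /* interrupts would be re-enabled here, in one place */
}

static void soc_idle(void)
{
    puts("SoC: enter retention state");
}

int main(void)
{
    default_idle_demo();              /* falls back to the generic path */
    platform_idle = soc_idle;         /* a machine file installs its hook once */
    default_idle_demo();              /* now uses the SoC-specific routine */
    return 0;
}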
The warning certainly doesn't trigger for existing users. To get rid of the pm_idle calling convention oddity, let's introduce arm_pm_idle() allowing for the local_irq_enable() to be factored out from SOC specific implementations. The default pm_idle function becomes a wrapper for arm_pm_idle and it takes care of enabling IRQs closer to where they are initially disabled. And finally move the comment explaining the reason for that turning off of IRQs to a more proper location. Signed-off-by: Nicolas Pitre Acked-and-tested-by: Jamie Iles --- arch/arm/include/asm/system.h | 1 + arch/arm/kernel/process.c | 23 +++++++++++++++-------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 510a58d7..b9f70d42 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -112,6 +112,7 @@ extern void cpu_init(void); void arm_machine_restart(char mode, const char *cmd); extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_idle)(void); #define UDBG_UNDEFINED (1 << 0) #define UDBG_SYSCALL (1 << 1) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index b3cadc6c..0d9772dc 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -189,12 +189,16 @@ void cpu_idle_wait(void) EXPORT_SYMBOL_GPL(cpu_idle_wait); /* - * This is our default idle handler. We need to disable - * interrupts here to ensure we don't miss a wakeup call. + * This is our default idle handler. */ + +void (*arm_pm_idle)(void); + static void default_idle(void) { - if (!need_resched()) + if (arm_pm_idle) + arm_pm_idle(); + else arch_idle(); local_irq_enable(); } @@ -222,6 +226,10 @@ void cpu_idle(void) cpu_die(); #endif + /* + * We need to disable interrupts here + * to ensure we don't miss a wakeup call. + */ local_irq_disable(); #ifdef CONFIG_PL310_ERRATA_769419 wmb(); @@ -229,18 +237,17 @@ void cpu_idle(void) if (hlt_counter) { local_irq_enable(); cpu_relax(); - } else { + } else if (!need_resched()) { stop_critical_timings(); pm_idle(); start_critical_timings(); /* - * This will eventually be removed - pm_idle - * functions should always return with IRQs - * enabled. + * pm_idle functions must always + * return with IRQs enabled. */ WARN_ON(irqs_disabled()); + } else local_irq_enable(); - } } tick_nohz_restart_sched_tick(); idle_notifier_call_chain(IDLE_END); From 8100b4353850c22adceadbd0f01e53396d298281 Mon Sep 17 00:00:00 2001 From: ktoonsez Date: Tue, 4 Sep 2012 00:35:12 +0900 Subject: [PATCH 080/117] optimize checksum routines --- lib/checksum.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lib/checksum.c b/lib/checksum.c index 09750873..7adfe31c 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -49,7 +49,7 @@ static inline unsigned short from32to16(unsigned int x) static unsigned int do_csum(const unsigned char *buff, int len) { - int odd, count; + int odd; unsigned int result = 0; if (len <= 0) @@ -64,16 +64,14 @@ static unsigned int do_csum(const unsigned char *buff, int len) len--; buff++; } - count = len >> 1; /* nr of 16-bit words.. */ - if (count) { + if (len >= 2) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; - count--; len -= 2; buff += 2; } - count >>= 1; /* nr of 32-bit words.. 
*/ - if (count) { + if (len >= 4) { + const unsigned char *end = buff + ((unsigned)len & ~3); unsigned int carry = 0; do { unsigned int w = *(unsigned int *) buff; @@ -82,7 +80,7 @@ static unsigned int do_csum(const unsigned char *buff, int len) result += carry; result += w; carry = (w > result); - } while (count); + } while (buff < end); result += carry; result = (result & 0xffff) + (result >> 16); } From 1e22134afcd67feb5c3a62b51ead1b6a25187bbb Mon Sep 17 00:00:00 2001 From: Stepan Moskovchenko Date: Sat, 28 Jan 2012 19:31:41 -0800 Subject: [PATCH 081/117] msm: idle-v7: Power collapse Krait with caches enabled The SCTLR[C/I] bits do not need to be cleared when entering power collapse on Krait CPUs. Change-Id: Ic33b227fe55a87e10aab46e6a30f44cc400ccfbb Signed-off-by: Stepan Moskovchenko --- arch/arm/mach-msm/idle-v7.S | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S index 3ac3e548..c612c4ad 100644 --- a/arch/arm/mach-msm/idle-v7.S +++ b/arch/arm/mach-msm/idle-v7.S @@ -115,12 +115,6 @@ skip: ldr r0, =saved_state sub r1, r1, r0 bl v7_flush_kern_dcache_area - mrc p15, 0, r4, c1, c0, 0 /* read current CR */ - bic r0, r4, #(1 << 2) /* clear dcache bit */ - bic r0, r0, #(1 << 12) /* clear icache bit */ - mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */ - - dsb #ifdef CONFIG_ARCH_MSM_KRAIT ldr r0, =SCM_SVC_BOOT ldr r1, =SCM_CMD_TERMINATE_PC @@ -128,10 +122,18 @@ skip: ldr r0, =saved_state ldr r2, [r2] bl scm_call_atomic1 #else + mrc p15, 0, r4, c1, c0, 0 /* read current CR */ + bic r0, r4, #(1 << 2) /* clear dcache bit */ + bic r0, r0, #(1 << 12) /* clear icache bit */ + mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */ + dsb + wfi -#endif + mcr p15, 0, r4, c1, c0, 0 /* restore d/i cache */ isb +#endif + #if defined(CONFIG_MSM_FIQ_SUPPORT) cpsie f From 6d2155833078dc0bc2b3c2e502b7c5622467dd28 Mon Sep 17 00:00:00 2001 From: Stepan Moskovchenko Date: Mon, 13 Feb 2012 18:39:31 -0800 Subject: [PATCH 082/117] msm: idle-v7: Remove redundant cache clean Cleaning the register save area is not necessary if we are already cleaning the entire cache. Eliminate the redundant cleaning operation to reduce power collapse latency. Change-Id: I3651083f147874fd6003cfe12b07861689ca77cc Signed-off-by: Stepan Moskovchenko --- arch/arm/mach-msm/idle-v7.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S index c612c4ad..19deccb7 100644 --- a/arch/arm/mach-msm/idle-v7.S +++ b/arch/arm/mach-msm/idle-v7.S @@ -109,12 +109,14 @@ ENTRY(msm_pm_collapse) cmp r1, #1 bne skip bl v7_flush_dcache_all + b skip2 skip: ldr r0, =saved_state ldr r1, =saved_state_end sub r1, r1, r0 bl v7_flush_kern_dcache_area +skip2: #ifdef CONFIG_ARCH_MSM_KRAIT ldr r0, =SCM_SVC_BOOT ldr r1, =SCM_CMD_TERMINATE_PC From 260878aae1d45b96abb68fc55bc7bb6bc421b10c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 10 Sep 2012 18:43:44 +0900 Subject: [PATCH 083/117] mm: compaction: make compact_control order signed "order" is -1 when compacting via /proc/sys/vm/compact_memory. 
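The pitfall here is easy to reproduce in isolation: storing -1 in an unsigned field makes any "order < 0" test unconditionally false. A tiny stand-alone C sketch follows; the demo struct is hypothetical, not the actual compact_control definition.

#include <stdio.h>

struct compact_control_demo {
    unsigned int order_unsigned;      /* the old, buggy type */
    int          order_signed;        /* the fixed type */
};

int main(void)
{
    struct compact_control_demo cc = {
        .order_unsigned = -1,         /* converts to UINT_MAX */
        .order_signed   = -1,
    };

    /* With the unsigned field the "compact everything" check can never fire. */
    printf("unsigned: order < 0 is %s\n", cc.order_unsigned < 0 ? "true" : "false");
    printf("signed:   order < 0 is %s\n", cc.order_signed   < 0 ? "true" : "false");
    return 0;
}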
Making it unsigned causes a bug in __compact_pgdat() when we test: if (cc->order < 0 || !compaction_deferred(zone, cc->order)) compact_zone(zone, cc); [akpm@linux-foundation.org: make __compact_pgdat()'s comparison match other code sites] Signed-off-by: Dan Carpenter Cc: Mel Gorman Cc: Minchan Kim Reviewed-by: Rik van Riel Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/compaction.c b/mm/compaction.c index 9d337354..cb75d10b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -35,7 +35,7 @@ struct compact_control { unsigned long migrate_pfn; /* isolate_migratepages search base */ bool sync; /* Synchronous migration */ - unsigned int order; /* order a direct compactor needs */ + int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; }; From 7e57d940f635bbe757ba126a5154854ace7d1884 Mon Sep 17 00:00:00 2001 From: vinylfreak89 Date: Fri, 5 Oct 2012 23:05:37 -0400 Subject: [PATCH 084/117] Fixed prox sensor initialization due to linaro changes. --- drivers/i2c/chips/cm3629.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/chips/cm3629.c b/drivers/i2c/chips/cm3629.c index f8b8855d..704f7649 100644 --- a/drivers/i2c/chips/cm3629.c +++ b/drivers/i2c/chips/cm3629.c @@ -692,7 +692,7 @@ static void enable_ps_interrupt(char *ps_conf) static void sensor_irq_do_work(struct work_struct *work) { struct cm3629_info *lpi = lp_info; - uint8_t cmd[3]; + uint8_t cmd[3] = {0,0,0}; uint8_t add = 0; /* Check ALS or PS */ From 52861f6a5c1d29d0788ea4cf1439e12585198429 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Tue, 12 Feb 2013 14:42:54 -0500 Subject: [PATCH 085/117] Update version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3ed80a0f..90254c00 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 0 SUBLEVEL = 63 -EXTRAVERSION = -Ermagerd-13.02.05 +EXTRAVERSION = -Ermagerd-13.02.13 NAME = Sneaky Weasel # *DOCUMENTATION* From 9bacc8b548c6f2909197dd44e0913f70c8a86c40 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Wed, 13 Feb 2013 10:30:07 -0500 Subject: [PATCH 086/117] Add topology.c --- arch/arm/kernel/topology.c | 478 +++++++++++++++++++++++++++++++++++++ 1 file changed, 478 insertions(+) create mode 100644 arch/arm/kernel/topology.c diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c new file mode 100644 index 00000000..939b2d84 --- /dev/null +++ b/arch/arm/kernel/topology.c @@ -0,0 +1,478 @@ +/* + * arch/arm/kernel/topology.c + * + * Copyright (C) 2011 Linaro Limited. + * Written by: Vincent Guittot + * + * based on arch/sh/kernel/topology.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_DEBUG_FS +#include +#include /* for copy_from_user */ +#endif + +#include +#include + +/* + * cpu power scale management + */ + +/* + * cpu power table + * This per cpu data structure describes the relative capacity of each core. + * On a heteregenous system, cores don't have the same computation capacity + * and we reflect that difference in the cpu_power field so the scheduler can + * take this difference into account during load balance. 
A per cpu structure + * is preferred because each CPU updates its own cpu_power field during the + * load balance except for idle cores. One idle core is selected to run the + * rebalance_domains for all idle cores and the cpu_power can be updated + * during this sequence. + */ +static DEFINE_PER_CPU(unsigned long, cpu_scale); + +unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +static void set_power_scale(unsigned int cpu, unsigned long power) +{ + per_cpu(cpu_scale, cpu) = power; +} + +/* + * cpu topology management + */ + +#define ARM_FAMILY_MASK 0xFF0FFFF0 + +#define MPIDR_SMP_BITMASK (0x3 << 30) +#define MPIDR_SMP_VALUE (0x2 << 30) + +#define MPIDR_MT_BITMASK (0x1 << 24) + +/* + * These masks reflect the current use of the affinity levels. + * The affinity level can be up to 16 bits according to ARM ARM + */ + +#define MPIDR_LEVEL0_MASK 0x3 +#define MPIDR_LEVEL0_SHIFT 0 + +#define MPIDR_LEVEL1_MASK 0xF +#define MPIDR_LEVEL1_SHIFT 8 + +#define MPIDR_LEVEL2_MASK 0xFF +#define MPIDR_LEVEL2_SHIFT 16 +/* + * CPU topology table + */ +struct cputopo_arm cpu_topology[NR_CPUS]; + +/* + * cpu power scale management + * a per cpu data structure should be better because each cpu is mainly + * using its own cpu_power even it's not always true because of + * nohz_idle_balance + */ + + + +/* + * cpu topology mask update management + */ + +static unsigned int prev_sched_mc_power_savings = 0; +static unsigned int prev_sched_smt_power_savings = 0; + +ATOMIC_NOTIFIER_HEAD(topology_update_notifier_list); + +/* + * Update the cpu power of the scheduler + */ + + +int topology_register_notifier(struct notifier_block *nb) +{ + + return atomic_notifier_chain_register( + &topology_update_notifier_list, nb); +} + +int topology_unregister_notifier(struct notifier_block *nb) +{ + + return atomic_notifier_chain_unregister( + &topology_update_notifier_list, nb); +} + +/* + * sched_domain flag configuration + */ +/* TODO add a config flag for this function */ +int arch_sd_sibling_asym_packing(void) +{ + if (sched_smt_power_savings || sched_mc_power_savings) + return SD_ASYM_PACKING; + return 0; +} + +/* + * default topology function + */ +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_topology[cpu].core_sibling; +} + +/* + * clear cpu topology masks + */ +static void clear_cpu_topology_mask(void) +{ + unsigned int cpuid; + for_each_possible_cpu(cpuid) { + struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]); + cpumask_clear(&cpuid_topo->core_sibling); + cpumask_clear(&cpuid_topo->thread_sibling); + } + smp_wmb(); +} + +/* + * default_cpu_topology_mask set the core and thread mask as described in the + * ARM ARM + */ +static void default_cpu_topology_mask(unsigned int cpuid) +{ + struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; + + if (cpuid_topo->socket_id == cpu_topo->socket_id) { + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->core_sibling); + + if (cpuid_topo->core_id == cpu_topo->core_id) { + cpumask_set_cpu(cpuid, + &cpu_topo->thread_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->thread_sibling); + } + } + } + smp_wmb(); +} + +static void normal_cpu_topology_mask(void) +{ + unsigned int cpuid; + + for_each_possible_cpu(cpuid) { + default_cpu_topology_mask(cpuid); + } + smp_wmb(); +} + +/* + * For Cortex-A9 MPcore, we emulate a 
multi-package topology in power mode. + * The goal is to gathers tasks on 1 virtual package + */ +static void power_cpu_topology_mask_CA9(unsigned int cpuid) +{ + struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; + + if ((cpuid_topo->socket_id == cpu_topo->socket_id) + && ((cpuid & 0x1) == (cpu & 0x1))) { + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->core_sibling); + + if (cpuid_topo->core_id == cpu_topo->core_id) { + cpumask_set_cpu(cpuid, + &cpu_topo->thread_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->thread_sibling); + } + } + } + smp_wmb(); +} + +static int need_topology_update(void) +{ + int update; + + update = ((prev_sched_mc_power_savings ^ sched_mc_power_savings) + || (prev_sched_smt_power_savings ^ sched_smt_power_savings)); + + prev_sched_mc_power_savings = sched_mc_power_savings; + prev_sched_smt_power_savings = sched_smt_power_savings; + + return update; +} + +#define ARM_CORTEX_A9_FAMILY 0x410FC090 + +/* update_cpu_topology_policy select a cpu topology policy according to the + * available cores. + * TODO: The current version assumes that all cores are exactly the same which + * might not be true. We need to update it to take into account various + * configuration among which system with different kind of core. + */ +static int update_cpu_topology_mask(void) +{ + unsigned long cpuid; + + if (sched_mc_power_savings == POWERSAVINGS_BALANCE_NONE) { + normal_cpu_topology_mask(); + return 0; + } + + for_each_possible_cpu(cpuid) { + struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]); + + switch (cpuid_topo->id) { + case ARM_CORTEX_A9_FAMILY: + power_cpu_topology_mask_CA9(cpuid); + break; + default: + default_cpu_topology_mask(cpuid); + break; + } + } + + return 0; +} + +/* + * store_cpu_topology is called at boot when only one cpu is running + * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, + * which prevents simultaneous write access to cpu_topology array + */ +void store_cpu_topology(unsigned int cpuid) +{ + struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + unsigned int mpidr; + + /* If the cpu topology has been already set, just return */ + if (cpuid_topo->core_id != -1) + return; + + mpidr = read_cpuid_mpidr(); + + /* create cpu topology mapping */ + if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) { + /* + * This is a multiprocessor system + * multiprocessor format & multiprocessor mode field are set + */ + + if (mpidr & MPIDR_MT_BITMASK) { + /* core performance interdependency */ + cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT) + & MPIDR_LEVEL0_MASK; + cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT) + & MPIDR_LEVEL1_MASK; + cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT) + & MPIDR_LEVEL2_MASK; + } else { + /* largely independent cores */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT) + & MPIDR_LEVEL0_MASK; + cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT) + & MPIDR_LEVEL1_MASK; + } + + cpuid_topo->id = read_cpuid_id() & ARM_FAMILY_MASK; + + } else { + /* + * This is an uniprocessor system + * we are in multiprocessor format but uniprocessor system + * or in the old uniprocessor format + */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = 0; + cpuid_topo->socket_id = -1; + } + + /* + * The core and thread sibling masks can also be updated during the + * call of 
arch_update_cpu_topology + */ + default_cpu_topology_mask(cpuid); + + + printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", + cpuid, cpu_topology[cpuid].thread_id, + cpu_topology[cpuid].core_id, + cpu_topology[cpuid].socket_id, mpidr); +} + +/* + * arch_update_cpu_topology is called by the scheduler before building + * a new sched_domain hierarchy. + */ +int arch_update_cpu_topology(void) +{ + if (!need_topology_update()) + return 0; + + /* clear core threads mask */ + clear_cpu_topology_mask(); + + /* set topology mask */ + update_cpu_topology_mask(); + + /* notify the topology update */ + atomic_notifier_call_chain(&topology_update_notifier_list, + TOPOLOGY_POSTCHANGE, (void *)sched_mc_power_savings); + + return 1; +} + +/* + * init_cpu_topology is called at boot when only one cpu is running + * which prevent simultaneous write access to cpu_topology array + */ +void init_cpu_topology(void) +{ + unsigned int cpu; + + /* init core mask */ + for_each_possible_cpu(cpu) { + struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); + + cpu_topo->id = -1; + cpu_topo->thread_id = -1; + cpu_topo->core_id = -1; + cpu_topo->socket_id = -1; + cpumask_clear(&cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + + set_power_scale(cpu, SCHED_POWER_SCALE); + } + smp_wmb(); +} + +/* + * debugfs interface for scaling cpu power + */ + +#ifdef CONFIG_DEBUG_FS +static struct dentry *topo_debugfs_root; + +static ssize_t dbg_write(struct file *file, const char __user *buf, + size_t size, loff_t *off) +{ + unsigned int *value = file->f_dentry->d_inode->i_private; + char cdata[128]; + unsigned long tmp; + + if (size < (sizeof(cdata)-1)) { + if (copy_from_user(cdata, buf, size)) + return -EFAULT; + cdata[size] = 0; + if (!strict_strtoul(cdata, 10, &tmp)) { + *value = tmp; + } + return size; + } + return -EINVAL; +} + +static ssize_t dbg_read(struct file *file, char __user *buf, + size_t size, loff_t *off) +{ + unsigned int *value = file->f_dentry->d_inode->i_private; + char cdata[128]; + unsigned int len; + + len = sprintf(cdata, "%u\n", *value); + return simple_read_from_buffer(buf, size, off, cdata, len); +} + +static const struct file_operations debugfs_fops = { + .read = dbg_read, + .write = dbg_write, +}; + +static struct dentry *topo_debugfs_register(unsigned int cpu, + struct dentry *parent) +{ + struct dentry *cpu_d, *d; + char cpu_name[16]; + + sprintf(cpu_name, "cpu%u", cpu); + + cpu_d = debugfs_create_dir(cpu_name, parent); + if (!cpu_d) + return NULL; + + d = debugfs_create_file("cpu_power", S_IRUGO | S_IWUGO, + cpu_d, &per_cpu(cpu_scale, cpu), &debugfs_fops); + if (!d) + goto err_out; + + return cpu_d; + +err_out: + debugfs_remove_recursive(cpu_d); + return NULL; +} + +static int __init topo_debugfs_init(void) +{ + struct dentry *d; + unsigned int cpu; + + d = debugfs_create_dir("cpu_topo", NULL); + if (!d) + return -ENOMEM; + topo_debugfs_root = d; + + for_each_possible_cpu(cpu) { + d = topo_debugfs_register(cpu, topo_debugfs_root); + if (d == NULL) + goto err_out; + } + return 0; + +err_out: + debugfs_remove_recursive(topo_debugfs_root); + return -ENOMEM; +} + +late_initcall(topo_debugfs_init); +#endif + From 678a80bc5d2fff96113211e169c0f3dd72af8e45 Mon Sep 17 00:00:00 2001 From: Snuzzo Date: Tue, 5 Feb 2013 06:20:34 -0500 Subject: [PATCH 087/117] [PATCH] ARM Topology by Linaro team allows individual queue'ing of tasks to cores by the scheduler --- arch/arm/Kconfig | 25 +++++++++++ arch/arm/include/asm/topology.h | 79 +++++++++++++++++++++++++++++++++ 
arch/arm/kernel/Makefile | 1 + arch/arm/kernel/smp.c | 12 ++++- 4 files changed, 115 insertions(+), 2 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 11c09e29..d3c39259 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1423,6 +1423,31 @@ config SMP_ON_UP If you don't know what to do here, say Y. +config ARM_CPU_TOPOLOGY + bool "Support cpu topology definition" + depends on SMP && CPU_V7 + default y + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. + +config SCHED_MC + bool "Multi-core scheduler support" + depends on ARM_CPU_TOPOLOGY + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. + +config SCHED_SMT + bool "SMT scheduler support" + depends on ARM_CPU_TOPOLOGY + help + Improves the CPU scheduler's decision making when dealing with + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. + config HAVE_ARM_SCU bool depends on SMP diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index accbd7ca..a74a14c6 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -1,6 +1,85 @@ #ifndef _ASM_ARM_TOPOLOGY_H #define _ASM_ARM_TOPOLOGY_H +#ifdef CONFIG_ARM_CPU_TOPOLOGY + +#include + +struct cputopo_arm { + int id; + int thread_id; + int core_id; + int socket_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; +}; + +extern struct cputopo_arm cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) + +#define mc_capable() (cpu_topology[0].socket_id != -1) +#define smt_capable() (cpu_topology[0].thread_id != -1) + +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); + + +int topology_register_notifier(struct notifier_block *nb); +int topology_unregister_notifier(struct notifier_block *nb); + +#else + +static inline void init_cpu_topology(void) { } +static inline void store_cpu_topology(unsigned int cpuid) { } + +static inline void set_power_scale(unsigned int cpu, unsigned int power) { } +static inline int topology_register_notifier(struct notifier_block *nb) { } +static inline int topology_unregister_notifier(struct notifier_block *nb) { } + +#endif + +/* Topology notifier event */ +#define TOPOLOGY_POSTCHANGE 0 + +/* Common values for CPUs */ +#ifndef SD_CPU_INIT +#define SD_CPU_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 1, \ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 0, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 1*SD_WAKE_AFFINE \ + | 0*SD_PREFER_LOCAL \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | arch_sd_sibling_asym_packing() \ + | sd_balance_for_package_power() \ + | sd_power_saving_flags() \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ +} 
+#endif + #include #endif /* _ASM_ARM_TOPOLOGY_H */ + diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index c77c2fba..3989b909 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_CPU_HAS_PMU) += pmu.o obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt +obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o ifneq ($(CONFIG_ARCH_EBSA110),y) obj-y += io.o diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 2da52a42..585cb524 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -31,6 +31,10 @@ #include #include #include +#ifdef CONFIG_ARM_CPU_TOPOLOGY +#include +#endif +#include #include #include #include @@ -280,7 +284,9 @@ void __init smp_setup_processor_id(void) static void __cpuinit smp_store_cpu_info(unsigned int cpuid) { struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); - +#ifdef CONFIG_ARM_CPU_TOPOLOGY + store_cpu_topology(cpuid); +#endif cpu_info->loops_per_jiffy = loops_per_jiffy; } @@ -377,7 +383,9 @@ void __init smp_prepare_boot_cpu(void) void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int ncores = num_possible_cpus(); - +#ifdef CONFIG_ARM_CPU_TOPOLOGY + init_cpu_topology(); +#endif smp_store_cpu_info(smp_processor_id()); /* From f2fbe808dab3b0c46c45d0e216f54008550f6d23 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Wed, 13 Feb 2013 10:36:59 -0500 Subject: [PATCH 088/117] Enable ARM topology --- arch/arm/configs/vigor_aosp_defconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 11670f8d..bc5837fc 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -563,6 +563,9 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y CONFIG_SMP_ON_UP=y +CONFIG_ARM_CPU_TOPOLOGY=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set CONFIG_HAVE_ARM_SCU=y # CONFIG_ARM_ARCH_TIMER is not set CONFIG_VMSPLIT_3G=y From 830061652b6a00dcd1e73680a1196ee1d8b2bb36 Mon Sep 17 00:00:00 2001 From: Jeremy Mullins Date: Wed, 13 Feb 2013 10:41:15 -0500 Subject: [PATCH 089/117] Revert "Added Fugeswap." This reverts commit 9b7e286b0820fad7c7bc902f3fcd502bc8e18b8d. --- drivers/staging/android/lowmemorykiller.c | 27 +---------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 30f9603c..f578bd88 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -44,11 +44,6 @@ #include #include -#ifdef CONFIG_SWAP -#include -#include -#endif - #define DEBUG_LEVEL_DEATHPENDING 6 static uint32_t lowmem_debug_level = 2; @@ -82,9 +77,7 @@ static int lowmem_minfile_size = 6; static unsigned long lowmem_deathpending_timeout; extern int compact_nodes(int); static uint32_t lowmem_check_filepages = 0; -#ifdef CONFIG_SWAP -static int fudgeswap = 512; -#endif + #define lowmem_print(level, x...) 
\ do { \ if (lowmem_debug_level >= (level)) { \ @@ -150,20 +143,6 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) } } - #ifdef CONFIG_SWAP - if(fudgeswap != 0){ - struct sysinfo si; - si_swapinfo(&si); - - if(si.freeswap > 0){ - if(fudgeswap > si.freeswap) - other_file += si.freeswap; - else - other_file += fudgeswap; - } - } - #endif - if (lowmem_adj_size < array_size) array_size = lowmem_adj_size; if (lowmem_minfree_size < array_size) @@ -282,10 +261,6 @@ module_param_named(check_filepages , lowmem_check_filepages, uint, module_param_array_named(minfile, lowmem_minfile, uint, &lowmem_minfile_size, S_IRUGO | S_IWUSR); -#ifdef CONFIG_SWAP -module_param_named(fudgeswap, fudgeswap, int, S_IRUGO | S_IWUSR); -#endif - module_init(lowmem_init); module_exit(lowmem_exit); From b8d7b5ccbf9742470678ae62f795470ba2e2c725 Mon Sep 17 00:00:00 2001 From: David Hays Date: Mon, 29 Apr 2013 14:59:52 -0500 Subject: [PATCH 090/117] Update to qdsp6 version 3 Change-Id: I589c8ebb854a2f883b7996be7795f4069b8f3760 --- arch/arm/mach-msm/Makefile | 2 +- arch/arm/mach-msm/board-vigor-audio.c | 17 +- arch/arm/mach-msm/clock-8x60.h | 253 ++ arch/arm/mach-msm/include/mach/qdsp6v3/apr.h | 189 ++ .../mach-msm/include/mach/qdsp6v3/apr_audio.h | 1012 ++++++ .../include/mach/qdsp6v3/audio_dev_ctl.h | 240 ++ .../arm/mach-msm/include/mach/qdsp6v3/q6afe.h | 67 + .../arm/mach-msm/include/mach/qdsp6v3/q6asm.h | 264 ++ .../mach-msm/include/mach/qdsp6v3/q6voice.h | 757 +++++ .../include/mach/qdsp6v3/snddev_ecodec.h | 48 + .../include/mach/qdsp6v3/snddev_hdmi.h | 40 + .../include/mach/qdsp6v3/snddev_icodec.h | 98 + arch/arm/mach-msm/qdsp6v3/Makefile | 15 + arch/arm/mach-msm/qdsp6v3/aac_in.c | 435 +++ arch/arm/mach-msm/qdsp6v3/amrnb_in.c | 330 ++ arch/arm/mach-msm/qdsp6v3/apr.c | 681 ++++ arch/arm/mach-msm/qdsp6v3/apr_tal.c | 288 ++ arch/arm/mach-msm/qdsp6v3/apr_tal.h | 71 + arch/arm/mach-msm/qdsp6v3/audio_aac.c | 71 + arch/arm/mach-msm/qdsp6v3/audio_acdb.c | 780 +++++ arch/arm/mach-msm/qdsp6v3/audio_acdb.h | 63 + arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c | 1757 ++++++++++ arch/arm/mach-msm/qdsp6v3/audio_lpa.c | 1435 +++++++++ arch/arm/mach-msm/qdsp6v3/audio_lpa.h | 129 + arch/arm/mach-msm/qdsp6v3/audio_mvs.c | 998 ++++++ arch/arm/mach-msm/qdsp6v3/audio_utils.c | 644 ++++ arch/arm/mach-msm/qdsp6v3/audio_utils.h | 109 + arch/arm/mach-msm/qdsp6v3/audio_wma.c | 1585 ++++++++++ arch/arm/mach-msm/qdsp6v3/audio_wmapro.c | 1644 ++++++++++ .../mach-msm/qdsp6v3/board-msm8x60-audio.c | 1901 +++++++++++ arch/arm/mach-msm/qdsp6v3/dsp_debug.c | 202 ++ arch/arm/mach-msm/qdsp6v3/dsp_debug.h | 38 + arch/arm/mach-msm/qdsp6v3/evrc_in.c | 337 ++ arch/arm/mach-msm/qdsp6v3/fm.c | 259 ++ arch/arm/mach-msm/qdsp6v3/pcm_in.c | 504 +++ arch/arm/mach-msm/qdsp6v3/pcm_out.c | 491 +++ arch/arm/mach-msm/qdsp6v3/q6adm.c | 651 ++++ arch/arm/mach-msm/qdsp6v3/q6adm.h | 56 + arch/arm/mach-msm/qdsp6v3/q6afe.c | 687 ++++ arch/arm/mach-msm/qdsp6v3/q6asm.c | 2548 +++++++++++++++ arch/arm/mach-msm/qdsp6v3/q6core.c | 348 ++ arch/arm/mach-msm/qdsp6v3/q6voice.c | 2812 +++++++++++++++++ arch/arm/mach-msm/qdsp6v3/qcelp_in.c | 334 ++ arch/arm/mach-msm/qdsp6v3/rtac.h | 45 + arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c | 390 +++ arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c | 182 ++ arch/arm/mach-msm/qdsp6v3/snddev_icodec.c | 1189 +++++++ arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c | 472 +++ arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h | 46 + arch/arm/mach-msm/qdsp6v3/snddev_virtual.c | 172 + arch/arm/mach-msm/qdsp6v3/snddev_virtual.h | 20 + 
.../mach-msm/qdsp6v3/timpani_profile_8x60.h | 2269 +++++++++++++ .../qdsp6v3/timpani_profile_8x60_lead.h | 699 ++++ .../qdsp6v3/timpani_profile_8x60_vigor.h | 641 ++++ include/linux/msm_audio_mvs.h | 113 + 55 files changed, 31416 insertions(+), 12 deletions(-) create mode 100644 arch/arm/mach-msm/clock-8x60.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/apr.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h create mode 100644 arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h create mode 100644 arch/arm/mach-msm/qdsp6v3/Makefile create mode 100644 arch/arm/mach-msm/qdsp6v3/aac_in.c create mode 100644 arch/arm/mach-msm/qdsp6v3/amrnb_in.c create mode 100644 arch/arm/mach-msm/qdsp6v3/apr.c create mode 100644 arch/arm/mach-msm/qdsp6v3/apr_tal.c create mode 100644 arch/arm/mach-msm/qdsp6v3/apr_tal.h create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_aac.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_acdb.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_acdb.h create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_lpa.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_lpa.h create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_mvs.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_utils.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_utils.h create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_wma.c create mode 100644 arch/arm/mach-msm/qdsp6v3/audio_wmapro.c create mode 100644 arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c create mode 100644 arch/arm/mach-msm/qdsp6v3/dsp_debug.c create mode 100644 arch/arm/mach-msm/qdsp6v3/dsp_debug.h create mode 100644 arch/arm/mach-msm/qdsp6v3/evrc_in.c create mode 100644 arch/arm/mach-msm/qdsp6v3/fm.c create mode 100644 arch/arm/mach-msm/qdsp6v3/pcm_in.c create mode 100644 arch/arm/mach-msm/qdsp6v3/pcm_out.c create mode 100644 arch/arm/mach-msm/qdsp6v3/q6adm.c create mode 100644 arch/arm/mach-msm/qdsp6v3/q6adm.h create mode 100644 arch/arm/mach-msm/qdsp6v3/q6afe.c create mode 100644 arch/arm/mach-msm/qdsp6v3/q6asm.c create mode 100644 arch/arm/mach-msm/qdsp6v3/q6core.c create mode 100644 arch/arm/mach-msm/qdsp6v3/q6voice.c create mode 100644 arch/arm/mach-msm/qdsp6v3/qcelp_in.c create mode 100644 arch/arm/mach-msm/qdsp6v3/rtac.h create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_icodec.c create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_virtual.c create mode 100644 arch/arm/mach-msm/qdsp6v3/snddev_virtual.h create mode 100644 arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h create mode 100644 arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h create mode 100644 arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h create mode 100644 include/linux/msm_audio_mvs.h diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index f90d9114..757e7012 100644 --- a/arch/arm/mach-msm/Makefile +++ 
b/arch/arm/mach-msm/Makefile @@ -169,7 +169,7 @@ obj-$(CONFIG_MSM7KV2_AUDIO) += qdsp5v2_2x/ obj-$(CONFIG_MSM7KV2_AUDIO) += htc_acoustic_7x30.o htc_acdb_7x30.o obj-$(CONFIG_MSM_QDSP6) += qdsp6/ -obj-$(CONFIG_MSM8X60_AUDIO) += qdsp6v2_1x/ +obj-$(CONFIG_MSM8X60_AUDIO) += qdsp6v3/ obj-$(CONFIG_MSM_AUDIO_QDSP6) += qdsp6v2/ obj-$(CONFIG_MSM_HW3D) += hw3d.o ifdef CONFIG_PM diff --git a/arch/arm/mach-msm/board-vigor-audio.c b/arch/arm/mach-msm/board-vigor-audio.c index beac20b8..15fc1c36 100644 --- a/arch/arm/mach-msm/board-vigor-audio.c +++ b/arch/arm/mach-msm/board-vigor-audio.c @@ -23,12 +23,12 @@ #include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include @@ -404,10 +404,6 @@ static struct dev_ctrl_ops dops = { .support_opendsp = vigor_support_opendsp, }; -static struct q6asm_ops qops = { - .get_q6_effect = vigor_get_q6_effect_mode, -}; - void __init vigor_audio_init(void) { int i = 0; @@ -419,7 +415,6 @@ void __init vigor_audio_init(void) htc_8x60_register_ecodec_ops(&eops); htc_8x60_register_icodec_ops(&iops); htc_8x60_register_dev_ctrl_ops(&dops); - htc_8x60_register_q6asm_ops(&qops); acoustic_register_ops(&acoustic); /* PMIC GPIO Init (See board-vigor.c) */ diff --git a/arch/arm/mach-msm/clock-8x60.h b/arch/arm/mach-msm/clock-8x60.h new file mode 100644 index 00000000..e9effae0 --- /dev/null +++ b/arch/arm/mach-msm/clock-8x60.h @@ -0,0 +1,253 @@ +/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_CLOCK_8X60_H +#define __ARCH_ARM_MACH_MSM_CLOCK_8X60_H + +#include "clock-local.h" + +enum { + /* Peripheral Clocks */ + L_GSBI1_UART_CLK, //0 + L_GSBI2_UART_CLK, + L_GSBI3_UART_CLK, + L_GSBI4_UART_CLK, + L_GSBI5_UART_CLK, + L_GSBI6_UART_CLK, + L_GSBI7_UART_CLK, + L_GSBI8_UART_CLK, + L_GSBI9_UART_CLK, + L_GSBI10_UART_CLK, + L_GSBI11_UART_CLK, //10 + L_GSBI12_UART_CLK, + L_GSBI1_QUP_CLK, + L_GSBI2_QUP_CLK, + L_GSBI3_QUP_CLK, + L_GSBI4_QUP_CLK, + L_GSBI5_QUP_CLK, + L_GSBI6_QUP_CLK, + L_GSBI7_QUP_CLK, + L_GSBI8_QUP_CLK, + L_GSBI9_QUP_CLK, //20 + L_GSBI10_QUP_CLK, + L_GSBI11_QUP_CLK, + L_GSBI12_QUP_CLK, + L_PDM_CLK, + L_PMEM_CLK, + L_PRNG_CLK, + L_SDC1_CLK, + L_SDC2_CLK, + L_SDC3_CLK, + L_SDC4_CLK, //30 + L_SDC5_CLK, + L_TSIF_REF_CLK, + L_TSSC_CLK, + L_USB_HS1_XCVR_CLK, + L_USB_PHY0_CLK, + L_USB_FS1_SRC_CLK, + L_USB_FS1_XCVR_CLK, + L_USB_FS1_SYS_CLK, + L_USB_FS2_SRC_CLK, + L_USB_FS2_XCVR_CLK, //40 + L_USB_FS2_SYS_CLK, + + /* HW-Voteable Clocks */ + L_ADM0_CLK, + L_ADM0_P_CLK, + L_ADM1_CLK, + L_ADM1_P_CLK, + L_MODEM_AHB1_P_CLK, + L_MODEM_AHB2_P_CLK, + L_PMIC_ARB0_P_CLK, + L_PMIC_ARB1_P_CLK, + L_PMIC_SSBI2_CLK, //50 + L_RPM_MSG_RAM_P_CLK, + + /* Fast Peripheral Bus Clocks */ + L_CE2_P_CLK, + L_GSBI1_P_CLK, + L_GSBI2_P_CLK, + L_GSBI3_P_CLK, + L_GSBI4_P_CLK, + L_GSBI5_P_CLK, + L_GSBI6_P_CLK, + L_GSBI7_P_CLK, + L_GSBI8_P_CLK, //60 + L_GSBI9_P_CLK, + L_GSBI10_P_CLK, + L_GSBI11_P_CLK, + L_GSBI12_P_CLK, + L_PPSS_P_CLK, + L_TSIF_P_CLK, + L_USB_FS1_P_CLK, + L_USB_FS2_P_CLK, + L_USB_HS1_P_CLK, + L_SDC1_P_CLK, //70 + L_SDC2_P_CLK, + L_SDC3_P_CLK, + L_SDC4_P_CLK, + L_SDC5_P_CLK, + + /* Multimedia Clocks */ + L_AMP_CLK, + L_CAM_CLK, + L_CSI_SRC_CLK, + L_CSI0_CLK, + L_CSI1_CLK, + L_DSI_BYTE_CLK, //80 + L_DSI_ESC_CLK, + L_GFX2D0_CLK, + L_GFX2D1_CLK, + L_GFX3D_CLK, + L_IJPEG_CLK, + L_JPEGD_CLK, + L_MDP_CLK, + L_MDP_VSYNC_CLK, + L_PIXEL_SRC_CLK, + L_PIXEL_MDP_CLK, //90 + L_PIXEL_LCDC_CLK, + L_ROT_CLK, + L_TV_SRC_CLK, + L_TV_ENC_CLK, + L_TV_DAC_CLK, + L_VCODEC_CLK, + L_MDP_TV_CLK, + L_HDMI_TV_CLK, + L_HDMI_APP_CLK, + L_VPE_CLK, //100 + L_VFE_CLK, + L_CSI0_VFE_CLK, + L_CSI1_VFE_CLK, + L_GMEM_AXI_CLK, + L_IJPEG_AXI_CLK, + L_IMEM_AXI_CLK, + L_JPEGD_AXI_CLK, + L_VCODEC_AXI_CLK, + L_VFE_AXI_CLK, + L_MDP_AXI_CLK, //110 + L_ROT_AXI_CLK, + L_VPE_AXI_CLK, + + /* Multimedia Fast Peripheral Bus Clocks */ + L_AMP_P_CLK, + L_CSI0_P_CLK, + L_CSI1_P_CLK, + L_DSI_M_P_CLK, + L_DSI_S_P_CLK, + L_GFX2D0_P_CLK, + L_GFX2D1_P_CLK, + L_GFX3D_P_CLK, //120 + L_HDMI_M_P_CLK, + L_HDMI_S_P_CLK, + L_IJPEG_P_CLK, + L_IMEM_P_CLK, + L_JPEGD_P_CLK, + L_MDP_P_CLK, + L_ROT_P_CLK, + L_SMMU_P_CLK, + L_TV_ENC_P_CLK, + L_VCODEC_P_CLK, //130 + L_VFE_P_CLK, + L_VPE_P_CLK, + + /* LPA Clocks */ + L_MI2S_SRC_CLK, + L_MI2S_OSR_CLK, + L_MI2S_BIT_CLK, + L_CODEC_I2S_MIC_OSR_CLK, + L_CODEC_I2S_MIC_BIT_CLK, + L_SPARE_I2S_MIC_OSR_CLK, + L_SPARE_I2S_MIC_BIT_CLK, + L_CODEC_I2S_SPKR_OSR_CLK, //140 + L_CODEC_I2S_SPKR_BIT_CLK, + L_SPARE_I2S_SPKR_OSR_CLK, + L_SPARE_I2S_SPKR_BIT_CLK, + L_PCM_CLK, + + /* Measurement-only Clocks */ + L_SC0_DIV2_M_CLK, + L_SC1_DIV2_M_CLK, + L_L2_DIV2_M_CLK, + L_AFAB_M_CLK, + L_SFAB_M_CLK, + L_EBI1_2X_M_CLK, + L_CFPB0_M_CLK, + L_CFPB1_M_CLK, + L_CFPB2_M_CLK, + L_DFAB_M_CLK, + L_SFPB_M_CLK, + L_MMFAB_M_CLK, + L_SMI_DDR2X_M_CLK, + L_MMFPB_M_CLK, + + L_NR_CLKS //145 +}; + +enum clk_sources { + PLL_0 = 0, + PLL_1, + PLL_2, + PLL_3, + PLL_4, + PLL_6, + PLL_7, + PLL_8, + PXO, + CXO, + NUM_SRC +}; + +/*extern struct clk_local soc_clk_local_tbl_mxo[];*/ + +struct pll_rate { + const uint32_t l_val; + const 
uint32_t m_val; + const uint32_t n_val; + const uint32_t vco; + const uint32_t post_div; + const uint32_t i_bits; +}; +#define PLL_RATE(l, m, n, v, d, i) { l, m, n, v, (d>>1), i } + +extern struct clk_ops soc_clk_ops_8x60; +#define CLK_8X60(clk_name, clk_id, clk_dev, clk_flags) { \ + .con_id = clk_name, \ + .dev_id = clk_dev, \ + .clk = &(struct clk){ \ + .id = L_##clk_id, \ + .ops = &soc_clk_ops_8x60, \ + .flags = clk_flags, \ + .dbg_name = #clk_id, \ + .name = clk_name, \ + }, \ + } + +void soc_clk_src_votes_show(void); + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/apr.h b/arch/arm/mach-msm/include/mach/qdsp6v3/apr.h new file mode 100644 index 00000000..19f1860a --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/apr.h @@ -0,0 +1,189 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __APR_H_ +#define __APR_H_ + +#define APR_Q6_NOIMG 0 +#define APR_Q6_LOADING 1 +#define APR_Q6_LOADED 2 + +struct apr_q6 { + void *pil; + uint32_t state; + struct mutex lock; +}; + +struct apr_hdr { + uint16_t hdr_field; + uint16_t pkt_size; + uint8_t src_svc; + uint8_t src_domain; + uint16_t src_port; + uint8_t dest_svc; + uint8_t dest_domain; + uint16_t dest_port; + uint32_t token; + uint32_t opcode; +}; + +#define APR_HDR_LEN(hdr_len) ((hdr_len)/4) +#define APR_PKT_SIZE(hdr_len, payload_len) ((hdr_len) + (payload_len)) +#define APR_HDR_FIELD(msg_type, hdr_len, ver)\ + (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF)) + +#define APR_HDR_SIZE sizeof(struct apr_hdr) + +/* Version */ +#define APR_PKT_VER 0x0 + +/* Command and Response Types */ +#define APR_MSG_TYPE_EVENT 0x0 +#define APR_MSG_TYPE_CMD_RSP 0x1 +#define APR_MSG_TYPE_SEQ_CMD 0x2 +#define APR_MSG_TYPE_NSEQ_CMD 0x3 +#define APR_MSG_TYPE_MAX 0x04 + +/* APR Basic Response Message */ +#define APR_BASIC_RSP_RESULT 0x000110E8 +#define APR_RSP_ACCEPTED 0x000100BE + +/* Domain IDs */ +#define APR_DOMAIN_SIM 0x1 +#define APR_DOMAIN_PC 0x2 +#define APR_DOMAIN_MODEM 0x3 +#define APR_DOMAIN_ADSP 0x4 +#define APR_DOMAIN_APPS 0x5 +#define APR_DOMAIN_MAX 0x6 + +/* ADSP service IDs */ +#define APR_SVC_TEST_CLIENT 0x2 +#define APR_SVC_ADSP_CORE 0x3 +#define APR_SVC_AFE 0x4 +#define APR_SVC_VSM 0x5 +#define APR_SVC_VPM 0x6 +#define APR_SVC_ASM 0x7 +#define APR_SVC_ADM 0x8 +#define APR_SVC_ADSP_MVM 0x09 +#define APR_SVC_ADSP_CVS 0x0A +#define APR_SVC_ADSP_CVP 0x0B +#define APR_SVC_MAX 0x0C + +/* Modem Service IDs */ +#define APR_SVC_MVS 0x3 +#define APR_SVC_MVM 0x4 +#define APR_SVC_CVS 0x5 +#define APR_SVC_CVP 0x6 +#define APR_SVC_SRD 0x7 + +/* APR Port IDs */ +#define APR_MAX_PORTS 0x40 + +#define APR_NAME_MAX 0x40 + +#define RESET_EVENTS 0xFFFFFFFF + +#define LPASS_RESTART_EVENT 0x1000 +#define LPASS_RESTART_READY 0x1001 + +struct apr_client_data { + uint16_t reset_event; + uint16_t reset_proc; + uint16_t payload_size; + uint16_t hdr_len; + uint16_t msg_type; + uint16_t src; + uint16_t dest_svc; + uint16_t src_port; + uint16_t dest_port; + uint32_t token; + uint32_t opcode; + void *payload; +}; + +typedef int32_t (*apr_fn)(struct apr_client_data *data, void *priv); + +struct apr_svc { + uint16_t id; + uint16_t dest_id; + uint16_t client_id; + uint8_t rvd; + uint8_t port_cnt; + uint8_t svc_cnt; + uint8_t need_reset; + apr_fn port_fn[APR_MAX_PORTS]; + void *port_priv[APR_MAX_PORTS]; + apr_fn fn; + void *priv; + struct mutex m_lock; + spinlock_t w_lock; +}; + +struct apr_client { + uint8_t id; + uint8_t svc_cnt; + uint8_t rvd; + struct mutex m_lock; + struct apr_svc_ch_dev *handle; + struct apr_svc svc[APR_SVC_MAX]; +}; + +#define ADSP_GET_VERSION 0x00011152 +#define ADSP_GET_VERSION_RSP 0x00011153 + +struct adsp_get_version { + uint32_t build_id; + uint32_t svc_cnt; +}; + +struct adsp_service_info { + uint32_t svc_id; + uint32_t svc_ver; +}; + +#define ADSP_CMD_SET_POWER_COLLAPSE_STATE 0x0001115C +struct adsp_power_collapse { + struct apr_hdr hdr; + uint32_t power_collapse; +}; + +struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, + uint32_t src_port, void *priv); +inline int apr_fill_hdr(void *handle, uint32_t *buf, uint16_t src_port, + uint16_t msg_type, uint16_t dest_port, + uint32_t token, uint32_t opcode, uint16_t len); + +int apr_send_pkt(void *handle, uint32_t *buf); +int apr_deregister(void *handle); +void change_q6_state(int state); +void q6audio_dsp_not_responding(void); +uint32_t 
core_get_adsp_version(void); +void *core_open(void); +int32_t core_close(void); +void apr_reset(void *handle); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h b/arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h new file mode 100644 index 00000000..a6657cff --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h @@ -0,0 +1,1012 @@ +/* + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef _APR_AUDIO_H_ +#define _APR_AUDIO_H_ + +/* ASM opcodes without APR payloads*/ +#include + +/* + * Audio Front End (AFE) + */ + +/* Port ID. Update afe_get_port_index when a new port is added here. */ +#define PRIMARY_I2S_RX 0 /* index = 0 */ +#define PRIMARY_I2S_TX 1 /* index = 1 */ +#define PCM_RX 2 /* index = 2 */ +#define PCM_TX 3 /* index = 3 */ +#define SECONDARY_I2S_RX 4 /* index = 4 */ +#define SECONDARY_I2S_TX 5 /* index = 5 */ +#define MI2S_RX 6 /* index = 6 */ +#define MI2S_TX 7 /* index = 7 */ +#define HDMI_RX 8 /* index = 8 */ +#define RSVD_2 9 /* index = 9 */ +#define RSVD_3 10 /* index = 10 */ +#define DIGI_MIC_TX 11 /* index = 11 */ +#define VOICE_RECORD_RX 0x8003 /* index = 12 */ +#define VOICE_RECORD_TX 0x8004 /* index = 13 */ +#define VOICE_PLAYBACK_TX 0x8005 /* index = 14 */ +#define AFE_PORT_INVALID 0xFFFF + +#define AFE_PORT_CMD_START 0x000100ca +struct afe_port_start_command { + struct apr_hdr hdr; + u16 port_id; + u16 gain; /* Q13 */ + u32 sample_rate; /* 8 , 16, 48khz */ +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_STOP 0x000100cb +struct afe_port_stop_command { + struct apr_hdr hdr; + u16 port_id; + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_APPLY_GAIN 0x000100cc +struct afe_port_gain_command { + struct apr_hdr hdr; + u16 port_id; + u16 gain;/* Q13 */ +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_SIDETONE_CTL 0x000100cd +struct afe_port_sidetone_command { + struct apr_hdr hdr; + u16 rx_port_id; /* Primary i2s tx = 1 */ + /* PCM tx = 3 */ + /* Secondary i2s tx = 5 */ + /* Mi2s tx = 7 */ + /* Digital mic tx = 11 */ + u16 tx_port_id; /* Primary i2s rx = 0 */ + /* PCM rx = 2 */ + /* Secondary i2s rx = 4 */ + /* Mi2S rx = 6 */ + /* HDMI rx = 8 */ + u16 gain; /* Q13 */ + u16 enable; /* 1 = enable, 0 = disable */ +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_LOOPBACK 0x000100ce +struct afe_loopback_command { + struct apr_hdr hdr; + u16 tx_port_id; /* Primary i2s rx = 0 */ + /* PCM rx = 2 */ + /* Secondary i2s rx = 4 */ + /* Mi2S rx = 6 */ + /* HDMI rx = 8 */ + u16 rx_port_id; /* Primary i2s tx = 1 */ + /* PCM tx = 3 */ + /* Secondary i2s tx = 5 */ + /* Mi2s tx = 7 */ + /* Digital mic tx = 11 */ + u16 mode; /* Default -1, DSP will conver + the tx to rx format */ + u16 enable; /* 1 = enable, 0 = disable */ +} __attribute__ ((packed)); + +#define AFE_PSEUDOPORT_CMD_START 0x000100cf +struct 
afe_pseudoport_start_command { + struct apr_hdr hdr; + u16 port_id; /* Pseudo Port 1 = 0x8000 */ + /* Pseudo Port 2 = 0x8001 */ + /* Pseudo Port 3 = 0x8002 */ + u16 timing; /* FTRT = 0 , AVTimer = 1, */ +} __attribute__ ((packed)); + +#define AFE_PSEUDOPORT_CMD_STOP 0x000100d0 +struct afe_pseudoport_stop_command { + struct apr_hdr hdr; + u16 port_id; /* Pseudo Port 1 = 0x8000 */ + /* Pseudo Port 2 = 0x8001 */ + /* Pseudo Port 3 = 0x8002 */ + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_CMD_GET_ACTIVE_PORTS 0x000100d1 + + +#define AFE_CMD_GET_ACTIVE_HANDLES_FOR_PORT 0x000100d2 +struct afe_get_active_handles_command { + struct apr_hdr hdr; + u16 port_id; + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_PCM_CFG_MODE_PCM 0x0 +#define AFE_PCM_CFG_MODE_AUX 0x1 +#define AFE_PCM_CFG_SYNC_EXT 0x0 +#define AFE_PCM_CFG_SYNC_INT 0x1 +#define AFE_PCM_CFG_FRM_8BPF 0x0 +#define AFE_PCM_CFG_FRM_16BPF 0x1 +#define AFE_PCM_CFG_FRM_32BPF 0x2 +#define AFE_PCM_CFG_FRM_64BPF 0x3 +#define AFE_PCM_CFG_FRM_128BPF 0x4 +#define AFE_PCM_CFG_FRM_256BPF 0x5 +#define AFE_PCM_CFG_QUANT_ALAW_NOPAD 0x0 +#define AFE_PCM_CFG_QUANT_MULAW_NOPAD 0x1 +#define AFE_PCM_CFG_QUANT_LINEAR_NOPAD 0x2 +#define AFE_PCM_CFG_QUANT_ALAW_PAD 0x3 +#define AFE_PCM_CFG_QUANT_MULAW_PAD 0x4 +#define AFE_PCM_CFG_QUANT_LINEAR_PAD 0x5 +#define AFE_PCM_CFG_CDATAOE_MASTER 0x0 +#define AFE_PCM_CFG_CDATAOE_SHARE 0x1 + +struct afe_port_pcm_cfg { + u16 mode; /* PCM (short sync) = 0, AUXPCM (long sync) = 1 */ + u16 sync; /* external = 0 , internal = 1 */ + u16 frame; /* 8 bpf = 0 */ + /* 16 bpf = 1 */ + /* 32 bpf = 2 */ + /* 64 bpf = 3 */ + /* 128 bpf = 4 */ + /* 256 bpf = 5 */ + u16 quant; + u16 slot; /* Slot for PCM stream , 0 - 31 */ + u16 data; /* 0, PCM block is the only master */ + /* 1, PCM block is shares to driver data out signal */ + /* other master */ + u16 reserved; +} __attribute__ ((packed)); + +enum { + AFE_I2S_SD0 = 1, + AFE_I2S_SD1, + AFE_I2S_SD2, + AFE_I2S_SD3, + AFE_I2S_QUAD01, + AFE_I2S_QUAD23, + AFE_I2S_6CHS, + AFE_I2S_8CHS, +}; + +#define AFE_MI2S_MONO 0 +#define AFE_MI2S_STEREO 3 +#define AFE_MI2S_4CHANNELS 4 +#define AFE_MI2S_6CHANNELS 6 +#define AFE_MI2S_8CHANNELS 8 + +struct afe_port_mi2s_cfg { + u16 bitwidth; /* 16,24,32 */ + u16 line; /* Called ChannelMode in documentation */ + /* i2s_sd0 = 1 */ + /* i2s_sd1 = 2 */ + /* i2s_sd2 = 3 */ + /* i2s_sd3 = 4 */ + /* i2s_quad01 = 5 */ + /* i2s_quad23 = 6 */ + /* i2s_6chs = 7 */ + /* i2s_8chs = 8 */ + u16 channel; /* Called MonoStereo in documentation */ + /* i2s mono = 0 */ + /* i2s mono right = 1 */ + /* i2s mono left = 2 */ + /* i2s stereo = 3 */ + u16 ws; /* 0, word select signal from external source */ + /* 1, word select signal from internal source */ + u16 reserved; +} __attribute__ ((packed)); + +struct afe_port_hdmi_cfg { + u16 bitwidth; /* 16,24,32 */ + u16 channel_mode; /* HDMI Stereo = 0 */ + /* HDMI_3Point1 (4-ch) = 1 */ + /* HDMI_5Point1 (6-ch) = 2 */ + /* HDMI_6Point1 (8-ch) = 3 */ + u16 data_type; /* HDMI_Linear = 0 */ + /* HDMI_non_Linaer = 1 */ +} __attribute__ ((packed)); + +#define AFE_PORT_AUDIO_IF_CONFIG 0x000100d3 + +union afe_port_config { + struct afe_port_pcm_cfg pcm; + struct afe_port_mi2s_cfg mi2s; + struct afe_port_hdmi_cfg hdmi; +} __attribute__((packed)); + +struct afe_audioif_config_command { + struct apr_hdr hdr; + u16 port_id; + union afe_port_config port; +} __attribute__ ((packed)); + +#define AFE_TEST_CODEC_LOOPBACK_CTL 0x000100d5 +struct afe_codec_loopback_command { + u16 port_inf; /* Primary i2s = 0 */ + /* PCM = 2 */ + /* 
Secondary i2s = 4 */ + /* Mi2s = 6 */ + u16 enable; /* 0, disable. 1, enable */ +} __attribute__ ((packed)); + + +#define AFE_PARAM_ID_SIDETONE_GAIN 0x00010300 +struct afe_param_sidetone_gain { + u16 gain; + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_PARAM_ID_SAMPLING_RATE 0x00010301 +struct afe_param_sampling_rate { + u32 sampling_rate; +} __attribute__ ((packed)); + + +#define AFE_PARAM_ID_CHANNELS 0x00010302 +struct afe_param_channels { + u16 channels; + u16 reserved; +} __attribute__ ((packed)); + + +#define AFE_PARAM_ID_LOOPBACK_GAIN 0x00010303 +struct afe_param_loopback_gain { + u16 gain; + u16 reserved; +} __attribute__ ((packed)); + + +#define AFE_MODULE_ID_PORT_INFO 0x00010200 +struct afe_param_payload { + u32 module_id; + u32 param_id; + u16 param_size; + u16 reserved; + union { + struct afe_param_sidetone_gain sidetone_gain; + struct afe_param_sampling_rate sampling_rate; + struct afe_param_channels channels; + struct afe_param_loopback_gain loopback_gain; + } __attribute__((packed)) param; +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_SET_PARAM 0x000100dc + +struct afe_port_cmd_set_param { + struct apr_hdr hdr; + u16 port_id; + u16 payload_size; + u32 payload_address; + struct afe_param_payload payload; +} __attribute__ ((packed)); + + +#define AFE_EVENT_GET_ACTIVE_PORTS 0x00010100 +struct afe_get_active_ports_rsp { + u16 num_ports; + u16 port_id; +} __attribute__ ((packed)); + + +#define AFE_EVENT_GET_ACTIVE_HANDLES 0x00010102 +struct afe_get_active_handles_rsp { + u16 port_id; + u16 num_handles; + u16 mode; /* 0, voice rx */ + /* 1, voice tx */ + /* 2, audio rx */ + /* 3, audio tx */ + u16 handle; +} __attribute__ ((packed)); + +#define ADM_MAX_COPPS 5 + +#define ADM_SERVICE_CMD_GET_COPP_HANDLES 0x00010300 +struct adm_get_copp_handles_command { + struct apr_hdr hdr; +} __attribute__ ((packed)); + +#define ADM_CMD_MATRIX_MAP_ROUTINGS 0x00010301 +struct adm_routings_session { + u16 id; + u16 num_copps; + u16 copp_id[ADM_MAX_COPPS+1]; /*Padding if numCopps is odd */ +} __packed; + +struct adm_routings_command { + struct apr_hdr hdr; + u32 path; /* 0 = Rx, 1 Tx */ + u32 num_sessions; + struct adm_routings_session session[8]; +} __attribute__ ((packed)); + + +#define ADM_CMD_MATRIX_RAMP_GAINS 0x00010302 +struct adm_ramp_gain { + struct apr_hdr hdr; + u16 session_id; + u16 copp_id; + u16 initial_gain; + u16 gain_increment; + u16 ramp_duration; + u16 reserved; +} __attribute__ ((packed)); + +struct adm_ramp_gains_command { + struct apr_hdr hdr; + u32 id; + u32 num_gains; + struct adm_ramp_gain gains[ADM_MAX_COPPS]; +} __attribute__ ((packed)); + + +#define ADM_CMD_COPP_OPEN 0x00010304 +struct adm_copp_open_command { + struct apr_hdr hdr; + u16 flags; + u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */ + u16 endpoint_id1; + u16 endpoint_id2; + u32 topology_id; + u16 channel_config; + u16 reserved; + u32 rate; +} __attribute__ ((packed)); + +#define ADM_CMD_COPP_CLOSE 0x00010305 + +#define ADM_CMD_MEMORY_MAP 0x00010C30 +struct adm_cmd_memory_map{ + struct apr_hdr hdr; + u32 buf_add; + u32 buf_size; + u16 mempool_id; + u16 reserved; +} __attribute__((packed)); + +#define ADM_CMD_MEMORY_UNMAP 0x00010C31 +struct adm_cmd_memory_unmap{ + struct apr_hdr hdr; + u32 buf_add; +} __attribute__((packed)); + +#define ADM_CMD_MEMORY_MAP_REGIONS 0x00010C47 +struct adm_memory_map_regions{ + u32 phys; + u32 buf_size; +} __attribute__((packed)); + +struct adm_cmd_memory_map_regions{ + struct apr_hdr hdr; + u16 mempool_id; + u16 nregions; +} __attribute__((packed)); + +#define 
ADM_CMD_MEMORY_UNMAP_REGIONS 0x00010C48 +struct adm_memory_unmap_regions{ + u32 phys; +} __attribute__((packed)); + +struct adm_cmd_memory_unmap_regions{ + struct apr_hdr hdr; + u16 nregions; + u16 reserved; +} __attribute__((packed)); + +#define DEFAULT_COPP_TOPOLOGY 0x00010be3 +#define DEFAULT_POPP_TOPOLOGY 0x00010be4 +#define VPM_TX_SM_ECNS_COPP_TOPOLOGY 0x00010F71 +#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY 0x00010F72 +#define HTC_STEREO_RECORD_TOPOLOGY 0x10000000 +#define HTC_COPP_TOPOLOGY 0x10000001 + +#define ASM_MAX_EQ_BANDS 12 + +struct asm_eq_band { + u32 band_idx; /* The band index, 0 .. 11 */ + u32 filter_type; /* Filter band type */ + u32 center_freq_hz; /* Filter band center frequency */ + u32 filter_gain; /* Filter band initial gain (dB) */ + /* Range is +12 dB to -12 dB with 1dB increments. */ + u32 q_factor; +} __attribute__ ((packed)); + +struct asm_equalizer_params { + u32 enable; + u32 num_bands; + struct asm_eq_band eq_bands[ASM_MAX_EQ_BANDS]; +} __attribute__ ((packed)); + +struct asm_master_gain_params { + u16 master_gain; + u16 padding; +} __attribute__ ((packed)); + +struct asm_lrchannel_gain_params { + u16 left_gain; + u16 right_gain; +} __attribute__ ((packed)); + +struct asm_mute_params { + u32 muteflag; +} __attribute__ ((packed)); + +struct asm_softvolume_params { + u32 period; + u32 step; + u32 rampingcurve; +} __attribute__ ((packed)); + +struct asm_softpause_params { + u32 enable; + u32 period; + u32 step; + u32 rampingcurve; +} __packed; + +struct asm_pp_param_data_hdr { + u32 module_id; + u32 param_id; + u16 param_size; + u16 reserved; +} __attribute__ ((packed)); + +struct asm_pp_params_command { + struct apr_hdr hdr; + u32 *payload; + u32 payload_size; + struct asm_pp_param_data_hdr params; +} __attribute__ ((packed)); + +#define EQUALIZER_MODULE_ID 0x00010c27 +#define EQUALIZER_PARAM_ID 0x00010c28 + +#define VOLUME_CONTROL_MODULE_ID 0x00010bfe +#define MASTER_GAIN_PARAM_ID 0x00010bff +#define L_R_CHANNEL_GAIN_PARAM_ID 0x00010c00 +#define MUTE_CONFIG_PARAM_ID 0x00010c01 +#define SOFT_PAUSE_PARAM_ID 0x00010D6A + +#define IIR_FILTER_ENABLE_PARAM_ID 0x00010c03 +#define IIR_FILTER_PREGAIN_PARAM_ID 0x00010c04 +#define IIR_FILTER_CONFIG_PARAM_ID 0x00010c05 + +#define MBADRC_MODULE_ID 0x00010c06 +#define MBADRC_ENABLE_PARAM_ID 0x00010c07 +#define MBADRC_CONFIG_PARAM_ID 0x00010c08 + + +#define ADM_CMD_SET_PARAMS 0x00010306 +#define ADM_CMD_GET_PARAMS 0x0001030B +#define ADM_CMDRSP_GET_PARAMS 0x0001030C +struct adm_set_params_command { + struct apr_hdr hdr; + u32 payload; + u32 payload_size; +} __attribute__ ((packed)); + + +#define ADM_CMD_TAP_COPP_PCM 0x00010307 +struct adm_tap_copp_pcm_command { + struct apr_hdr hdr; +} __attribute__ ((packed)); + + +/* QDSP6 to Client messages +*/ +#define ADM_SERVICE_CMDRSP_GET_COPP_HANDLES 0x00010308 +struct adm_get_copp_handles_respond { + struct apr_hdr hdr; + u32 handles; + u32 copp_id; +} __attribute__ ((packed)); + +#define ADM_CMDRSP_COPP_OPEN 0x0001030A +struct adm_copp_open_respond { + u32 status; + u16 copp_id; + u16 reserved; +} __attribute__ ((packed)); + +#define ASM_STREAM_PRIORITY_NORMAL 0 +#define ASM_STREAM_PRIORITY_LOW 1 +#define ASM_STREAM_PRIORITY_HIGH 2 +#define ASM_STREAM_PRIORITY_RESERVED 3 + +#define ASM_END_POINT_DEVICE_MATRIX 0 +#define ASM_END_POINT_STREAM 1 + +#define AAC_ENC_MODE_AAC_LC 0x02 +#define AAC_ENC_MODE_AAC_P 0x05 +#define AAC_ENC_MODE_EAAC_P 0x1D + +#define ASM_STREAM_CMD_CLOSE 0x00010BCD +#define ASM_STREAM_CMD_FLUSH 0x00010BCE +#define ASM_STREAM_CMD_SET_PP_PARAMS 0x00010BCF +#define 
ASM_STREAM_CMD_GET_PP_PARAMS 0x00010BD0 +#define ASM_STREAM_CMDRSP_GET_PP_PARAMS 0x00010BD1 +#define ASM_SESSION_CMD_PAUSE 0x00010BD3 +#define ASM_SESSION_CMD_GET_SESSION_TIME 0x00010BD4 +#define ASM_DATA_CMD_EOS 0x00010BDB +#define ASM_DATA_EVENT_EOS 0x00010BDD + +#define ASM_SERVICE_CMD_GET_STREAM_HANDLES 0x00010C0B +#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09 + +#define ASM_SESSION_EVENT_RX_UNDERFLOW 0x00010C17 +#define ASM_SESSION_EVENT_TX_OVERFLOW 0x00010C18 +#define ASM_SERVICE_CMD_GET_WALLCLOCK_TIME 0x00010C19 +#define ASM_DATA_CMDRSP_EOS 0x00010C1C + +/* ASM Data structures */ + +/* common declarations */ +struct asm_pcm_cfg { + u16 ch_cfg; + u16 bits_per_sample; + u32 sample_rate; + u16 is_signed; + u16 interleaved; +}; + +struct asm_adpcm_cfg { + u16 ch_cfg; + u16 bits_per_sample; + u32 sample_rate; + u32 block_size; +}; + +struct asm_yadpcm_cfg { + u16 ch_cfg; + u16 bits_per_sample; + u32 sample_rate; +}; + +struct asm_midi_cfg { + u32 nMode; +}; + +struct asm_wma_cfg { + u16 format_tag; + u16 ch_cfg; + u32 sample_rate; + u32 avg_bytes_per_sec; + u16 block_align; + u16 valid_bits_per_sample; + u32 ch_mask; + u16 encode_opt; + u16 adv_encode_opt; + u32 adv_encode_opt2; + u32 drc_peak_ref; + u32 drc_peak_target; + u32 drc_ave_ref; + u32 drc_ave_target; +}; + +struct asm_wmapro_cfg { + u16 format_tag; + u16 ch_cfg; + u32 sample_rate; + u32 avg_bytes_per_sec; + u16 block_align; + u16 valid_bits_per_sample; + u32 ch_mask; + u16 encode_opt; + u16 adv_encode_opt; + u32 adv_encode_opt2; + u32 drc_peak_ref; + u32 drc_peak_target; + u32 drc_ave_ref; + u32 drc_ave_target; +}; + +struct asm_aac_cfg { + u16 format; + u16 aot; + u16 ep_config; + u16 section_data_resilience; + u16 scalefactor_data_resilience; + u16 spectral_data_resilience; + u16 ch_cfg; + u16 reserved; + u32 sample_rate; +}; + +struct asm_flac_cfg { + u16 stream_info_present; + u16 min_blk_size; + u16 max_blk_size; + u16 ch_cfg; + u16 sample_size; + u16 sample_rate; + u16 md5_sum; + u32 ext_sample_rate; + u32 min_frame_size; + u32 max_frame_size; +}; + +struct asm_vorbis_cfg { + u32 ch_cfg; + u32 bit_rate; + u32 min_bit_rate; + u32 max_bit_rate; + u16 bit_depth_pcm_sample; + u16 bit_stream_format; +}; + +struct asm_aac_read_cfg { + u32 bitrate; + u32 enc_mode; + u16 format; + u16 ch_cfg; + u32 sample_rate; +}; + +struct asm_amrnb_read_cfg { + u16 mode; + u16 dtx_mode; +}; + +struct asm_evrc_read_cfg { + u16 max_rate; + u16 min_rate; + u16 rate_modulation_cmd; + u16 reserved; +}; + +struct asm_qcelp13_read_cfg { + u16 max_rate; + u16 min_rate; + u16 reduced_rate_level; + u16 rate_modulation_cmd; +}; + +struct asm_sbc_read_cfg { + u32 subband; + u32 block_len; + u32 ch_mode; + u32 alloc_method; + u32 bit_rate; + u32 sample_rate; +}; + +struct asm_sbc_bitrate { + u32 bitrate; +}; + +struct asm_immed_decode { + u32 mode; +}; + +struct asm_sbr_ps { + u32 enable; +}; + +struct asm_encode_cfg_blk { + u32 frames_per_buf; + u32 format_id; + u32 cfg_size; + union { + struct asm_pcm_cfg pcm; + struct asm_aac_read_cfg aac; + struct asm_amrnb_read_cfg amrnb; + struct asm_evrc_read_cfg evrc; + struct asm_qcelp13_read_cfg qcelp13; + struct asm_sbc_read_cfg sbc; + } __attribute__((packed)) cfg; +}; + +struct asm_frame_meta_info { + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +}; + +/* Stream level commands */ +#define ASM_STREAM_CMD_OPEN_READ 0x00010BCB +struct asm_stream_cmd_open_read { + struct apr_hdr hdr; + u32 uMode; + u32 src_endpoint; + u32 pre_proc_top; + u32 
format; +} __attribute__((packed)); + +/* Supported formats */ +#define LINEAR_PCM 0x00010BE5 +#define DTMF 0x00010BE6 +#define ADPCM 0x00010BE7 +#define YADPCM 0x00010BE8 +#define MP3 0x00010BE9 +#define MPEG4_AAC 0x00010BEA +#define AMRNB_FS 0x00010BEB +#define V13K_FS 0x00010BED +#define EVRC_FS 0x00010BEE +#define EVRCB_FS 0x00010BEF +#define EVRCWB_FS 0x00010BF0 +#define MIDI 0x00010BF1 +#define SBC 0x00010BF2 +#define WMA_V10PRO 0x00010BF3 +#define WMA_V9 0x00010BF4 +#define AMR_WB_PLUS 0x00010BF5 +#define AC3_DECODER 0x00010BF6 +#define G711_ALAW_FS 0x00010BF7 +#define G711_MLAW_FS 0x00010BF8 +#define G711_PCM_FS 0x00010BF9 + +#define ASM_ENCDEC_SBCRATE 0x00010C13 +#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14 +#define ASM_ENCDEC_CFG_BLK 0x00010C2C + +#define ASM_ENCDEC_SBCRATE 0x00010C13 +#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14 +#define ASM_ENCDEC_CFG_BLK 0x00010C2C + +#define ASM_STREAM_CMD_OPEN_WRITE 0x00010BCA +struct asm_stream_cmd_open_write { + struct apr_hdr hdr; + u32 uMode; + u16 sink_endpoint; + u16 stream_handle; + u32 post_proc_top; + u32 format; +} __attribute__((packed)); + +#define ASM_STREAM_CMD_OPEN_READWRITE 0x00010BCC + +struct asm_stream_cmd_open_read_write { + struct apr_hdr hdr; + u32 uMode; + u32 post_proc_top; + u32 write_format; + u32 read_format; +} __attribute__((packed)); + +#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10 +#define ASM_STREAM_CMD_GET_ENCDEC_PARAM 0x00010C11 +#define ASM_ENCDEC_CFG_BLK_ID 0x00010C2C +#define ASM_ENABLE_SBR_PS 0x00010C63 +struct asm_stream_cmd_encdec_cfg_blk{ + struct apr_hdr hdr; + u32 param_id; + u32 param_size; + struct asm_encode_cfg_blk enc_blk; +} __attribute__((packed)); + +struct asm_stream_cmd_encdec_sbc_bitrate{ + struct apr_hdr hdr; + u32 param_id; + struct asm_sbc_bitrate sbc_bitrate; +} __attribute__((packed)); + +struct asm_stream_cmd_encdec_immed_decode{ + struct apr_hdr hdr; + u32 param_id; + u32 param_size; + struct asm_immed_decode dec; +} __attribute__((packed)); + +struct asm_stream_cmd_encdec_sbr{ + struct apr_hdr hdr; + u32 param_id; + u32 param_size; + struct asm_sbr_ps sbr_ps; +} __attribute__((packed)); + +#define ASM_STREAM _CMD_ADJUST_SAMPLES 0x00010C0A +struct asm_stream_cmd_adjust_samples{ + struct apr_hdr hdr; + u16 nsamples; + u16 reserved; +} __attribute__((packed)); + +#define ASM_STREAM_CMD_TAP_POPP_PCM 0x00010BF9 +struct asm_stream_cmd_tap_popp_pcm{ + struct apr_hdr hdr; + u16 enable; + u16 reserved; + u32 module_id; +} __attribute__((packed)); + +/* Session Level commands */ +#define ASM_SESSION_CMD_MEMORY_MAP 0x00010C32 +struct asm_stream_cmd_memory_map{ + struct apr_hdr hdr; + u32 buf_add; + u32 buf_size; + u16 mempool_id; + u16 reserved; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_MEMORY_UNMAP 0x00010C33 +struct asm_stream_cmd_memory_unmap{ + struct apr_hdr hdr; + u32 buf_add; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_MEMORY_MAP_REGIONS 0x00010C45 +struct asm_memory_map_regions{ + u32 phys; + u32 buf_size; +} __attribute__((packed)); + +struct asm_stream_cmd_memory_map_regions{ + struct apr_hdr hdr; + u16 mempool_id; + u16 nregions; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS 0x00010C46 +struct asm_memory_unmap_regions{ + u32 phys; +} __attribute__((packed)); + +struct asm_stream_cmd_memory_unmap_regions{ + struct apr_hdr hdr; + u16 nregions; + u16 reserved; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_RUN 0x00010BD2 +struct asm_stream_cmd_run{ + struct apr_hdr hdr; + u32 flags; + u32 msw_ts; + u32 lsw_ts; +} 
__attribute__((packed)); + +/* Session level events */ +#define ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS 0x00010BD5 +struct asm_stream_cmd_reg_rx_underflow_event{ + struct apr_hdr hdr; + u16 enable; + u16 reserved; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS 0x00010BD6 +struct asm_stream_cmd_reg_tx_overflow_event{ + struct apr_hdr hdr; + u16 enable; + u16 reserved; +} __attribute__((packed)); + +/* Data Path commands */ +#define ASM_DATA_CMD_WRITE 0x00010BD9 +struct asm_stream_cmd_write{ + struct apr_hdr hdr; + u32 buf_add; + u32 avail_bytes; + u32 uid; + u32 msw_ts; + u32 lsw_ts; + u32 uflags; +} __attribute__((packed)); + +#define ASM_DATA_CMD_READ 0x00010BDA +struct asm_stream_cmd_read{ + struct apr_hdr hdr; + u32 buf_add; + u32 buf_size; + u32 uid; +} __attribute__((packed)); + +#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC +#define ASM_DATA_EVENT_MEDIA_FORMAT_UPDATE 0x00010BDE +struct asm_stream_media_format_update{ + struct apr_hdr hdr; + u32 format; + u32 cfg_size; + union { + struct asm_pcm_cfg pcm_cfg; + struct asm_adpcm_cfg adpcm_cfg; + struct asm_yadpcm_cfg yadpcm_cfg; + struct asm_midi_cfg midi_cfg; + struct asm_wma_cfg wma_cfg; + struct asm_wmapro_cfg wmapro_cfg; + struct asm_aac_cfg aac_cfg; + struct asm_flac_cfg flac_cfg; + struct asm_vorbis_cfg vorbis_cfg; + } __attribute__((packed)) write_cfg; +} __attribute__((packed)); + + +/* Command Responses */ +#define ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM 0x00010C12 +struct asm_stream_cmdrsp_get_readwrite_param{ + struct apr_hdr hdr; + u32 status; + u32 param_id; + u16 param_size; + u16 padding; + union { + struct asm_sbc_bitrate sbc_bitrate; + struct asm_immed_decode aac_dec; + } __attribute__((packed)) read_write_cfg; +} __attribute__((packed)); + + +#define ASM_SESSION_CMDRSP_GET_SESSION_TIME 0x00010BD8 +struct asm_stream_cmdrsp_get_session_time{ + struct apr_hdr hdr; + u32 status; + u32 msw_ts; + u32 lsw_ts; +} __attribute__((packed)); + +#define ASM_DATA_EVENT_WRITE_DONE 0x00010BDF +struct asm_data_event_write_done{ + u32 buf_add; + u32 status; +} __attribute__((packed)); + +#define ASM_DATA_EVENT_READ_DONE 0x00010BE0 +struct asm_data_event_read_done{ + u32 status; + u32 buffer_add; + u32 enc_frame_size; + u32 offset; + u32 msw_ts; + u32 lsw_ts; + u32 flags; + u32 num_frames; + u32 id; +} __attribute__((packed)); + + +/* service level events */ + +#define ASM_SERVICE_CMDRSP_GET_STREAM_HANDLES 0x00010C1B +struct asm_svc_cmdrsp_get_strm_handles{ + struct apr_hdr hdr; + u32 num_handles; + u32 stream_handles; +} __attribute__((packed)); + + +#define ASM_SERVICE_CMDRSP_GET_WALLCLOCK_TIME 0x00010C1A +struct asm_svc_cmdrsp_get_wallclock_time{ + struct apr_hdr hdr; + u32 status; + u32 msw_ts; + u32 lsw_ts; +} __attribute__((packed)); + +/* + * Error code +*/ +#define ADSP_EOK 0x00000000 /* Success / completed / no errors. */ +#define ADSP_EFAILED 0x00000001 /* General failure. */ +#define ADSP_EBADPARAM 0x00000002 /* Bad operation parameter(s). */ +#define ADSP_EUNSUPPORTED 0x00000003 /* Unsupported routine/operation. */ +#define ADSP_EVERSION 0x00000004 /* Unsupported version. */ +#define ADSP_EUNEXPECTED 0x00000005 /* Unexpected problem encountered. */ +#define ADSP_EPANIC 0x00000006 /* Unhandled problem occurred. */ +#define ADSP_ENORESOURCE 0x00000007 /* Unable to allocate resource(s). */ +#define ADSP_EHANDLE 0x00000008 /* Invalid handle. */ +#define ADSP_EALREADY 0x00000009 /* Operation is already processed. 
*/ +#define ADSP_ENOTREADY 0x0000000A /* Operation not ready to be processed*/ +#define ADSP_EPENDING 0x0000000B /* Operation is pending completion*/ +#define ADSP_EBUSY 0x0000000C /* Operation could not be accepted or + processed. */ +#define ADSP_EABORTED 0x0000000D /* Operation aborted due to an error. */ +#define ADSP_EPREEMPTED 0x0000000E /* Operation preempted by higher priority*/ +#define ADSP_ECONTINUE 0x0000000F /* Operation requests intervention + to complete. */ +#define ADSP_EIMMEDIATE 0x00000010 /* Operation requests immediate + intervention to complete. */ +#define ADSP_ENOTIMPL 0x00000011 /* Operation is not implemented. */ +#define ADSP_ENEEDMORE 0x00000012 /* Operation needs more data or resources*/ + +#endif /*_APR_AUDIO_H_*/ diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h b/arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h new file mode 100644 index 00000000..d8ea0d45 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h @@ -0,0 +1,240 @@ +/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
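[Editorial illustration, not part of the patch: the AFE and ADM definitions in apr_audio.h above are plain APR payloads, each embedding a struct apr_hdr (from apr.h earlier in this patch) followed by port- or copp-specific fields. The sketch below shows how a driver might assemble the AFE_PORT_AUDIO_IF_CONFIG command for the MI2S RX port using the header macros from apr.h. The function name and the chosen MI2S settings are invented for the example, and the APR routing fields (source/destination service, domain and port), which a real driver would take from the apr_svc handle returned by apr_register(), are deliberately left out.]

#include <linux/string.h>
#include <mach/qdsp6v3/apr.h>
#include <mach/qdsp6v3/apr_audio.h>

/* Hypothetical helper: configure MI2S RX as 16-bit stereo on data line SD0. */
static int example_afe_config_mi2s_rx(void *apr_handle)
{
	struct afe_audioif_config_command cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					  APR_HDR_LEN(APR_HDR_SIZE),
					  APR_PKT_VER);
	cfg.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
					sizeof(cfg) - APR_HDR_SIZE);
	cfg.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
	/* src/dest service, domain and port fields are omitted in this
	 * sketch; they normally come from the registered apr_svc handle. */

	cfg.port_id = MI2S_RX;
	cfg.port.mi2s.bitwidth = 16;		 /* 16-bit samples */
	cfg.port.mi2s.line = AFE_I2S_SD0;	 /* data line SD0 */
	cfg.port.mi2s.channel = AFE_MI2S_STEREO; /* i2s stereo = 3 */
	cfg.port.mi2s.ws = 1;			 /* internal word select */

	return apr_send_pkt(apr_handle, (uint32_t *)&cfg);
}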
+ * + */ +#ifndef __MACH_QDSP6_V2_SNDDEV_H +#define __MACH_QDSP6_V2_SNDDEV_H +#include + +#define AUDIO_DEV_CTL_MAX_DEV 64 +#define DIR_TX 2 +#define DIR_RX 1 + +#define DEVICE_IGNORE 0xffffffff +#define SESSION_IGNORE 0x0UL + +/* 8 concurrent sessions with Q6 possible, session:0 + reserved in DSP */ +#define MAX_SESSIONS 0x09 + +/* This represents Maximum bit needed for representing sessions + per clients, MAX_BIT_PER_CLIENT >= MAX_SESSIONS */ +#define MAX_BIT_PER_CLIENT 16 + +#define VOICE_STATE_INVALID 0x0 +#define VOICE_STATE_INCALL 0x1 +#define VOICE_STATE_OFFCALL 0x2 +#define ONE_TO_MANY 1 +#define MANY_TO_ONE 2 + +struct msm_snddev_info { + const char *name; + u32 capability; + u32 copp_id; + u32 acdb_id; + u32 dev_volume; + struct msm_snddev_ops { + int (*open)(struct msm_snddev_info *); + int (*close)(struct msm_snddev_info *); + int (*set_freq)(struct msm_snddev_info *, u32); + int (*enable_sidetone)(struct msm_snddev_info *, u32, uint16_t); + int (*set_device_volume)(struct msm_snddev_info *, u32); + int (*enable_anc)(struct msm_snddev_info *, u32); + } dev_ops; + u8 opened; + void *private_data; + bool state; + u32 sample_rate; + u32 channel_mode; + u32 set_sample_rate; + u64 sessions; + int usage_count; + s32 max_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* [0] is for NB,[1] for WB */ + s32 min_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; +}; + +struct msm_volume { + int volume; /* Volume parameter, in % Scale */ + int pan; +}; + +extern struct msm_volume msm_vol_ctl; + +void msm_snddev_register(struct msm_snddev_info *); +void msm_snddev_unregister(struct msm_snddev_info *); +int msm_snddev_devcount(void); +int msm_snddev_query(int dev_id); +unsigned short msm_snddev_route_dec(int popp_id); +unsigned short msm_snddev_route_enc(int enc_id); + +int msm_snddev_set_dec(int popp_id, int copp_id, int set, + int rate, int channel_mode); +int msm_snddev_set_enc(int popp_id, int copp_id, int set, + int rate, int channel_mode); + +int msm_snddev_is_set(int popp_id, int copp_id); +int msm_get_voc_route(u32 *rx_id, u32 *tx_id); +int msm_set_voc_route(struct msm_snddev_info *dev_info, int stream_type, + int dev_id); +int msm_snddev_enable_sidetone(u32 dev_id, u32 enable, uint16_t gain); + +int msm_set_copp_id(int session_id, int copp_id); + +int msm_clear_copp_id(int session_id, int copp_id); + +int msm_clear_session_id(int session_id); + +int msm_reset_all_device(void); + +int msm_clear_all_session(void); + +struct msm_snddev_info *audio_dev_ctrl_find_dev(u32 dev_id); + +void msm_release_voc_thread(void); + +int snddev_voice_set_volume(int vol, int path); + +int msm_get_call_state(void); + +struct auddev_evt_voc_devinfo { + u32 dev_type; /* Rx or Tx */ + u32 acdb_dev_id; /* acdb id of device */ + u32 dev_sample; /* Sample rate of device */ + s32 max_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb (milibel), + [0] is for NB, other for WB */ + s32 min_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb */ + u32 dev_id; /* registered device id */ + u32 dev_port_id; +}; + +struct auddev_evt_audcal_info { + u32 dev_id; + u32 acdb_id; + u32 sample_rate; + u32 dev_type; + u32 sessions; +}; + +union msm_vol_mute { + int vol; + bool mute; +}; + +struct auddev_evt_voc_mute_info { + u32 dev_type; + u32 acdb_dev_id; + union msm_vol_mute dev_vm_val; +}; + +struct auddev_evt_freq_info { + u32 dev_type; + u32 acdb_dev_id; + u32 sample_rate; +}; + +union auddev_evt_data { + struct auddev_evt_voc_devinfo voc_devinfo; + struct auddev_evt_voc_mute_info voc_vm_info; + struct auddev_evt_freq_info freq_info; + u32 routing_id; + s32 
session_vol; + s32 voice_state; + struct auddev_evt_audcal_info audcal_info; +}; + +struct message_header { + uint32_t id; + uint32_t data_len; +}; + +#define AUDDEV_EVT_DEV_CHG_VOICE 0x01 /* device change event */ +#define AUDDEV_EVT_DEV_RDY 0x02 /* device ready event */ +#define AUDDEV_EVT_DEV_RLS 0x04 /* device released event */ +#define AUDDEV_EVT_REL_PENDING 0x08 /* device release pending */ +#define AUDDEV_EVT_DEVICE_VOL_MUTE_CHG 0x10 /* device volume changed */ +#define AUDDEV_EVT_START_VOICE 0x20 /* voice call start */ +#define AUDDEV_EVT_END_VOICE 0x40 /* voice call end */ +#define AUDDEV_EVT_STREAM_VOL_CHG 0x80 /* device volume changed */ +#define AUDDEV_EVT_FREQ_CHG 0x100 /* Change in freq */ +#define AUDDEV_EVT_VOICE_STATE_CHG 0x200 /* Change in voice state */ + +#define AUDDEV_CLNT_VOC 0x1 /*Vocoder clients*/ +#define AUDDEV_CLNT_DEC 0x2 /*Decoder clients*/ +#define AUDDEV_CLNT_ENC 0x3 /* Encoder clients */ +#define AUDDEV_CLNT_AUDIOCAL 0x4 /* AudioCalibration client */ + +#define AUDIO_DEV_CTL_MAX_LISTNER 20 /* Max Listeners Supported */ + +struct msm_snd_evt_listner { + uint32_t evt_id; + uint32_t clnt_type; + uint32_t clnt_id; + void *private_data; + void (*auddev_evt_listener)(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data); + struct msm_snd_evt_listner *cb_next; + struct msm_snd_evt_listner *cb_prev; +}; + +struct event_listner { + struct msm_snd_evt_listner *cb; + u32 num_listner; + int state; /* Call state */ /* TODO remove this if not req*/ +}; + +extern struct event_listner event; +int auddev_register_evt_listner(u32 evt_id, u32 clnt_type, u32 clnt_id, + void (*listner)(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data), + void *private_data); +int auddev_unregister_evt_listner(u32 clnt_type, u32 clnt_id); +void mixer_post_event(u32 evt_id, u32 dev_id); +void broadcast_event(u32 evt_id, u32 dev_id, u64 session_id); +int auddev_cfg_tx_copp_topology(int session_id, int cfg); +int msm_snddev_request_freq(int *freq, u32 session_id, + u32 capability, u32 clnt_type); +int msm_snddev_withdraw_freq(u32 session_id, + u32 capability, u32 clnt_type); +int msm_device_is_voice(int dev_id); +int msm_get_voc_freq(int *tx_freq, int *rx_freq); +int msm_snddev_get_enc_freq(int session_id); +int msm_set_voice_vol(int dir, s32 volume); +int msm_set_voice_mute(int dir, int mute); +int msm_get_voice_state(void); +int msm_enable_incall_recording(int popp_id, int rec_mode, int rate, + int channel_mode); +int msm_disable_incall_recording(uint32_t popp_id, uint32_t rec_mode); +void msm_set_voc_freq(int tx_freq, int rx_freq); + +struct dev_ctrl_ops { + int (*support_opendsp) (void); +}; + +void htc_8x60_register_dev_ctrl_ops(struct dev_ctrl_ops *ops); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h b/arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h new file mode 100644 index 00000000..0e977557 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h @@ -0,0 +1,67 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
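[Editorial illustration, not part of the patch: audio_dev_ctl.h above is the device-control event hub; clients register a callback for a bitmask of AUDDEV_EVT_* events and receive a union auddev_evt_data payload when those events fire. The sketch below registers a decoder-client listener for device ready/release events. The function names and the session id parameter are hypothetical.]

#include <linux/types.h>
#include <mach/qdsp6v3/audio_dev_ctl.h>

/* Hypothetical callback: react to device ready/release events. */
static void example_dev_listener(u32 evt_id,
				 union auddev_evt_data *evt_payload,
				 void *private_data)
{
	switch (evt_id) {
	case AUDDEV_EVT_DEV_RDY:
		/* a routed device just came up */
		break;
	case AUDDEV_EVT_DEV_RLS:
		/* the device was torn down */
		break;
	default:
		break;
	}
}

/* Hypothetical registration for a decoder session. */
static int example_register_listener(u32 session_id)
{
	return auddev_register_evt_listner(AUDDEV_EVT_DEV_RDY |
					   AUDDEV_EVT_DEV_RLS,
					   AUDDEV_CLNT_DEC, session_id,
					   example_dev_listener, NULL);
}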
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __Q6AFE_H__ +#define __Q6AFE_H__ +#include + +#define MSM_AFE_MONO 0 +#define MSM_AFE_MONO_RIGHT 1 +#define MSM_AFE_MONO_LEFT 2 +#define MSM_AFE_STEREO 3 + +enum { + IDX_PRIMARY_I2S_RX = 0, + IDX_PRIMARY_I2S_TX = 1, + IDX_PCM_RX = 2, + IDX_PCM_TX = 3, + IDX_SECONDARY_I2S_RX = 4, + IDX_SECONDARY_I2S_TX = 5, + IDX_MI2S_RX = 6, + IDX_MI2S_TX = 7, + IDX_HDMI_RX = 8, + IDX_RSVD_2 = 9, + IDX_RSVD_3 = 10, + IDX_DIGI_MIC_TX = 11, + IDX_VOICE_RECORD_RX = 12, + IDX_VOICE_RECORD_TX = 13, + IDX_VOICE_PLAYBACK_TX = 14, + AFE_MAX_PORTS +}; + + +int afe_open(u16 port_id, union afe_port_config *afe_config, int rate); +int afe_close(int port_id); +int afe_loopback(u16 enable, u16 rx_port, u16 tx_port); +int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain); +int afe_loopback_gain(u16 port_id, u16 volume); +int afe_validate_port(u16 port_id); +int afe_get_port_index(u16 port_id); +int afe_start_pseudo_port(u16 port_id); +int afe_stop_pseudo_port(u16 port_id); + +#endif /* __Q6AFE_H__ */ diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h b/arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h new file mode 100644 index 00000000..afb03772 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h @@ -0,0 +1,264 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
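[Editorial illustration, not part of the patch: q6afe.h above completes the AFE wrapper; callers hand afe_open() a filled union afe_port_config (declared in apr_audio.h) plus a sample rate, and tear the port down with afe_close(). The sketch below brings up the PCM RX port; the function name and the 8 kHz rate are chosen only for the example.]

#include <linux/string.h>
#include <mach/qdsp6v3/apr_audio.h>
#include <mach/qdsp6v3/q6afe.h>

/* Hypothetical bring-up of the PCM RX port at 8 kHz. */
static int example_afe_pcm_rx_start(void)
{
	union afe_port_config cfg;
	int rc;

	memset(&cfg, 0, sizeof(cfg));
	cfg.pcm.mode = AFE_PCM_CFG_MODE_PCM;	/* short-sync PCM */
	cfg.pcm.sync = AFE_PCM_CFG_SYNC_INT;	/* internal frame sync */
	cfg.pcm.frame = AFE_PCM_CFG_FRM_256BPF;	/* 256 bits per frame */
	cfg.pcm.quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD;
	cfg.pcm.slot = 0;			/* slot 0 of 0..31 */
	cfg.pcm.data = AFE_PCM_CFG_CDATAOE_MASTER;

	rc = afe_open(PCM_RX, &cfg, 8000);
	if (rc < 0)
		return rc;

	/* ... stream the data ... */

	return afe_close(PCM_RX);
}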
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __Q6_ASM_H__ +#define __Q6_ASM_H__ + +#include + +#define IN 0x000 +#define OUT 0x001 +#define CH_MODE_MONO 0x001 +#define CH_MODE_STEREO 0x002 + +#define FORMAT_LINEAR_PCM 0x0000 +#define FORMAT_DTMF 0x0001 +#define FORMAT_ADPCM 0x0002 +#define FORMAT_YADPCM 0x0003 +#define FORMAT_MP3 0x0004 +#define FORMAT_MPEG4_AAC 0x0005 +#define FORMAT_AMRNB 0x0006 +#define FORMAT_AMRWB 0x0007 +#define FORMAT_V13K 0x0008 +#define FORMAT_EVRC 0x0009 +#define FORMAT_EVRCB 0x000a +#define FORMAT_EVRCWB 0x000b +#define FORMAT_MIDI 0x000c +#define FORMAT_SBC 0x000d +#define FORMAT_WMA_V10PRO 0x000e +#define FORMAT_WMA_V9 0x000f +#define FORMAT_AMR_WB_PLUS 0x0010 + +#define ENCDEC_SBCBITRATE 0x0001 +#define ENCDEC_IMMEDIATE_DECODE 0x0002 +#define ENCDEC_CFG_BLK 0x0003 + +#define CMD_PAUSE 0x0001 +#define CMD_FLUSH 0x0002 +#define CMD_EOS 0x0003 +#define CMD_CLOSE 0x0004 + +/* bit 0:1 represents priority of stream */ +#define STREAM_PRIORITY_NORMAL 0x0000 +#define STREAM_PRIORITY_LOW 0x0001 +#define STREAM_PRIORITY_HIGH 0x0002 + +/* bit 4 represents META enable of encoded data buffer */ +#define BUFFER_META_ENABLE 0x0010 + +#define ASYNC_IO_MODE 0x0002 +#define SYNC_IO_MODE 0x0001 +#define NO_TIMESTAMP 0xFF00 +#define SET_TIMESTAMP 0x0000 + +#define SOFT_PAUSE_ENABLE 1 +#define SOFT_PAUSE_DISABLE 0 + +#define SESSION_MAX 0x08 + +typedef void (*app_cb)(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv); + +struct audio_buffer { + dma_addr_t phys; + void *data; + uint32_t used; + uint32_t size;/* size of buffer */ + uint32_t actual_size; /* actual number of bytes read by DSP */ +}; + +struct audio_aio_write_param { + unsigned long paddr; + uint32_t uid; + uint32_t len; + uint32_t msw_ts; + uint32_t lsw_ts; + uint32_t flags; +}; + +struct audio_aio_read_param { + unsigned long paddr; + uint32_t len; + uint32_t uid; +}; + +struct audio_port_data { + struct audio_buffer *buf; + uint32_t max_buf_cnt; + uint32_t dsp_buf; + uint32_t cpu_buf; + /* read or write locks */ + struct mutex lock; + spinlock_t dsp_lock; +}; + +struct audio_client { + int session; + /* idx:1 out port, 0: in port*/ + struct audio_port_data port[2]; + + struct apr_svc *apr; + struct mutex cmd_lock; + + atomic_t cmd_state; + atomic_t time_flag; + wait_queue_head_t cmd_wait; + wait_queue_head_t time_wait; + + app_cb cb; + void *priv; + uint32_t io_mode; + uint64_t time_stamp; +}; + +void q6asm_audio_client_free(struct audio_client *ac); + +struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv); + +int q6asm_audio_client_buf_alloc(unsigned int dir/* 1:Out,0:In */, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt); +int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir + /* 1:Out,0:In */, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt); + +int q6asm_audio_client_buf_free_contiguous(unsigned int dir, + struct audio_client *ac); + +int q6asm_open_read(struct audio_client *ac, uint32_t format); + +int q6asm_open_write(struct 
audio_client *ac, uint32_t format); + +int q6asm_open_read_write(struct audio_client *ac, + uint32_t rd_format, + uint32_t wr_format); + +int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags); + +int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags); + +int q6asm_async_write(struct audio_client *ac, + struct audio_aio_write_param *param); + +int q6asm_async_read(struct audio_client *ac, + struct audio_aio_read_param *param); + +int q6asm_read(struct audio_client *ac); +int q6asm_read_nolock(struct audio_client *ac); + +int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, + int dir, uint32_t bufsz, uint32_t bufcnt); + +int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, + int dir); + +int q6asm_run(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts); + +int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts); + +int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable); + +int q6asm_cmd(struct audio_client *ac, int cmd); + +int q6asm_cmd_nowait(struct audio_client *ac, int cmd); + +void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, + uint32_t *size, uint32_t *idx); + +int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac); + +/* File format specific configurations to be added below */ + +int q6asm_enc_cfg_blk_aac(struct audio_client *ac, + uint32_t frames_per_buf, + uint32_t sample_rate, uint32_t channels, + uint32_t bit_rate, + uint32_t mode, uint32_t format); + +int q6asm_enc_cfg_blk_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels); + +int q6asm_enable_sbrps(struct audio_client *ac, + uint32_t sbr_ps); + +int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t reduced_rate_level, uint16_t rate_modulation_cmd); + +int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t rate_modulation_cmd); + +int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t band_mode, uint16_t dtx_enable); + +int q6asm_media_format_block_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels); + +int q6asm_media_format_block_aac(struct audio_client *ac, + struct asm_aac_cfg *cfg); + +int q6asm_media_format_block_wma(struct audio_client *ac, + void *cfg); + +int q6asm_media_format_block_wmapro(struct audio_client *ac, + void *cfg); + +/* PP specific */ +int q6asm_equalizer(struct audio_client *ac, void *eq); + +/* Send Volume Command */ +int q6asm_set_volume(struct audio_client *ac, int volume); + +/* Set SoftPause Params */ +int q6asm_set_softpause(struct audio_client *ac, + struct asm_softpause_params *param); + +/* Send left-right channel gain */ +int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain); + +/* Enable Mute/unmute flag */ +int q6asm_set_mute(struct audio_client *ac, int muteflag); + +uint64_t q6asm_get_session_time(struct audio_client *ac); + +/* Client can set the IO mode to either AIO/SIO mode */ +int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode); + +#ifdef CONFIG_MSM8X60_RTAC +/* Get Service ID for APR communication */ +int q6asm_get_apr_service_id(int session_id); +#endif + +#endif /* __Q6_ASM_H__ */ diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h b/arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h new file mode 100644 
index 00000000..25d4ca3a --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h @@ -0,0 +1,757 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __QDSP6VOICE_H__ +#define __QDSP6VOICE_H__ + +#include + +/* Device Event */ +#define DEV_CHANGE_READY 0x1 + +#define VOICE_CALL_START 0x1 +#define VOICE_CALL_END 0 + +#define VOICE_DEV_ENABLED 0x1 +#define VOICE_DEV_DISABLED 0 + +#define MAX_VOC_PKT_SIZE 322 + +#define SESSION_NAME_LEN 20 + +struct voice_header { + uint32_t id; + uint32_t data_len; +}; + +struct voice_init { + struct voice_header hdr; + void *cb_handle; +}; + + +/* Device information payload structure */ + +struct device_data { + uint32_t dev_acdb_id; + uint32_t volume; /* in percentage */ + uint32_t mute; + uint32_t sample; + uint32_t enabled; + uint32_t dev_id; + uint32_t dev_port_id; +}; + +enum { + VOC_INIT = 0, + VOC_RUN, + VOC_CHANGE, + VOC_RELEASE, +}; + +/* TO MVM commands */ +#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110FE +/* Create a new full control MVM session. */ + +#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IMVM_CMD_ATTACH_STREAM 0x0001123C +/* Attach a stream to the MVM. */ + +#define VSS_IMVM_CMD_DETACH_STREAM 0x0001123D +/* Detach a stream from the MVM. */ + +#define VSS_IMVM_CMD_START_VOICE 0x00011190 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IMVM_CMD_STOP_VOICE 0x00011192 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_ISTREAM_CMD_ATTACH_VOCPROC 0x000110F8 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_ISTREAM_CMD_DETACH_VOCPROC 0x000110F9 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + + +#define VSS_ISTREAM_CMD_SET_TTY_MODE 0x00011196 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. 
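+ * Payload: struct vss_istream_cmd_set_tty_mode_t (defined below).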
*/ + +#define VSS_ICOMMON_CMD_SET_NETWORK 0x0001119C +/* Set the network type. */ + +#define VSS_ICOMMON_CMD_SET_VOICE_TIMING 0x000111E0 +/* Set the voice timing parameters. */ + +struct vss_imvm_cmd_create_control_session_t { + char name[SESSION_NAME_LEN]; /* + * A variable-sized stream name. + * + * The stream name size is the payload size minus the size of the other + * fields. + */ +} __packed; + +struct vss_istream_cmd_set_tty_mode_t { + uint32_t mode; + /**< + * TTY mode. + * + * 0 : TTY disabled + * 1 : HCO + * 2 : VCO + * 3 : FULL + */ +} __attribute__((packed)); + +struct vss_istream_cmd_attach_vocproc_t { + uint16_t handle; + /**< Handle of vocproc being attached. */ +} __attribute__((packed)); + +struct vss_istream_cmd_detach_vocproc_t { + uint16_t handle; + /**< Handle of vocproc being detached. */ +} __attribute__((packed)); + +struct vss_imvm_cmd_attach_stream_t { + uint16_t handle; + /* The stream handle to attach. */ +} __attribute__((packed)); + +struct vss_imvm_cmd_detach_stream_t { + uint16_t handle; + /* The stream handle to detach. */ +} __attribute__((packed)); + +struct vss_icommon_cmd_set_network_t { + uint32_t network_id; + /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */ +} __attribute__((packed)); + +struct vss_icommon_cmd_set_voice_timing_t { + uint16_t mode; + /* + * The vocoder frame synchronization mode. + * + * 0 : No frame sync. + * 1 : Hard VFR (20ms Vocoder Frame Reference interrupt). + */ + uint16_t enc_offset; + /* + * The offset in microseconds from the VFR to deliver a Tx vocoder + * packet. The offset should be less than 20000us. + */ + uint16_t dec_req_offset; + /* + * The offset in microseconds from the VFR to request for an Rx vocoder + * packet. The offset should be less than 20000us. + */ + uint16_t dec_offset; + /* + * The offset in microseconds from the VFR to indicate the deadline to + * receive an Rx vocoder packet. The offset should be less than 20000us. + * Rx vocoder packets received after this deadline are not guaranteed to + * be processed. + */ +} __attribute__((packed)); + +struct mvm_attach_vocproc_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_attach_vocproc_t mvm_attach_cvp_handle; +} __attribute__((packed)); + +struct mvm_detach_vocproc_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_detach_vocproc_t mvm_detach_cvp_handle; +} __attribute__((packed)); + +struct mvm_create_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_imvm_cmd_create_control_session_t mvm_session; +} __packed; + +struct mvm_set_tty_mode_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_tty_mode_t tty_mode; +} __attribute__((packed)); + +struct mvm_attach_stream_cmd { + struct apr_hdr hdr; + struct vss_imvm_cmd_attach_stream_t attach_stream; +} __attribute__((packed)); + +struct mvm_detach_stream_cmd { + struct apr_hdr hdr; + struct vss_imvm_cmd_detach_stream_t detach_stream; +} __attribute__((packed)); + +struct mvm_set_network_cmd { + struct apr_hdr hdr; + struct vss_icommon_cmd_set_network_t network; +} __attribute__((packed)); + +struct mvm_set_voice_timing_cmd { + struct apr_hdr hdr; + struct vss_icommon_cmd_set_voice_timing_t timing; +} __attribute__((packed)); + +/* TO CVS commands */ +#define VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x00011140 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110F7 +/* Create a new full control stream session. 
*/ + +#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C + +#define VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA 0x000110FB + +#define VSS_ISTREAM_CMD_SET_MUTE 0x00011022 + +#define VSS_ISTREAM_CMD_SET_MEDIA_TYPE 0x00011186 +/* Set media type on the stream. */ + +#define VSS_ISTREAM_EVT_SEND_ENC_BUFFER 0x00011015 +/* Event sent by the stream to its client to provide an encoded packet. */ + +#define VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER 0x00011017 +/* Event sent by the stream to its client requesting for a decoder packet. + * The client should respond with a VSS_ISTREAM_EVT_SEND_DEC_BUFFER event. + */ + +#define VSS_ISTREAM_EVT_SEND_DEC_BUFFER 0x00011016 +/* Event sent by the client to the stream in response to a + * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER event, providing a decoder packet. + */ + +#define VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE 0x0001113E +/* Set AMR encoder rate. */ + +#define VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE 0x0001113F +/* Set AMR-WB encoder rate. */ + +#define VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE 0x00011019 +/* Set encoder minimum and maximum rate. */ + +#define VSS_ISTREAM_CMD_SET_ENC_DTX_MODE 0x0001101D +/* Set encoder DTX mode. */ + +#define VSS_ISTREAM_CMD_START_RECORD 0x00011236 +/* Start in-call conversation recording. */ + +#define VSS_ISTREAM_CMD_STOP_RECORD 0x00011237 +/* Stop in-call conversation recording. */ + +#define VSS_ISTREAM_CMD_START_PLAYBACK 0x00011238 +/* Start in-call music delivery on the Tx voice path. */ + +#define VSS_ISTREAM_CMD_STOP_PLAYBACK 0x00011239 +/* Stop the in-call music delivery on the Tx voice path. */ + +struct vss_istream_cmd_create_passive_control_session_t { + char name[SESSION_NAME_LEN]; + /**< + * A variable-sized stream name. + * + * The stream name size is the payload size minus the size of the other + * fields. + */ +} __attribute__((packed)); + +struct vss_istream_cmd_set_mute_t { + uint16_t direction; + /**< + * 0 : TX only + * 1 : RX only + * 2 : TX and Rx + */ + uint16_t mute_flag; + /**< + * Mute, un-mute. + * + * 0 : Silence disable + * 1 : Silence enable + * 2 : CNG enable. Applicable to TX only. If set on RX behavior + * will be the same as 1 + */ +} __attribute__((packed)); + +struct vss_istream_cmd_create_full_control_session_t { + uint16_t direction; + /* + * Stream direction. + * + * 0 : TX only + * 1 : RX only + * 2 : TX and RX + * 3 : TX and RX loopback + */ + uint32_t enc_media_type; + /* Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ + uint32_t dec_media_type; + /* Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ + uint32_t network_id; + /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */ + char name[SESSION_NAME_LEN]; + /* + * A variable-sized stream name. + * + * The stream name size is the payload size minus the size of the other + * fields. + */ +} __attribute__((packed)); + +struct vss_istream_cmd_set_media_type_t { + uint32_t rx_media_id; + /* Set the Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ + uint32_t tx_media_id; + /* Set the Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ +} __attribute__((packed)); + +struct vss_istream_evt_send_enc_buffer_t { + uint32_t media_id; + /* Media ID of the packet. */ + uint8_t packet_data[MAX_VOC_PKT_SIZE]; + /* Packet data buffer. */ +} __attribute__((packed)); + +struct vss_istream_evt_send_dec_buffer_t { + uint32_t media_id; + /* Media ID of the packet. */ + uint8_t packet_data[MAX_VOC_PKT_SIZE]; + /* Packet data. */ +} __attribute__((packed)); + +struct vss_istream_cmd_voc_amr_set_enc_rate_t { + uint32_t mode; + /* Set the AMR encoder rate. 
+ * + * 0x00000000 : 4.75 kbps + * 0x00000001 : 5.15 kbps + * 0x00000002 : 5.90 kbps + * 0x00000003 : 6.70 kbps + * 0x00000004 : 7.40 kbps + * 0x00000005 : 7.95 kbps + * 0x00000006 : 10.2 kbps + * 0x00000007 : 12.2 kbps + */ +} __attribute__((packed)); + +struct vss_istream_cmd_voc_amrwb_set_enc_rate_t { + uint32_t mode; + /* Set the AMR-WB encoder rate. + * + * 0x00000000 : 6.60 kbps + * 0x00000001 : 8.85 kbps + * 0x00000002 : 12.65 kbps + * 0x00000003 : 14.25 kbps + * 0x00000004 : 15.85 kbps + * 0x00000005 : 18.25 kbps + * 0x00000006 : 19.85 kbps + * 0x00000007 : 23.05 kbps + * 0x00000008 : 23.85 kbps + */ +} __attribute__((packed)); + +struct vss_istream_cmd_cdma_set_enc_minmax_rate_t { + uint16_t min_rate; + /* Set the lower bound encoder rate. + * + * 0x0000 : Blank frame + * 0x0001 : Eighth rate + * 0x0002 : Quarter rate + * 0x0003 : Half rate + * 0x0004 : Full rate + */ + uint16_t max_rate; + /* Set the upper bound encoder rate. + * + * 0x0000 : Blank frame + * 0x0001 : Eighth rate + * 0x0002 : Quarter rate + * 0x0003 : Half rate + * 0x0004 : Full rate + */ +} __attribute__((packed)); + +struct vss_istream_cmd_set_enc_dtx_mode_t { + uint32_t enable; + /* Toggle DTX on or off. + * + * 0 : Disables DTX + * 1 : Enables DTX + */ +} __attribute__((packed)); + +#define VSS_TAP_POINT_NONE 0x00010F78 +/* Indicates no tapping for specified path. */ + +#define VSS_TAP_POINT_STREAM_END 0x00010F79 +/* Indicates that specified path should be tapped at the end of the stream. */ + +struct vss_istream_cmd_start_record_t { + uint32_t rx_tap_point; + /* Tap point to use on the Rx path. Supported values are: + * VSS_TAP_POINT_NONE : Do not record Rx path. + * VSS_TAP_POINT_STREAM_END : Rx tap point is at the end of the stream. + */ + uint32_t tx_tap_point; + /* Tap point to use on the Tx path. Supported values are: + * VSS_TAP_POINT_NONE : Do not record tx path. + * VSS_TAP_POINT_STREAM_END : Tx tap point is at the end of the stream. 
+ */ +} __attribute__((packed)); + +struct cvs_create_passive_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_create_passive_control_session_t cvs_session; +} __attribute__((packed)); + +struct cvs_create_full_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_create_full_control_session_t cvs_session; +} __attribute__((packed)); + +struct cvs_destroy_session_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvs_cache_calibration_data_cmd { + struct apr_hdr hdr; +} __attribute__ ((packed)); + +struct cvs_set_mute_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_mute_t cvs_set_mute; +} __attribute__((packed)); + +struct cvs_set_media_type_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_media_type_t media_type; +} __attribute__((packed)); + +struct cvs_send_dec_buf_cmd { + struct apr_hdr hdr; + struct vss_istream_evt_send_dec_buffer_t dec_buf; +} __attribute__((packed)); + +struct cvs_set_amr_enc_rate_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_voc_amr_set_enc_rate_t amr_rate; +} __attribute__((packed)); + +struct cvs_set_amrwb_enc_rate_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_voc_amrwb_set_enc_rate_t amrwb_rate; +} __attribute__((packed)); + +struct cvs_set_cdma_enc_minmax_rate_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_cdma_set_enc_minmax_rate_t cdma_rate; +} __attribute__((packed)); + +struct cvs_set_enc_dtx_mode_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_enc_dtx_mode_t dtx_mode; +} __attribute__((packed)); + +struct cvs_start_record_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_start_record_t rec_mode; +} __attribute__((packed)); + +/* TO CVP commands */ + +#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION 0x000100C3 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C + +#define VSS_IVOCPROC_CMD_SET_DEVICE 0x000100C4 + +#define VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA 0x000110E3 + +#define VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE 0x000110E4 + +#define VSS_IVOCPROC_CMD_SET_VP3_DATA 0x000110EB + +#define VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX 0x000110EE + +#define VSS_IVOCPROC_CMD_ENABLE 0x000100C6 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IVOCPROC_CMD_DISABLE 0x000110E1 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IVOCPROC_TOPOLOGY_ID_NONE 0x00010F70 +#define VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS 0x00010F71 +#define VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE 0x00010F72 + +#define VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT 0x00010F77 + +/* Newtwork IDs */ +#define VSS_NETWORK_ID_DEFAULT 0x00010037 +#define VSS_NETWORK_ID_VOIP_NB 0x00011240 +#define VSS_NETWORK_ID_VOIP_WB 0x00011241 +#define VSS_NETWORK_ID_VOIP_WV 0x00011242 + +/* Media types */ +#define VSS_MEDIA_ID_EVRC_MODEM 0x00010FC2 +/* 80-VF690-47 CDMA enhanced variable rate vocoder modem format. */ +#define VSS_MEDIA_ID_AMR_NB_MODEM 0x00010FC6 +/* 80-VF690-47 UMTS AMR-NB vocoder modem format. */ +#define VSS_MEDIA_ID_AMR_WB_MODEM 0x00010FC7 +/* 80-VF690-47 UMTS AMR-WB vocoder modem format. */ +#define VSS_MEDIA_ID_PCM_NB 0x00010FCB +/* Linear PCM (16-bit, little-endian). */ +#define VSS_MEDIA_ID_G711_ALAW 0x00010FCD +/* G.711 a-law (contains two 10ms vocoder frames). */ +#define VSS_MEDIA_ID_G711_MULAW 0x00010FCE +/* G.711 mu-law (contains two 10ms vocoder frames). */ +#define VSS_MEDIA_ID_G729 0x00010FD0 +/* G.729AB (contains two 10ms vocoder frames. 
*/ + +#define VOICE_CMD_SET_PARAM 0x00011006 +#define VOICE_CMD_GET_PARAM 0x00011007 +#define VOICE_EVT_GET_PARAM_ACK 0x00011008 + +struct vss_ivocproc_cmd_create_full_control_session_t { + uint16_t direction; + /* + * stream direction. + * 0 : TX only + * 1 : RX only + * 2 : TX and RX + */ + uint32_t tx_port_id; + /* + * TX device port ID which vocproc will connect to. If not supplying a + * port ID set to VSS_IVOCPROC_PORT_ID_NONE. + */ + uint32_t tx_topology_id; + /* + * Tx leg topology ID. If not supplying a topology ID set to + * VSS_IVOCPROC_TOPOLOGY_ID_NONE. + */ + uint32_t rx_port_id; + /* + * RX device port ID which vocproc will connect to. If not supplying a + * port ID set to VSS_IVOCPROC_PORT_ID_NONE. + */ + uint32_t rx_topology_id; + /* + * Rx leg topology ID. If not supplying a topology ID set to + * VSS_IVOCPROC_TOPOLOGY_ID_NONE. + */ + int32_t network_id; + /* + * Network ID. (Refer to VSS_NETWORK_ID_XXX). If not supplying a network + * ID set to VSS_NETWORK_ID_DEFAULT. + */ +} __attribute__((packed)); + +struct vss_ivocproc_cmd_set_device_t { + uint32_t tx_port_id; + /**< + * TX device port ID which vocproc will connect to. + * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port. + */ + uint32_t tx_topology_id; + /**< + * TX leg topology ID. + * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any + * pre/post-processing blocks and is pass-through. + */ + int32_t rx_port_id; + /**< + * RX device port ID which vocproc will connect to. + * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port. + */ + uint32_t rx_topology_id; + /**< + * RX leg topology ID. + * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any + * pre/post-processing blocks and is pass-through. + */ +} __attribute__((packed)); + +struct vss_ivocproc_cmd_set_volume_index_t { + uint16_t vol_index; + /**< + * Volume index utilized by the vocproc to index into the volume table + * provided in VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE and set + * volume on the VDSP. + */ +} __attribute__((packed)); + +struct cvp_create_full_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_ivocproc_cmd_create_full_control_session_t cvp_session; +} __attribute__ ((packed)); + +struct cvp_command { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_set_device_cmd { + struct apr_hdr hdr; + struct vss_ivocproc_cmd_set_device_t cvp_set_device; +} __attribute__ ((packed)); + +struct cvp_cache_calibration_data_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_cache_volume_calibration_table_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_set_vp3_data_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_set_rx_volume_index_cmd { + struct apr_hdr hdr; + struct vss_ivocproc_cmd_set_volume_index_t cvp_set_vol_idx; +} __attribute__((packed)); + +/* CB for up-link packets. */ +typedef void (*ul_cb_fn)(uint8_t *voc_pkt, + uint32_t pkt_len, + void *private_data); + +/* CB for down-link packets. 
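+ * Called when the stream needs an Rx packet (see the
+ * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER description above): the client is
+ * expected to copy a decoder packet into voc_pkt and store its length
+ * through *pkt_len.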
*/ +typedef void (*dl_cb_fn)(uint8_t *voc_pkt, + uint32_t *pkt_len, + void *private_data); + + +struct mvs_driver_info { + uint32_t media_type; + uint32_t rate; + uint32_t network_type; + uint32_t dtx_mode; + ul_cb_fn ul_cb; + dl_cb_fn dl_cb; + void *private_data; +}; + +struct incall_rec_info { + uint32_t pending; + uint32_t rec_mode; +}; + +struct incall_music_info { + uint32_t pending; + uint32_t playing; +}; + +struct voice_data { + int voc_state;/*INIT, CHANGE, RELEASE, RUN */ + uint32_t voc_path; + uint32_t adsp_version; + + wait_queue_head_t mvm_wait; + wait_queue_head_t cvs_wait; + wait_queue_head_t cvp_wait; + + uint32_t device_events; + + /* cache the values related to Rx and Tx */ + struct device_data dev_rx; + struct device_data dev_tx; + + /* these default values are for all devices */ + uint32_t default_mute_val; + uint32_t default_vol_val; + uint32_t default_sample_val; + + /* call status */ + int v_call_status; /* Start or End */ + + /* APR to MVM in the modem */ + void *apr_mvm; + /* APR to CVS in the modem */ + void *apr_cvs; + /* APR to CVP in the modem */ + void *apr_cvp; + + /* APR to MVM in the Q6 */ + void *apr_q6_mvm; + /* APR to CVS in the Q6 */ + void *apr_q6_cvs; + /* APR to CVP in the Q6 */ + void *apr_q6_cvp; + + u32 mvm_state; + u32 cvs_state; + u32 cvp_state; + + /* Handle to MVM in the modem */ + u16 mvm_handle; + /* Handle to CVS in the modem */ + u16 cvs_handle; + /* Handle to CVP in the modem */ + u16 cvp_handle; + + /* Handle to MVM in the Q6 */ + u16 mvm_q6_handle; + /* Handle to CVS in the Q6 */ + u16 cvs_q6_handle; + /* Handle to CVP in the Q6 */ + u16 cvp_q6_handle; + + struct mutex lock; + + struct mvs_driver_info mvs_info; + + struct incall_rec_info rec_info; + + struct incall_music_info music_info; +}; + +int voice_set_voc_path_full(uint32_t set); + +void voice_register_mvs_cb(ul_cb_fn ul_cb, + dl_cb_fn dl_cb, + void *private_data); + +void voice_config_vocoder(uint32_t media_type, + uint32_t rate, + uint32_t network_type, + uint32_t dtx_mode); + +int voice_start_record(uint32_t rec_mode, uint32_t set); + +int voice_start_playback(uint32_t set); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h new file mode 100644 index 00000000..e07ad025 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __MACH_QDSP6V2_SNDDEV_ECODEC_H +#define __MACH_QDSP6V2_SNDDEV_ECODEC_H +#include + +struct snddev_ecodec_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + u8 channel_mode; + u32 conf_pcm_ctl_val; + u32 conf_aux_codec_intf; + u32 conf_data_format_padding_val; +}; + +struct q6v2audio_ecodec_ops { + void (*bt_sco_enable)(int en); +}; + +void htc_8x60_register_ecodec_ops(struct q6v2audio_ecodec_ops *ops); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h new file mode 100644 index 00000000..cdc81a1f --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __MACH_QDSP6_V2_SNDDEV_HDMI_H +#define __MACH_QDSP6_V2_SNDDEV_HDMI_H + +struct snddev_hdmi_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + u32 acdb_id; /* Audio Cal purpose */ + u8 channel_mode; + u32 default_sample_rate; +}; +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h new file mode 100644 index 00000000..06f31f56 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h @@ -0,0 +1,98 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __MACH_QDSP6V2_SNDDEV_ICODEC_H +#define __MACH_QDSP6V2_SNDDEV_ICODEC_H +#include +#include +#include +#include + +struct snddev_icodec_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + /* Adie profile */ + struct adie_codec_dev_profile *profile; + /* Afe setting */ + u8 channel_mode; + u32 default_sample_rate; + void (*pamp_on) (int on); + void (*voltage_on) (int on); + u32 dev_vol_type; + u32 aic3254_id; + u32 aic3254_voc_id; + u32 default_aic3254_id; +}; + +/* Context for each internal codec sound device */ +struct snddev_icodec_state { + struct snddev_icodec_data *data; + struct adie_codec_path *adie_path; + u32 sample_rate; + u32 enabled; +}; + +struct q6v2audio_analog_ops { + void (*speaker_enable)(int en); + void (*headset_enable)(int en); + void (*handset_enable)(int en); + void (*bt_sco_enable)(int en); + void (*headset_speaker_enable)(int en); + void (*int_mic_enable)(int en); + void (*back_mic_enable)(int en); + void (*ext_mic_enable)(int en); + void (*stereo_mic_enable)(int en); + void (*usb_headset_enable)(int en); + void (*fm_headset_enable)(int en); + void (*fm_speaker_enable)(int en); + void (*voltage_on) (int on); +}; + +struct q6v2audio_icodec_ops { + int (*support_aic3254) (void); + int (*support_adie) (void); + int (*is_msm_i2s_slave) (void); + int (*support_aic3254_use_mclk) (void); +}; + +struct q6v2audio_aic3254_ops { + void (*aic3254_set_mode)(int config, int mode); +}; + +struct aic3254_info { + u32 dev_id; + u32 path_id; +}; + + +void htc_8x60_register_analog_ops(struct q6v2audio_analog_ops *ops); +void htc_8x60_register_aic3254_ops(struct q6v2audio_aic3254_ops *ops); +int update_aic3254_info(struct aic3254_info *info); +void htc_8x60_register_icodec_ops(struct q6v2audio_icodec_ops *ops); +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/Makefile b/arch/arm/mach-msm/qdsp6v3/Makefile new file mode 100644 index 00000000..a0aca30c --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/Makefile @@ -0,0 +1,15 @@ +obj-$(CONFIG_MSM8X60_RTAC) += 
rtac.o +obj-y += audio_dev_ctl.o +obj-y += board-msm8x60-audio.o +obj-$(CONFIG_TIMPANI_CODEC) += snddev_icodec.o +obj-y += snddev_ecodec.o snddev_mi2s.o snddev_virtual.o +obj-y += apr.o apr_tal.o q6core.o dsp_debug.o +obj-y += audio_acdb.o +obj-y += q6asm.o q6adm.o q6afe.o +obj-y += pcm_out.o pcm_in.o fm.o +obj-y += audio_lpa.o +obj-y += q6voice.o +obj-y += snddev_hdmi.o +obj-y += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o audio_utils.o +obj-y += audio_mvs.o +obj-y += audio_wma.o audio_wmapro.o audio_aac.o diff --git a/arch/arm/mach-msm/qdsp6v3/aac_in.c b/arch/arm/mach-msm/qdsp6v3/aac_in.c new file mode 100644 index 00000000..f01a1414 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/aac_in.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 5 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((1536+sizeof(struct meta_out_dsp)) * 5)) + +#define AAC_FORMAT_ADTS 65535 + +void q6asm_aac_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} +/* ------------------- device --------------------- */ +static long aac_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_aac_enc_config *enc_cfg; + struct msm_audio_aac_config *aac_config; + uint32_t aac_mode = AAC_ENC_MODE_AAC_LC; + + enc_cfg = audio->enc_cfg; + aac_config = audio->codec_cfg; + /* ENCODE CFG (after new set of API's are published )bharath*/ + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled 
== 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + pr_debug("%s:sbr_ps_flag = %d, sbr_flag = %d\n", __func__, + aac_config->sbr_ps_on_flag, aac_config->sbr_on_flag); + if (aac_config->sbr_ps_on_flag) + aac_mode = AAC_ENC_MODE_EAAC_P; + else if (aac_config->sbr_on_flag) + aac_mode = AAC_ENC_MODE_AAC_P; + else + aac_mode = AAC_ENC_MODE_AAC_LC; + + rc = q6asm_enc_cfg_blk_aac(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->sample_rate, + enc_cfg->channels, + enc_cfg->bit_rate, + aac_mode, + enc_cfg->stream_format); + if (rc < 0) { + pr_aud_err("%s:session id %d: cmd media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure\ + failed rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: Rxed AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + if (enc_cfg->channels == CH_MODE_MONO) + cfg.channels = 1; + else + cfg.channels = 2; + cfg.sample_rate = enc_cfg->sample_rate; + cfg.bit_rate = enc_cfg->bit_rate; + /* ADTS(-1) to ADTS(0x00), RAW(0x00) to RAW(0x03) */ + cfg.stream_format = ((enc_cfg->stream_format == \ + 0x00) ? 
AUDIO_AAC_FORMAT_ADTS : AUDIO_AAC_FORMAT_RAW); + pr_debug("%s:session id %d: Get-aac-cfg: format=%d sr=%d\ + bitrate=%d\n", __func__, audio->ac->session, + cfg.stream_format, cfg.sample_rate, cfg.bit_rate); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + pr_debug("%s:session id %d: Set-aac-cfg: stream=%d\n", __func__, + audio->ac->session, cfg.stream_format); + + if ((cfg.stream_format != AUDIO_AAC_FORMAT_RAW) && + (cfg.stream_format != AAC_FORMAT_ADTS)) { + pr_aud_err("%s:session id %d: unsupported AAC format\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + + if (cfg.channels == 1) { + cfg.channels = CH_MODE_MONO; + } else if (cfg.channels == 2) { + cfg.channels = CH_MODE_STEREO; + } else { + rc = -EINVAL; + break; + } + if ((cfg.sample_rate < 8000) && (cfg.sample_rate > 48000)) { + pr_aud_err("%s: ERROR in setting samplerate = %d\n", + __func__, cfg.sample_rate); + rc = -EINVAL; + break; + } + enc_cfg->sample_rate = cfg.sample_rate; + enc_cfg->channels = cfg.channels; + enc_cfg->bit_rate = cfg.bit_rate; + enc_cfg->stream_format = + ((cfg.stream_format == AUDIO_AAC_FORMAT_RAW) ? \ + 0x03 : 0x00); + pr_debug("%s:session id %d: Set-aac-cfg:SR= 0x%x ch=0x%x\ + bitrate=0x%x, format(adts/raw) = %d\n", + __func__, audio->ac->session, enc_cfg->sample_rate, + enc_cfg->channels, enc_cfg->bit_rate, + enc_cfg->stream_format); + break; + } + case AUDIO_GET_AAC_CONFIG: { + if (copy_to_user((void *)arg, &audio->codec_cfg, + sizeof(struct msm_audio_aac_config))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config aac_cfg; + struct msm_audio_aac_config *audio_aac_cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + audio_aac_cfg = audio->codec_cfg; + + if (copy_from_user(&aac_cfg, (void *)arg, + sizeof(struct msm_audio_aac_config))) { + rc = -EFAULT; + break; + } + pr_debug("%s:session id %d: AUDIO_SET_AAC_CONFIG: sbr_flag = %d" + " sbr_ps_flag = %d\n", __func__, + audio->ac->session, aac_cfg.sbr_on_flag, + aac_cfg.sbr_ps_on_flag); + audio_aac_cfg->sbr_on_flag = aac_cfg.sbr_on_flag; + audio_aac_cfg->sbr_ps_on_flag = aac_cfg.sbr_ps_on_flag; + if ((audio_aac_cfg->sbr_on_flag == 1) || + (audio_aac_cfg->sbr_ps_on_flag == 1)) { + if (enc_cfg->sample_rate < 24000) { + pr_aud_err("%s: ERROR in setting samplerate = %d" + "\n", __func__, enc_cfg->sample_rate); + rc = -EINVAL; + break; + } + } + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int aac_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_aac_enc_config *enc_cfg; + struct msm_audio_aac_config *aac_config; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s:Could not allocate memory for aac\ + driver\n", __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_aac_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for aac\ + config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config), + GFP_KERNEL); + 
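+ /* codec_cfg holds the AAC bitstream options (ADTS/RAW format, audio
+  * object, SBR/PS flags) later updated through AUDIO_SET_AAC_CONFIG,
+  * while enc_cfg above holds the encoder sample-rate, channel and
+  * bit-rate settings. */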
if (audio->codec_cfg == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for aac\ + config\n", __func__, audio->ac->session); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + aac_config = audio->codec_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 1536; + audio->max_frames_per_buf = 5; + enc_cfg->sample_rate = 8000; + enc_cfg->channels = 1; + enc_cfg->bit_rate = 16000; + enc_cfg->stream_format = 0x00;/* 0:ADTS, 3:RAW */ + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + aac_config->format = AUDIO_AAC_FORMAT_ADTS; + aac_config->audio_object = AUDIO_AAC_OBJECT_LC; + aac_config->sbr_on_flag = 0; + aac_config->sbr_ps_on_flag = 0; + aac_config->channel_configuration = 1; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_aac_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s: Could not allocate memory for\ + audio client\n", __func__); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + /* open aac encoder in tunnel mode */ + audio->buf_cfg.frames_per_buf = 0x01; + + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_MPEG4_AAC, + FORMAT_LINEAR_PCM); + + if (rc < 0) { + pr_aud_err("%s:session id %d: NT Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + audio->buf_cfg.meta_info_enable = 0x01; + pr_aud_info("%s:session id %d: NT mode encoder success\n", __func__, + audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_MPEG4_AAC); + + if (rc < 0) { + pr_aud_err("%s:session id %d: Tunnel Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, + audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + audio->buf_cfg.meta_info_enable = 0x00; + pr_aud_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = aac_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = aac_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice 
audio_aac_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_aac_in", + .fops = &audio_in_fops, +}; + +static int __init aac_in_init(void) +{ + return misc_register(&audio_aac_in_misc); +} +device_initcall(aac_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/amrnb_in.c b/arch/arm/mach-msm/qdsp6v3/amrnb_in.c new file mode 100644 index 00000000..35984c38 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/amrnb_in.c @@ -0,0 +1,330 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((32+sizeof(struct meta_out_dsp)) * 10)) + +void q6asm_amrnb_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode - %d\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ +static long amrnb_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + rc = q6asm_enc_cfg_blk_amrnb(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->band_mode, + enc_cfg->dtx_enable); + + if (rc < 0) { + pr_aud_err("%s:session id %d: cmd amrnb media format block\ + failed\n", __func__, audio->ac->session); 
+ break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, + audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure failed\ + rc=%d\n", __func__, + audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:AUDIO_STOP\n", __func__); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", __func__, + audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_AMRNB_ENC_CONFIG_V2: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_amrnb_enc_config_v2))) + rc = -EFAULT; + break; + } + case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { + struct msm_audio_amrnb_enc_config_v2 cfg; + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + enc_cfg = audio->enc_cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_amrnb_enc_config_v2))) { + rc = -EFAULT; + break; + } + if (cfg.band_mode > 8 || + cfg.band_mode < 1) { + pr_aud_err("%s:session id %d: invalid band mode\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + /* AMR NB encoder accepts values between 0-7 + while openmax provides value between 1-8 + as per spec */ + enc_cfg->band_mode = (cfg.band_mode - 1); + enc_cfg->dtx_enable = (cfg.dtx_enable ? 
1 : 0); + enc_cfg->frame_format = 0; + pr_debug("%s:session id %d: band_mode = 0x%x dtx_enable=0x%x\n", + __func__, audio->ac->session, + enc_cfg->band_mode, enc_cfg->dtx_enable); + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int amrnb_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for amrnb\ + driver\n", __func__, audio->ac->session); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrnb_enc_config_v2), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for aac\ + config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 32; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->band_mode = 7; + enc_cfg->dtx_enable = 0; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_amrnb_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s:session id %d: Could not allocate memory for audio\ + client\n", __func__, audio->ac->session); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open amrnb encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_AMRNB, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_AMRNB); + if (rc < 0) { + pr_aud_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, audio->ac->session, + rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: T mode encoder success\n", + __func__, audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = amrnb_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, 
audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = amrnb_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_amrnb_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrnb_in", + .fops = &audio_in_fops, +}; + +static int __init amrnb_in_init(void) +{ + return misc_register(&audio_amrnb_in_misc); +} + +device_initcall(amrnb_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/apr.c b/arch/arm/mach-msm/qdsp6v3/apr.c new file mode 100644 index 00000000..8ee12c57 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/apr.c @@ -0,0 +1,681 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "apr_tal.h" +#include "dsp_debug.h" + +struct apr_q6 q6; +struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX]; +static atomic_t dsp_state; +static atomic_t modem_state; + +static wait_queue_head_t dsp_wait; +static wait_queue_head_t modem_wait; +/* Subsystem restart: QDSP6 data, functions */ +static struct workqueue_struct *apr_reset_workqueue; +static void apr_reset_deregister(struct work_struct *work); +struct apr_reset_work { + void *handle; + struct work_struct work; +}; + + +int apr_send_pkt(void *handle, uint32_t *buf) +{ + struct apr_svc *svc = handle; + struct apr_client *clnt; + struct apr_hdr *hdr; + uint16_t dest_id; + uint16_t client_id; + uint16_t w_len; + unsigned long flags; + + if (!handle || !buf) { + pr_aud_err("APR: Wrong parameters\n"); + return -EINVAL; + } + if (svc->need_reset) { + pr_aud_err("apr: send_pkt service need reset\n"); + return -ENETRESET; + } + + if ((svc->dest_id == APR_DEST_QDSP6) && + (atomic_read(&dsp_state) == 0)) { + pr_aud_err("apr: Still dsp is not Up\n"); + return -ENETRESET; + } else if ((svc->dest_id == APR_DEST_MODEM) && + (atomic_read(&modem_state) == 0)) { + pr_aud_err("apr: Still Modem is not Up\n"); + return -ENETRESET; + } + + + spin_lock_irqsave(&svc->w_lock, flags); + dest_id = svc->dest_id; + client_id = svc->client_id; + clnt = &client[dest_id][client_id]; + + if (!client[dest_id][client_id].handle) { + pr_aud_err("APR: Still service is not yet opened\n"); + spin_unlock_irqrestore(&svc->w_lock, flags); + return -EINVAL; + } + hdr = (struct apr_hdr *)buf; + + hdr->src_domain = APR_DOMAIN_APPS; + hdr->src_svc = svc->id; + if (dest_id == APR_DEST_MODEM) + hdr->dest_domain = APR_DOMAIN_MODEM; + else if (dest_id == APR_DEST_QDSP6) + hdr->dest_domain = 
APR_DOMAIN_ADSP; + + hdr->dest_svc = svc->id; + + w_len = apr_tal_write(clnt->handle, buf, hdr->pkt_size); + if (w_len != hdr->pkt_size) + pr_aud_err("Unable to write APR pkt successfully: %d\n", w_len); + spin_unlock_irqrestore(&svc->w_lock, flags); + + return w_len; +} + +static void apr_cb_func(void *buf, int len, void *priv) +{ + struct apr_client_data data; + struct apr_client *apr_client; + struct apr_svc *c_svc; + struct apr_hdr *hdr; + uint16_t hdr_size; + uint16_t msg_type; + uint16_t ver; + uint16_t src; + uint16_t svc; + uint16_t clnt; + int i; + int temp_port = 0; + uint32_t *ptr; + + pr_debug("APR2: len = %d\n", len); + ptr = buf; + pr_debug("\n*****************\n"); + for (i = 0; i < len/4; i++) + pr_debug("%x ", ptr[i]); + pr_debug("\n"); + pr_debug("\n*****************\n"); + + if (!buf || len <= APR_HDR_SIZE) { + pr_aud_err("APR: Improper apr pkt received:%p %d\n", + buf, len); + return; + } + hdr = buf; + + ver = hdr->hdr_field; + ver = (ver & 0x000F); + if (ver > APR_PKT_VER + 1) { + pr_aud_err("APR: Wrong version: %d\n", ver); + return; + } + + hdr_size = hdr->hdr_field; + hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4; + if (hdr_size < APR_HDR_SIZE) { + pr_aud_err("APR: Wrong hdr size:%d\n", hdr_size); + return; + } + + if (hdr->pkt_size < APR_HDR_SIZE) { + pr_aud_err("APR: Wrong paket size\n"); + return; + } + msg_type = hdr->hdr_field; + msg_type = (msg_type >> 0x08) & 0x0003; + if (msg_type >= APR_MSG_TYPE_MAX && + msg_type != APR_BASIC_RSP_RESULT) { + pr_aud_err("APR: Wrong message type: %d\n", msg_type); + return; + } + + if (hdr->src_domain >= APR_DOMAIN_MAX || + hdr->dest_domain >= APR_DOMAIN_MAX || + hdr->src_svc >= APR_SVC_MAX || + hdr->dest_svc >= APR_SVC_MAX) { + pr_aud_err("APR: Wrong APR header\n"); + return; + } + + svc = hdr->dest_svc; + if (hdr->src_domain == APR_DOMAIN_MODEM) { + src = APR_DEST_MODEM; + if (svc == APR_SVC_MVS || svc == APR_SVC_MVM || + svc == APR_SVC_CVS || svc == APR_SVC_CVP || + svc == APR_SVC_TEST_CLIENT) + clnt = APR_CLIENT_VOICE; + else { + pr_aud_err("APR: Wrong svc :%d\n", svc); + return; + } + } else if (hdr->src_domain == APR_DOMAIN_ADSP) { + src = APR_DEST_QDSP6; + if (svc == APR_SVC_AFE || svc == APR_SVC_ASM || + svc == APR_SVC_VSM || svc == APR_SVC_VPM || + svc == APR_SVC_ADM || svc == APR_SVC_ADSP_CORE || + svc == APR_SVC_TEST_CLIENT || svc == APR_SVC_ADSP_MVM || + svc == APR_SVC_ADSP_CVS || svc == APR_SVC_ADSP_CVP) + clnt = APR_CLIENT_AUDIO; + else { + pr_aud_err("APR: Wrong svc :%d\n", svc); + return; + } + } else { + pr_aud_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); + return; + } + + pr_debug("src =%d clnt = %d\n", src, clnt); + apr_client = &client[src][clnt]; + for (i = 0; i < APR_SVC_MAX; i++) + if (apr_client->svc[i].id == svc) { + pr_debug("%d\n", apr_client->svc[i].id); + c_svc = &apr_client->svc[i]; + break; + } + + if (i == APR_SVC_MAX) { + pr_aud_err("APR: service is not registered\n"); + return; + } + pr_debug("svc_idx = %d\n", i); + pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id, + c_svc->client_id, c_svc->fn, c_svc->priv); + data.payload_size = hdr->pkt_size - hdr_size; + data.opcode = hdr->opcode; + data.src = src; + data.src_port = hdr->src_port; + data.dest_port = hdr->dest_port; + data.token = hdr->token; + data.msg_type = msg_type; + if (data.payload_size > 0) + data.payload = (char *)hdr + hdr_size; + + temp_port = ((data.src_port >> 8) * 8) + (data.src_port & 0xFF); + pr_debug("port = %d t_port = %d\n", data.src_port, temp_port); + if (c_svc->port_cnt && c_svc->port_fn[temp_port]) 
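+ /* A callback registered for this particular port (apr_register()
+  * called with a real src_port) takes precedence over the service-wide
+  * callback registered with src_port == 0xFFFFFFFF. */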
+ c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]); + else if (c_svc->fn) + c_svc->fn(&data, c_svc->priv); + else + pr_aud_err("APR: Rxed a packet for NULL callback\n"); +} + +struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, + uint32_t src_port, void *priv) +{ + int client_id = 0; + int svc_idx = 0; + int svc_id = 0; + int dest_id = 0; + int temp_port = 0; + struct apr_svc *svc = NULL; + int rc = 0; + + if (!dest || !svc_name || !svc_fn) + return NULL; + + if (!strcmp(dest, "ADSP")) + dest_id = APR_DEST_QDSP6; + else if (!strcmp(dest, "MODEM")) { + dest_id = APR_DEST_MODEM; + } else { + pr_aud_err("APR: wrong destination\n"); + goto done; + } + + if ((dest_id == APR_DEST_QDSP6) && + (atomic_read(&dsp_state) == 0)) { + rc = wait_event_timeout(dsp_wait, + (atomic_read(&dsp_state) == 1), 5*HZ); + if (rc == 0) { + pr_aud_err("apr: Still dsp is not Up\n"); + return NULL; + } + } else if ((dest_id == APR_DEST_MODEM) && + (atomic_read(&modem_state) == 0)) { + rc = wait_event_timeout(modem_wait, + (atomic_read(&modem_state) == 1), 5*HZ); + if (rc == 0) { + pr_aud_err("apr: Still Modem is not Up\n"); + return NULL; + } + } + + if (!strcmp(svc_name, "AFE")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 0; + svc_id = APR_SVC_AFE; + } else if (!strcmp(svc_name, "ASM")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 1; + svc_id = APR_SVC_ASM; + } else if (!strcmp(svc_name, "ADM")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 2; + svc_id = APR_SVC_ADM; + } else if (!strcmp(svc_name, "CORE")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 3; + svc_id = APR_SVC_ADSP_CORE; + } else if (!strcmp(svc_name, "TEST")) { + if (dest_id == APR_DEST_QDSP6) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 4; + } else { + client_id = APR_CLIENT_VOICE; + svc_idx = 7; + } + svc_id = APR_SVC_TEST_CLIENT; + } else if (!strcmp(svc_name, "VSM")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 0; + svc_id = APR_SVC_VSM; + } else if (!strcmp(svc_name, "VPM")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 1; + svc_id = APR_SVC_VPM; + } else if (!strcmp(svc_name, "MVS")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 2; + svc_id = APR_SVC_MVS; + } else if (!strcmp(svc_name, "MVM")) { + if (dest_id == APR_DEST_MODEM) { + client_id = APR_CLIENT_VOICE; + svc_idx = 3; + svc_id = APR_SVC_MVM; + } else { + client_id = APR_CLIENT_AUDIO; + svc_idx = 5; + svc_id = APR_SVC_ADSP_MVM; + } + } else if (!strcmp(svc_name, "CVS")) { + if (dest_id == APR_DEST_MODEM) { + client_id = APR_CLIENT_VOICE; + svc_idx = 4; + svc_id = APR_SVC_CVS; + } else { + client_id = APR_CLIENT_AUDIO; + svc_idx = 6; + svc_id = APR_SVC_ADSP_CVS; + } + } else if (!strcmp(svc_name, "CVP")) { + if (dest_id == APR_DEST_MODEM) { + client_id = APR_CLIENT_VOICE; + svc_idx = 5; + svc_id = APR_SVC_CVP; + } else { + client_id = APR_CLIENT_AUDIO; + svc_idx = 7; + svc_id = APR_SVC_ADSP_CVP; + } + } else if (!strcmp(svc_name, "SRD")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 6; + svc_id = APR_SVC_SRD; + } else { + pr_aud_err("APR: Wrong svc name\n"); + goto done; + } + + pr_debug("svc name = %s c_id = %d dest_id = %d\n", + svc_name, client_id, dest_id); + mutex_lock(&q6.lock); + if (q6.state == APR_Q6_NOIMG) { + q6.pil = pil_get("q6"); + if (!q6.pil) { + pr_aud_err("APR: Unable to load q6 image\n"); + mutex_unlock(&q6.lock); + return svc; + } + q6.state = APR_Q6_LOADED; + } + mutex_unlock(&q6.lock); + mutex_lock(&client[dest_id][client_id].m_lock); + if (!client[dest_id][client_id].handle) { + client[dest_id][client_id].handle = 
apr_tal_open(client_id, + dest_id, APR_DL_SMD, apr_cb_func, NULL); + if (!client[dest_id][client_id].handle) { + svc = NULL; + pr_aud_err("APR: Unable to open handle\n"); + mutex_unlock(&client[dest_id][client_id].m_lock); + goto done; + } + } + mutex_unlock(&client[dest_id][client_id].m_lock); + svc = &client[dest_id][client_id].svc[svc_idx]; + mutex_lock(&svc->m_lock); + client[dest_id][client_id].id = client_id; + if (svc->need_reset) { + mutex_unlock(&svc->m_lock); + pr_aud_err("APR: Service needs reset\n"); + goto done; + } + svc->priv = priv; + svc->id = svc_id; + svc->dest_id = dest_id; + svc->client_id = client_id; + if (src_port != 0xFFFFFFFF) { + temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF); + if (temp_port >= APR_MAX_PORTS) { + mutex_unlock(&svc->m_lock); + pr_aud_err("APR: illegal port ID %d\n", temp_port); + svc = NULL; + goto done; + } + pr_debug("port = %d t_port = %d\n", src_port, temp_port); + if (!svc->port_cnt && !svc->svc_cnt) + client[dest_id][client_id].svc_cnt++; + svc->port_cnt++; + svc->port_fn[temp_port] = svc_fn; + svc->port_priv[temp_port] = priv; + } else { + if (!svc->fn) { + if (!svc->port_cnt && !svc->svc_cnt) + client[dest_id][client_id].svc_cnt++; + svc->fn = svc_fn; + if (svc->port_cnt) + svc->svc_cnt++; + } + } + + mutex_unlock(&svc->m_lock); +done: + return svc; +} + +static void apr_reset_deregister(struct work_struct *work) +{ + struct apr_svc *handle = NULL; + struct apr_reset_work *apr_reset = + container_of(work, struct apr_reset_work, work); + + handle = apr_reset->handle; + pr_debug("%s:handle[%p]\n", __func__, handle); + apr_deregister(handle); + kfree(apr_reset); + msleep(5); +} + +int apr_deregister(void *handle) +{ + struct apr_svc *svc = handle; + struct apr_client *clnt; + uint16_t dest_id; + uint16_t client_id; + + if (!handle) + return -EINVAL; + + mutex_lock(&svc->m_lock); + dest_id = svc->dest_id; + client_id = svc->client_id; + clnt = &client[dest_id][client_id]; + + if (svc->port_cnt > 0 || svc->svc_cnt > 0) { + if (svc->port_cnt) + svc->port_cnt--; + else if (svc->svc_cnt) + svc->svc_cnt--; + if (!svc->port_cnt && !svc->svc_cnt) { + client[dest_id][client_id].svc_cnt--; + svc->need_reset = 0x0; + } + } else if (client[dest_id][client_id].svc_cnt > 0) { + client[dest_id][client_id].svc_cnt--; + if (!client[dest_id][client_id].svc_cnt) { + svc->need_reset = 0x0; + pr_debug("%s: service is reset %p\n", __func__, svc); + } + } + + if (!svc->port_cnt && !svc->svc_cnt) { + svc->priv = NULL; + svc->id = 0; + svc->fn = NULL; + svc->dest_id = 0; + svc->client_id = 0; + svc->need_reset = 0x0; + } + if (client[dest_id][client_id].handle && + !client[dest_id][client_id].svc_cnt) { + apr_tal_close(client[dest_id][client_id].handle); + client[dest_id][client_id].handle = NULL; + } + mutex_unlock(&svc->m_lock); + + return 0; +} + +void apr_reset(void *handle) +{ + struct apr_reset_work *apr_reset_worker = NULL; + + if (!handle) + return; + pr_debug("%s: handle[%p]\n", __func__, handle); + + apr_reset_worker = kzalloc(sizeof(struct apr_reset_work), + GFP_ATOMIC); + if (apr_reset_worker == NULL || apr_reset_workqueue == NULL) { + pr_aud_err("%s: mem failure\n", __func__); + return; + } + apr_reset_worker->handle = handle; + INIT_WORK(&apr_reset_worker->work, apr_reset_deregister); + queue_work(apr_reset_workqueue, &apr_reset_worker->work); +} + +void change_q6_state(int state) +{ + mutex_lock(&q6.lock); + q6.state = state; + mutex_unlock(&q6.lock); +} + +int adsp_state(int state) +{ + pr_aud_info("dsp state = %d\n", state); + return 0; +} + 
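A minimal usage sketch of the client-side flow for the registration API above, not part of the patch itself: a hypothetical AFE client registers against the ADSP, receives packets through the service-level callback, and deregisters on teardown. The apr_fn callback signature (int32_t return) and the "apr.h" include are assumptions based on how apr_cb_func() invokes c_svc->fn(&data, c_svc->priv); all my_afe_* names are illustrative only.

#include <linux/errno.h>
#include "apr.h"	/* assumed: header declaring apr_register()/apr_deregister(),
			 * apr_fn and struct apr_client_data (not shown in this hunk) */

struct my_afe_state {
	uint32_t last_opcode;
};

/* Service-level callback: invoked from apr_cb_func() when no per-port
 * handler is registered.  RESET_EVENTS arrives here when the ADSP or
 * modem restarts (see dispatch_event() below). */
static int32_t my_afe_callback(struct apr_client_data *data, void *priv)
{
	struct my_afe_state *state = priv;

	state->last_opcode = data->opcode;
	return 0;
}

static struct apr_svc *my_afe_handle;
static struct my_afe_state my_afe_priv;

static int my_afe_attach(void)
{
	/* src_port = 0xFFFFFFFF selects the service-level callback path in
	 * apr_register() rather than a port_fn[] slot. */
	my_afe_handle = apr_register("ADSP", "AFE", my_afe_callback,
				     0xFFFFFFFF, &my_afe_priv);
	return my_afe_handle ? 0 : -ENODEV;
}

static void my_afe_detach(void)
{
	if (my_afe_handle) {
		apr_deregister(my_afe_handle);
		my_afe_handle = NULL;
	}
}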
+/* Dispatch the Reset events to Modem and audio clients */ +void dispatch_event(unsigned long code, unsigned short proc) +{ + struct apr_client *apr_client; + struct apr_client_data data; + struct apr_svc *svc; + uint16_t clnt; + int i, j; + + data.opcode = RESET_EVENTS; + data.reset_event = code; + data.reset_proc = proc; + + clnt = APR_CLIENT_AUDIO; + apr_client = &client[proc][clnt]; + for (i = 0; i < APR_SVC_MAX; i++) { + mutex_lock(&apr_client->svc[i].m_lock); + if (apr_client->svc[i].fn) { + apr_client->svc[i].need_reset = 0x1; + apr_client->svc[i].fn(&data, apr_client->svc[i].priv); + } + if (apr_client->svc[i].port_cnt) { + svc = &(apr_client->svc[i]); + svc->need_reset = 0x1; + for (j = 0; j < APR_MAX_PORTS; j++) + if (svc->port_fn[j]) + svc->port_fn[j](&data, + svc->port_priv[j]); + } + mutex_unlock(&apr_client->svc[i].m_lock); + } + + clnt = APR_CLIENT_VOICE; + apr_client = &client[proc][clnt]; + for (i = 0; i < APR_SVC_MAX; i++) { + mutex_lock(&apr_client->svc[i].m_lock); + if (apr_client->svc[i].fn) { + apr_client->svc[i].need_reset = 0x1; + apr_client->svc[i].fn(&data, apr_client->svc[i].priv); + } + if (apr_client->svc[i].port_cnt) { + svc = &(apr_client->svc[i]); + svc->need_reset = 0x1; + for (j = 0; j < APR_MAX_PORTS; j++) + if (svc->port_fn[j]) + svc->port_fn[j](&data, + svc->port_priv[j]); + } + mutex_unlock(&apr_client->svc[i].m_lock); + } +} + +static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + void *_cmd) +{ + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + pr_debug("M-Notify: Shutdown started\n"); + atomic_set(&modem_state, 0); + dispatch_event(code, APR_DEST_MODEM); + break; + case SUBSYS_AFTER_SHUTDOWN: + pr_debug("M-Notify: Shutdown Completed\n"); + break; + case SUBSYS_BEFORE_POWERUP: + pr_debug("M-notify: Bootup started\n"); + break; + case SUBSYS_AFTER_POWERUP: + if (atomic_read(&modem_state) == 0) { + atomic_set(&modem_state, 1); + wake_up(&modem_wait); + } + pr_debug("M-Notify: Bootup Completed\n"); + break; + default: + pr_aud_err("M-Notify: General: %lu\n", code); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block mnb = { + .notifier_call = modem_notifier_cb, +}; + +static int lpass_notifier_cb(struct notifier_block *this, unsigned long code, + void *_cmd) +{ + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + pr_debug("L-Notify: Shutdown started\n"); + atomic_set(&dsp_state, 0); + dispatch_event(code, APR_DEST_QDSP6); + break; + case SUBSYS_AFTER_SHUTDOWN: + pr_debug("L-Notify: Shutdown Completed\n"); + break; + case SUBSYS_BEFORE_POWERUP: + pr_debug("L-notify: Bootup started\n"); + break; + case SUBSYS_AFTER_POWERUP: + if (atomic_read(&dsp_state) == 0) { + atomic_set(&dsp_state, 1); + wake_up(&dsp_wait); + } + pr_debug("L-Notify: Bootup Completed\n"); + break; + default: + pr_aud_err("L-Notify: Generel: %lu\n", code); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block lnb = { + .notifier_call = lpass_notifier_cb, +}; + + +static int __init apr_init(void) +{ + int i, j, k; + + pr_aud_info("apr_probe\n"); + for (i = 0; i < APR_DEST_MAX; i++) + for (j = 0; j < APR_CLIENT_MAX; j++) { + mutex_init(&client[i][j].m_lock); + for (k = 0; k < APR_SVC_MAX; k++) { + mutex_init(&client[i][j].svc[k].m_lock); + spin_lock_init(&client[i][j].svc[k].w_lock); + } + } + mutex_init(&q6.lock); + dsp_debug_register(adsp_state); + apr_reset_workqueue = + create_singlethread_workqueue("apr_driver"); + if (!apr_reset_workqueue) + return -ENOMEM; + return 0; +} +device_initcall(apr_init); + +static int __init 
apr_late_init(void) +{ + void *ret; + init_waitqueue_head(&dsp_wait); + init_waitqueue_head(&modem_wait); + atomic_set(&dsp_state, 1); + atomic_set(&modem_state, 1); + ret = subsys_notif_register_notifier("modem", &mnb); + pr_debug("subsys_register_notifier: ret1 = %p\n", ret); + ret = subsys_notif_register_notifier("lpass", &lnb); + pr_debug("subsys_register_notifier: ret2 = %p\n", ret); + + return 0; +} +late_initcall(apr_late_init); diff --git a/arch/arm/mach-msm/qdsp6v3/apr_tal.c b/arch/arm/mach-msm/qdsp6v3/apr_tal.c new file mode 100644 index 00000000..44047bd8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/apr_tal.c @@ -0,0 +1,288 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apr_tal.h" +#include "../clock-8x60.h" + +static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = { + { + "apr_audio_svc", + "apr_voice_svc", + }, + { + "apr_audio_svc", + "apr_voice_svc", + }, +}; + +struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX]; + +int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len) +{ + int w_len; + unsigned long flags; + + + spin_lock_irqsave(&apr_ch->w_lock, flags); + if (smd_write_avail(apr_ch->ch) < len) { + spin_unlock_irqrestore(&apr_ch->w_lock, flags); + return -EAGAIN; + } + + w_len = smd_write(apr_ch->ch, data, len); + spin_unlock_irqrestore(&apr_ch->w_lock, flags); + pr_debug("apr_tal:w_len = %d\n", w_len); + + if (w_len != len) { + pr_aud_err("apr_tal: Error in write\n"); + return -ENETRESET; + } + return w_len; +} + +int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len) +{ + int rc = 0, retries = 0; + + if (!apr_ch->ch) + return -EINVAL; + + do { + if (rc == -EAGAIN) + udelay(50); + + rc = __apr_tal_write(apr_ch, data, len); + } while (rc == -EAGAIN && retries++ < 300); + + if (rc == -EAGAIN) + pr_aud_err("apr_tal: TIMEOUT for write\n"); + + return rc; +} + +static void apr_tal_notify(void *priv, unsigned event) +{ + struct apr_svc_ch_dev *apr_ch = priv; + int len, r_len, sz; + int pkt_cnt = 0; + unsigned long flags; + + pr_debug("event = %d\n", event); + switch (event) { + case SMD_EVENT_DATA: + pkt_cnt = 0; + spin_lock_irqsave(&apr_ch->lock, flags); +check_pending: + len = smd_read_avail(apr_ch->ch); + if (len < 0) { + pr_aud_err("apr_tal: Invalid Read Event :%d\n", len); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + sz = smd_cur_packet_size(apr_ch->ch); + if (sz < 0) { + pr_debug("pkt size is zero\n"); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + if (!len && !sz && !pkt_cnt) + goto check_write_avail; + if (!len) { + pr_debug("len = %d pkt_cnt = %d\n", len, pkt_cnt); + spin_unlock_irqrestore(&apr_ch->lock, 
flags); + return; + } + r_len = smd_read_from_cb(apr_ch->ch, apr_ch->data, len); + if (len != r_len) { + pr_aud_err("apr_tal: Invalid Read\n"); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + pkt_cnt++; + pr_debug("%d %d %d\n", len, sz, pkt_cnt); + if (apr_ch->func) + apr_ch->func(apr_ch->data, r_len, apr_ch->priv); + goto check_pending; +check_write_avail: + if (smd_write_avail(apr_ch->ch)) + wake_up(&apr_ch->wait); + spin_unlock_irqrestore(&apr_ch->lock, flags); + break; + case SMD_EVENT_OPEN: + pr_aud_info("apr_tal: SMD_EVENT_OPEN\n"); + apr_ch->smd_state = 1; + wake_up(&apr_ch->wait); + break; + case SMD_EVENT_CLOSE: + pr_aud_info("apr_tal: SMD_EVENT_CLOSE\n"); + break; + } +} + +struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest, + uint32_t dl, apr_svc_cb_fn func, void *priv) +{ + int rc; + + if ((svc >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) || + (dl >= APR_DL_MAX)) { + pr_aud_err("apr_tal: Invalid params\n"); + return NULL; + } + + if (apr_svc_ch[dl][dest][svc].ch) { + pr_aud_err("apr_tal: This channel alreday openend\n"); + return NULL; + } + + mutex_lock(&apr_svc_ch[dl][dest][svc].m_lock); + if (!apr_svc_ch[dl][dest][svc].dest_state) { + rc = wait_event_timeout(apr_svc_ch[dl][dest][svc].dest, + apr_svc_ch[dl][dest][svc].dest_state, + msecs_to_jiffies(APR_OPEN_TIMEOUT_MS)); + if (rc == 0) { + pr_aud_err("%s: TIMEOUT for dest %d svc %d\n", __func__, dest, svc); + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + BUG(); + } + pr_debug("apr_tal:Wakeup done\n"); + apr_svc_ch[dl][dest][svc].dest_state = 0; + } + rc = smd_named_open_on_edge(svc_names[dest][svc], dest, + &apr_svc_ch[dl][dest][svc].ch, + &apr_svc_ch[dl][dest][svc], + apr_tal_notify); + if (rc < 0) { + pr_aud_err("apr_tal: smd_open failed %s\n", + svc_names[dest][svc]); + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + return NULL; + } + rc = wait_event_timeout(apr_svc_ch[dl][dest][svc].wait, + (apr_svc_ch[dl][dest][svc].smd_state == 1), 5 * HZ); + if (rc == 0) { + pr_aud_err("apr_tal:TIMEOUT for OPEN event\n"); + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + BUG(); + return NULL; + } + if (!apr_svc_ch[dl][dest][svc].dest_state) { + apr_svc_ch[dl][dest][svc].dest_state = 1; + pr_debug("apr_tal:Waiting for apr svc init\n"); + msleep(200); + pr_debug("apr_tal:apr svc init done\n"); + } + apr_svc_ch[dl][dest][svc].smd_state = 0; + + apr_svc_ch[dl][dest][svc].func = func; + apr_svc_ch[dl][dest][svc].priv = priv; + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + + return &apr_svc_ch[dl][dest][svc]; +} + +int apr_tal_close(struct apr_svc_ch_dev *apr_ch) +{ + int r; + + if (!apr_ch->ch) + return -EINVAL; + + mutex_lock(&apr_ch->m_lock); + r = smd_close(apr_ch->ch); + apr_ch->ch = NULL; + apr_ch->func = NULL; + apr_ch->priv = NULL; + mutex_unlock(&apr_ch->m_lock); + return r; +} + +static int apr_smd_probe(struct platform_device *pdev) +{ + int dest; + int clnt; + + if (pdev->id == APR_DEST_MODEM) { + pr_aud_info("apr_tal:Modem Is Up\n"); + dest = APR_DEST_MODEM; + clnt = APR_CLIENT_VOICE; + apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; + wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); + } else if (pdev->id == APR_DEST_QDSP6) { + pr_aud_info("apr_tal:Q6 Is Up\n"); + /* + local_src_disable(PLL_4); + */ + dest = APR_DEST_QDSP6; + clnt = APR_CLIENT_AUDIO; + apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; + wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); + } else + pr_aud_err("apr_tal:Invalid Dest Id: %d\n", pdev->id); + return 0; +} + +static struct platform_driver 
apr_q6_driver = { + .probe = apr_smd_probe, + .driver = { + .name = "apr_audio_svc", + .owner = THIS_MODULE, + }, +}; + +static struct platform_driver apr_modem_driver = { + .probe = apr_smd_probe, + .driver = { + .name = "apr_voice_svc", + .owner = THIS_MODULE, + }, +}; + +static int __init apr_tal_init(void) +{ + int i, j, k; + + for (i = 0; i < APR_DL_MAX; i++) + for (j = 0; j < APR_DEST_MAX; j++) + for (k = 0; k < APR_CLIENT_MAX; k++) { + init_waitqueue_head(&apr_svc_ch[i][j][k].wait); + init_waitqueue_head(&apr_svc_ch[i][j][k].dest); + spin_lock_init(&apr_svc_ch[i][j][k].lock); + spin_lock_init(&apr_svc_ch[i][j][k].w_lock); + mutex_init(&apr_svc_ch[i][j][k].m_lock); + } + platform_driver_register(&apr_q6_driver); + platform_driver_register(&apr_modem_driver); + return 0; +} +device_initcall(apr_tal_init); diff --git a/arch/arm/mach-msm/qdsp6v3/apr_tal.h b/arch/arm/mach-msm/qdsp6v3/apr_tal.h new file mode 100644 index 00000000..88f8ba45 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/apr_tal.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __APR_TAL_H_ +#define __APR_TAL_H_ + +#include +#include +#include + +/* APR Client IDs */ +#define APR_CLIENT_AUDIO 0x0 +#define APR_CLIENT_VOICE 0x1 +#define APR_CLIENT_MAX 0x2 + +#define APR_DL_SMD 0 +#define APR_DL_MAX 1 + +#define APR_DEST_MODEM 0 +#define APR_DEST_QDSP6 1 +#define APR_DEST_MAX 2 + +#define APR_MAX_BUF 8192 + +#define APR_OPEN_TIMEOUT_MS 5000 + +typedef void (*apr_svc_cb_fn)(void *buf, int len, void *priv); +struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest, + uint32_t dl, apr_svc_cb_fn func, void *priv); +int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len); +int apr_tal_close(struct apr_svc_ch_dev *apr_ch); +struct apr_svc_ch_dev { + struct smd_channel *ch; + spinlock_t lock; + spinlock_t w_lock; + struct mutex m_lock; + apr_svc_cb_fn func; + char data[APR_MAX_BUF]; + wait_queue_head_t wait; + void *priv; + uint32_t smd_state; + wait_queue_head_t dest; + uint32_t dest_state; +}; + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/audio_aac.c b/arch/arm/mach-msm/qdsp6v3/audio_aac.c new file mode 100644 index 00000000..88f8ba45 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_aac.c @@ -0,0 +1,71 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __APR_TAL_H_ +#define __APR_TAL_H_ + +#include +#include +#include + +/* APR Client IDs */ +#define APR_CLIENT_AUDIO 0x0 +#define APR_CLIENT_VOICE 0x1 +#define APR_CLIENT_MAX 0x2 + +#define APR_DL_SMD 0 +#define APR_DL_MAX 1 + +#define APR_DEST_MODEM 0 +#define APR_DEST_QDSP6 1 +#define APR_DEST_MAX 2 + +#define APR_MAX_BUF 8192 + +#define APR_OPEN_TIMEOUT_MS 5000 + +typedef void (*apr_svc_cb_fn)(void *buf, int len, void *priv); +struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest, + uint32_t dl, apr_svc_cb_fn func, void *priv); +int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len); +int apr_tal_close(struct apr_svc_ch_dev *apr_ch); +struct apr_svc_ch_dev { + struct smd_channel *ch; + spinlock_t lock; + spinlock_t w_lock; + struct mutex m_lock; + apr_svc_cb_fn func; + char data[APR_MAX_BUF]; + wait_queue_head_t wait; + void *priv; + uint32_t smd_state; + wait_queue_head_t dest; + uint32_t dest_state; +}; + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/audio_acdb.c b/arch/arm/mach-msm/qdsp6v3/audio_acdb.c new file mode 100644 index 00000000..dca0df60 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_acdb.c @@ -0,0 +1,780 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" + + +#define MAX_NETWORKS 9 +#define NUM_ACTIVE_NETWORKS 6 +#define VOCPROC_STREAM_OFFSET NUM_ACTIVE_NETWORKS +#define VOCPROC_VOL_OFFSET (NUM_ACTIVE_NETWORKS * 2) +#define NUM_VOCPROC_CAL_TYPES (NUM_ACTIVE_NETWORKS * 3) +#define NUM_AUDPROC_CAL_TYPES 3 +#define ACDB_BLOCK_SIZE 4096 +#define NUM_VOCPROC_BLOCKS 18 + +enum { + RX_CAL, + TX_CAL, + MAX_AUDPROC_TYPES +}; + +struct acdb_data { + struct mutex acdb_mutex; + + /* ANC Cal */ + struct acdb_cal_block anc_cal; + + /* AudProc Cal */ + struct acdb_cal_block audproc_cal[MAX_AUDPROC_TYPES]; + struct acdb_cal_block audstrm_cal[MAX_AUDPROC_TYPES]; + struct acdb_cal_block audvol_cal[MAX_AUDPROC_TYPES]; + + /* VocProc Cal */ + struct acdb_cal_block vocproc_cal[MAX_NETWORKS]; + struct acdb_cal_block vocstrm_cal[MAX_NETWORKS]; + struct acdb_cal_block vocvol_cal[MAX_NETWORKS]; + uint32_t vocproc_cal_size; + uint32_t vocstrm_cal_size; + uint32_t vocvol_cal_size; + + /* Sidetone Cal */ + struct sidetone_cal sidetone_cal; + + /* PMEM information */ + int pmem_fd; + unsigned long paddr; + unsigned long kvaddr; + unsigned long pmem_len; + struct file *file; + +}; + +static struct acdb_data acdb_data; +static atomic_t usage_count; + +void get_anc_cal(struct acdb_cal_block *cal_block) +{ + pr_debug("%s\n", __func__); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.anc_cal.cal_kvaddr; + cal_block->cal_paddr = acdb_data.anc_cal.cal_paddr; + cal_block->cal_size = acdb_data.anc_cal.cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_anc_cal(struct cal_block *cal_block) +{ + pr_debug("%s,\n", __func__); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + acdb_data.anc_cal.cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.anc_cal.cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.anc_cal.cal_size = + cal_block->cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_audproc_buffer_data(struct audproc_buffer_data *cal_buffers) +{ + int i; + pr_debug("%s\n", __func__); + + if (cal_buffers == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + for (i = 0; i < NUM_AUDPROC_BUFFERS; i++) { + cal_buffers->phys_addr[i] = (uint32_t) + (acdb_data.paddr + + (NUM_VOCPROC_BLOCKS + i) * ACDB_BLOCK_SIZE); + cal_buffers->buf_size[i] = ACDB_BLOCK_SIZE; + } +done: + return; +} + +void store_audproc_cal(int32_t path, struct cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + mutex_lock(&acdb_data.acdb_mutex); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + acdb_data.audproc_cal[path].cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.audproc_cal[path].cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.audproc_cal[path].cal_size = + cal_block->cal_size; + +done: + mutex_unlock(&acdb_data.acdb_mutex); + return; +} + +void 
get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block) +{ + pr_aud_info("%s, path = %d\n", __func__, path); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.audproc_cal[path].cal_kvaddr; + cal_block->cal_paddr = acdb_data.audproc_cal[path].cal_paddr; + cal_block->cal_size = acdb_data.audproc_cal[path].cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_audstrm_cal(int32_t path, struct cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + mutex_lock(&acdb_data.acdb_mutex); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + acdb_data.audstrm_cal[path].cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.audstrm_cal[path].cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.audstrm_cal[path].cal_size = + cal_block->cal_size; + +done: + mutex_unlock(&acdb_data.acdb_mutex); + return; +} + +void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.audstrm_cal[path].cal_kvaddr; + cal_block->cal_paddr = acdb_data.audstrm_cal[path].cal_paddr; + cal_block->cal_size = acdb_data.audstrm_cal[path].cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_audvol_cal(int32_t path, struct cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + mutex_lock(&acdb_data.acdb_mutex); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + acdb_data.audvol_cal[path].cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.audvol_cal[path].cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.audvol_cal[path].cal_size = + cal_block->cal_size; + +done: + mutex_unlock(&acdb_data.acdb_mutex); + return; +} + +void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block) +{ + pr_aud_info("%s, path = %d\n", __func__, path); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.audvol_cal[path].cal_kvaddr; + cal_block->cal_paddr = acdb_data.audvol_cal[path].cal_paddr; + cal_block->cal_size = acdb_data.audvol_cal[path].cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + + +void store_vocproc_cal(int32_t len, struct cal_block *cal_blocks) +{ + int i; + 
pr_debug("%s\n", __func__); + + if (len > MAX_NETWORKS) { + pr_aud_err("%s: Calibration sent for %d networks, only %d are " + "supported!\n", __func__, len, MAX_NETWORKS); + goto done; + } + + + mutex_lock(&acdb_data.acdb_mutex); + + for (i = 0; i < len; i++) { + if (cal_blocks[i].cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_blocks[i].cal_offset, + acdb_data.pmem_len); + acdb_data.vocproc_cal[i].cal_size = 0; + } else { + acdb_data.vocproc_cal[i].cal_size = + cal_blocks[i].cal_size; + acdb_data.vocproc_cal[i].cal_paddr = + cal_blocks[i].cal_offset + + acdb_data.paddr; + acdb_data.vocproc_cal[i].cal_kvaddr = + cal_blocks[i].cal_offset + + acdb_data.kvaddr; + } + } + acdb_data.vocproc_cal_size = len; + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_vocproc_cal(struct acdb_cal_data *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->num_cal_blocks = acdb_data.vocproc_cal_size; + cal_data->cal_blocks = &acdb_data.vocproc_cal[0]; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_vocstrm_cal(int32_t len, struct cal_block *cal_blocks) +{ + int i; + pr_debug("%s\n", __func__); + + if (len > MAX_NETWORKS) { + pr_aud_err("%s: Calibration sent for %d networks, only %d are " + "supported!\n", __func__, len, MAX_NETWORKS); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + for (i = 0; i < len; i++) { + if (cal_blocks[i].cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_blocks[i].cal_offset, + acdb_data.pmem_len); + acdb_data.vocstrm_cal[i].cal_size = 0; + } else { + acdb_data.vocstrm_cal[i].cal_size = + cal_blocks[i].cal_size; + acdb_data.vocstrm_cal[i].cal_paddr = + cal_blocks[i].cal_offset + + acdb_data.paddr; + acdb_data.vocstrm_cal[i].cal_kvaddr = + cal_blocks[i].cal_offset + + acdb_data.kvaddr; + } + } + acdb_data.vocstrm_cal_size = len; + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_vocstrm_cal(struct acdb_cal_data *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->num_cal_blocks = acdb_data.vocstrm_cal_size; + cal_data->cal_blocks = &acdb_data.vocstrm_cal[0]; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_vocvol_cal(int32_t len, struct cal_block *cal_blocks) +{ + int i; + pr_debug("%s\n", __func__); + + if (len > MAX_NETWORKS) { + pr_aud_err("%s: Calibration sent for %d networks, only %d are " + "supported!\n", __func__, len, MAX_NETWORKS); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + for (i = 0; i < len; i++) { + if (cal_blocks[i].cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_blocks[i].cal_offset, + acdb_data.pmem_len); + acdb_data.vocvol_cal[i].cal_size = 0; + } else { + acdb_data.vocvol_cal[i].cal_size = + cal_blocks[i].cal_size; + acdb_data.vocvol_cal[i].cal_paddr = + cal_blocks[i].cal_offset + + acdb_data.paddr; + acdb_data.vocvol_cal[i].cal_kvaddr = + cal_blocks[i].cal_offset + + acdb_data.kvaddr; + } + } + acdb_data.vocvol_cal_size = len; + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_vocvol_cal(struct acdb_cal_data *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + 
pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->num_cal_blocks = acdb_data.vocvol_cal_size; + cal_data->cal_blocks = &acdb_data.vocvol_cal[0]; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_sidetone_cal(struct sidetone_cal *cal_data) +{ + pr_debug("%s\n", __func__); + + mutex_lock(&acdb_data.acdb_mutex); + + acdb_data.sidetone_cal.enable = cal_data->enable; + acdb_data.sidetone_cal.gain = cal_data->gain; + + mutex_unlock(&acdb_data.acdb_mutex); +} + + +void get_sidetone_cal(struct sidetone_cal *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->enable = acdb_data.sidetone_cal.enable; + cal_data->gain = acdb_data.sidetone_cal.gain; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +static int acdb_open(struct inode *inode, struct file *f) +{ + s32 result = 0; + pr_aud_info("%s\n", __func__); + + mutex_lock(&acdb_data.acdb_mutex); + if (acdb_data.pmem_fd) { + pr_aud_info("%s: ACDB opened but PMEM allocated, using existing PMEM!\n", + __func__); + } + mutex_unlock(&acdb_data.acdb_mutex); + + atomic_inc(&usage_count); + return result; +} + +static int deregister_pmem(void) +{ + int result; + struct audproc_buffer_data buffer; + + get_audproc_buffer_data(&buffer); + + result = adm_memory_unmap_regions(buffer.phys_addr, + buffer.buf_size, NUM_AUDPROC_BUFFERS); + + if (result < 0) + pr_aud_err("Audcal unmap did not work!\n"); + + if (acdb_data.pmem_fd) { + put_pmem_file(acdb_data.file); + acdb_data.pmem_fd = 0; + } + return result; +} + +static int register_pmem(void) +{ + int result; + struct audproc_buffer_data buffer; + + result = get_pmem_file(acdb_data.pmem_fd, &acdb_data.paddr, + &acdb_data.kvaddr, &acdb_data.pmem_len, + &acdb_data.file); + if (result != 0) { + acdb_data.pmem_fd = 0; + pr_aud_err("%s: Could not register PMEM!!!\n", __func__); + goto done; + } + + pr_debug("AUDIO_REGISTER_PMEM done! 
paddr = 0x%lx, " + "kvaddr = 0x%lx, len = x%lx\n", acdb_data.paddr, + acdb_data.kvaddr, acdb_data.pmem_len); + get_audproc_buffer_data(&buffer); + result = adm_memory_map_regions(buffer.phys_addr, 0, + buffer.buf_size, + NUM_AUDPROC_BUFFERS); + if (result < 0) + pr_aud_err("Audcal mmap did not work!\n"); + goto done; + +done: + return result; +} +static long acdb_ioctl(struct file *f, + unsigned int cmd, unsigned long arg) +{ + s32 result = 0; + s32 audproc_path; + s32 size; + struct cal_block data[MAX_NETWORKS]; + pr_debug("%s\n", __func__); + + switch (cmd) { + case AUDIO_REGISTER_PMEM: + pr_debug("AUDIO_REGISTER_PMEM\n"); + mutex_lock(&acdb_data.acdb_mutex); + if (acdb_data.pmem_fd) { + deregister_pmem(); + pr_aud_info("Remove the existing PMEM\n"); + } + + if (copy_from_user(&acdb_data.pmem_fd, (void *)arg, + sizeof(acdb_data.pmem_fd))) + result = -EFAULT; + else + result = register_pmem(); + mutex_unlock(&acdb_data.acdb_mutex); + goto done; + + case AUDIO_DEREGISTER_PMEM: + pr_debug("AUDIO_DEREGISTER_PMEM\n"); + mutex_lock(&acdb_data.acdb_mutex); + deregister_pmem(); + mutex_unlock(&acdb_data.acdb_mutex); + goto done; + } + + if (copy_from_user(&size, (void *) arg, sizeof(size))) { + + result = -EFAULT; + goto done; + } + + if (size <= 0) { + pr_aud_err("%s: Invalid size sent to driver: %d\n", + __func__, size); + result = -EFAULT; + goto done; + } + + if (copy_from_user(data, (void *)(arg + sizeof(size)), size)) { + + pr_aud_err("%s: fail to copy table size %d\n", __func__, size); + result = -EFAULT; + goto done; + } + + if (data == NULL) { + pr_aud_err("%s: NULL pointer sent to driver!\n", __func__); + result = -EFAULT; + goto done; + } + + switch (cmd) { + case AUDIO_SET_AUDPROC_TX_CAL: + audproc_path = TX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audproc_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_RX_CAL: + audproc_path = RX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audproc_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_TX_STREAM_CAL: + audproc_path = TX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audstrm_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_RX_STREAM_CAL: + audproc_path = RX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audstrm_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_TX_VOL_CAL: + audproc_path = TX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audvol_cal(audproc_path, data); + case AUDIO_SET_AUDPROC_RX_VOL_CAL: + audproc_path = RX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audvol_cal(audproc_path, data); + break; + case AUDIO_SET_VOCPROC_CAL: + store_vocproc_cal(size / sizeof(struct cal_block), data); + break; + case AUDIO_SET_VOCPROC_STREAM_CAL: + store_vocstrm_cal(size / sizeof(struct cal_block), data); + break; + case AUDIO_SET_VOCPROC_VOL_CAL: + store_vocvol_cal(size / sizeof(struct cal_block), data); + break; + case AUDIO_SET_SIDETONE_CAL: + if (size > sizeof(struct sidetone_cal)) + pr_aud_err("%s: More 
sidetone cal then expected, " + "size received: %d\n", __func__, size); + store_sidetone_cal((struct sidetone_cal *)data); + break; + case AUDIO_SET_ANC_CAL: + store_anc_cal(data); + break; + default: + pr_aud_err("ACDB=> ACDB ioctl not found!\n"); + } + +done: + return result; +} + +static int acdb_mmap(struct file *file, struct vm_area_struct *vma) +{ + int result = 0; + int size = vma->vm_end - vma->vm_start; + + pr_debug("%s\n", __func__); + + mutex_lock(&acdb_data.acdb_mutex); + if (acdb_data.pmem_fd) { + if (size <= acdb_data.pmem_len) { + vma->vm_page_prot = pgprot_noncached( + vma->vm_page_prot); + result = remap_pfn_range(vma, + vma->vm_start, + acdb_data.paddr >> PAGE_SHIFT, + size, + vma->vm_page_prot); + } else { + pr_aud_err("%s: Not enough PMEM memory!\n", __func__); + result = -ENOMEM; + } + } else { + pr_aud_err("%s: PMEM is not allocated, yet!\n", __func__); + result = -ENODEV; + } + mutex_unlock(&acdb_data.acdb_mutex); + + return result; +} + +static int acdb_release(struct inode *inode, struct file *f) +{ + s32 result = 0; + + atomic_dec(&usage_count); + atomic_read(&usage_count); + + pr_aud_info("%s: ref count %d!\n", __func__, + atomic_read(&usage_count)); + + if (atomic_read(&usage_count) >= 1) { + result = -EBUSY; + } else { + mutex_lock(&acdb_data.acdb_mutex); + result = deregister_pmem(); + mutex_unlock(&acdb_data.acdb_mutex); + } + + return result; +} + +static const struct file_operations acdb_fops = { + .owner = THIS_MODULE, + .open = acdb_open, + .release = acdb_release, + .unlocked_ioctl = acdb_ioctl, + .mmap = acdb_mmap, +}; + +struct miscdevice acdb_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_acdb", + .fops = &acdb_fops, +}; + +static int __init acdb_init(void) +{ + pr_aud_info("%s\n", __func__); + memset(&acdb_data, 0, sizeof(acdb_data)); + mutex_init(&acdb_data.acdb_mutex); + atomic_set(&usage_count, 0); + return misc_register(&acdb_misc); +} + +static void __exit acdb_exit(void) +{ +} + +module_init(acdb_init); +module_exit(acdb_exit); + +MODULE_DESCRIPTION("MSM 8x60 Audio ACDB driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_acdb.h b/arch/arm/mach-msm/qdsp6v3/audio_acdb.h new file mode 100644 index 00000000..17f9a7c4 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_acdb.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _AUDIO_ACDB_H +#define _AUDIO_ACDB_H + +#include +#include "q6adm.h" + +#define NUM_AUDPROC_BUFFERS 6 + +struct acdb_cal_block { + uint32_t cal_size; + uint32_t cal_kvaddr; + uint32_t cal_paddr; +}; + +struct acdb_cal_data { + uint32_t num_cal_blocks; + struct acdb_cal_block *cal_blocks; +}; + +struct audproc_buffer_data { + uint32_t buf_size[NUM_AUDPROC_BUFFERS]; + uint32_t phys_addr[NUM_AUDPROC_BUFFERS]; +}; + +void get_audproc_buffer_data(struct audproc_buffer_data *cal_buffers); +void get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block); +void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block); +void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block); +void get_vocproc_cal(struct acdb_cal_data *cal_data); +void get_vocstrm_cal(struct acdb_cal_data *cal_data); +void get_vocvol_cal(struct acdb_cal_data *cal_data); +void get_sidetone_cal(struct sidetone_cal *cal_data); +void get_anc_cal(struct acdb_cal_block *cal_block); + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c b/arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c new file mode 100644 index 00000000..f170ae52 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c @@ -0,0 +1,1757 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "q6adm.h" +#include "rtac.h" + +#ifndef MAX +#define MAX(x, y) (((x) > (y)) ? 
(x) : (y)) +#endif + + +static DEFINE_MUTEX(session_lock); + +struct audio_dev_ctrl_state { + struct msm_snddev_info *devs[AUDIO_DEV_CTL_MAX_DEV]; + u32 num_dev; + atomic_t opened; + struct msm_snddev_info *voice_rx_dev; + struct msm_snddev_info *voice_tx_dev; + wait_queue_head_t wait; +}; + +static struct audio_dev_ctrl_state audio_dev_ctrl; +struct event_listner event; + +#define PLAYBACK 0x1 +#define LIVE_RECORDING 0x2 +#define NON_LIVE_RECORDING 0x3 +#define MAX_COPP_DEVICES 4 +static int voc_rx_freq = 0; +static int voc_tx_freq = 0; + +struct session_freq { + int freq; + int evt; +}; + +struct audio_routing_info { + unsigned short mixer_mask[MAX_SESSIONS]; + unsigned short audrec_mixer_mask[MAX_SESSIONS]; + struct session_freq dec_freq[MAX_SESSIONS]; + struct session_freq enc_freq[MAX_SESSIONS]; + unsigned int copp_list[MAX_SESSIONS][AFE_MAX_PORTS]; + int voice_tx_dev_id; + int voice_rx_dev_id; + int voice_tx_sample_rate; + int voice_rx_sample_rate; + signed int voice_tx_vol; + signed int voice_rx_vol; + int tx_mute; + int rx_mute; + int voice_state; + int call_state; + struct mutex copp_list_mutex; + struct mutex adm_mutex; +}; + +static struct audio_routing_info routing_info; + +struct audio_copp_topology { + struct mutex lock; + int session_cnt; + int session_id[MAX_SESSIONS]; + int topolog_id[MAX_SESSIONS]; +}; +static struct audio_copp_topology adm_tx_topology_tbl; + +static struct dev_ctrl_ops default_ctrl_ops; +static struct dev_ctrl_ops *ctrl_ops = &default_ctrl_ops; + +void htc_8x60_register_dev_ctrl_ops(struct dev_ctrl_ops *ops) +{ + ctrl_ops = ops; +} + +int msm_reset_all_device(void) +{ + int rc = 0; + int dev_id = 0; + struct msm_snddev_info *dev_info = NULL; + + for (dev_id = 0; dev_id < audio_dev_ctrl.num_dev; dev_id++) { + dev_info = audio_dev_ctrl_find_dev(dev_id); + if (IS_ERR(dev_info)) { + pr_aud_err("%s:pass invalid dev_id\n", __func__); + rc = PTR_ERR(dev_info); + return rc; + } + if (!dev_info->opened) + continue; + pr_debug("%s:Resetting device %d active on COPP %d" + "with %lld as routing\n", __func__, + dev_id, dev_info->copp_id, dev_info->sessions); + broadcast_event(AUDDEV_EVT_REL_PENDING, + dev_id, + SESSION_IGNORE); + rc = dev_info->dev_ops.close(dev_info); + if (rc < 0) { + pr_aud_err("%s:Snd device failed close!\n", __func__); + return rc; + } else { + dev_info->opened = 0; + broadcast_event(AUDDEV_EVT_DEV_RLS, + dev_id, + SESSION_IGNORE); + + if (dev_info->copp_id == VOICE_PLAYBACK_TX) + voice_start_playback(0); + } + dev_info->sessions = 0; + } + msm_clear_all_session(); + return 0; +} +EXPORT_SYMBOL(msm_reset_all_device); + +int msm_set_copp_id(int session_id, int copp_id) +{ + int rc = 0; + int index; + + if (session_id < 1 || session_id > 8) + return -EINVAL; + if (afe_validate_port(copp_id) < 0) + return -EINVAL; + + index = afe_get_port_index(copp_id); + pr_debug("%s: session[%d] copp_id[%d] index[%d]\n", __func__, + session_id, copp_id, index); + mutex_lock(&routing_info.copp_list_mutex); + if (routing_info.copp_list[session_id][index] == DEVICE_IGNORE) + routing_info.copp_list[session_id][index] = copp_id; + mutex_unlock(&routing_info.copp_list_mutex); + + return rc; +} +EXPORT_SYMBOL(msm_set_copp_id); + +int msm_clear_copp_id(int session_id, int copp_id) +{ + int rc = 0; + int index = afe_get_port_index(copp_id); + + if (session_id < 1 || session_id > 8) { + pr_aud_err("%s: invalid session_id %d\n", __func__, session_id); + return -EINVAL; + } + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid copp_id index %d\n", 
__func__, index); + return -EINVAL; + } + + pr_debug("%s: session[%d] copp_id[%d] index[%d]\n", __func__, + session_id, copp_id, index); + mutex_lock(&routing_info.copp_list_mutex); + if (routing_info.copp_list[session_id][index] == copp_id) + routing_info.copp_list[session_id][index] = DEVICE_IGNORE; + mutex_unlock(&routing_info.copp_list_mutex); + + return rc; +} +EXPORT_SYMBOL(msm_clear_copp_id); + +int msm_clear_session_id(int session_id) +{ + int rc = 0; + int i = 0; + if (session_id < 1 || session_id > 8) + return -EINVAL; + pr_debug("%s: session[%d]\n", __func__, session_id); + mutex_lock(&routing_info.adm_mutex); + mutex_lock(&routing_info.copp_list_mutex); + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[session_id][i] != DEVICE_IGNORE) { + rc = adm_close(routing_info.copp_list[session_id][i]); + if (rc < 0) { + pr_aud_err("%s: adm close fail port[%d] rc[%d]\n", + __func__, + routing_info.copp_list[session_id][i], + rc); + continue; + } + routing_info.copp_list[session_id][i] = DEVICE_IGNORE; + rc = 0; + } + } + mutex_unlock(&routing_info.copp_list_mutex); + mutex_unlock(&routing_info.adm_mutex); + + return rc; +} +EXPORT_SYMBOL(msm_clear_session_id); + +int msm_clear_all_session() +{ + int rc = 0; + int i = 0, j = 0; + pr_aud_info("%s:\n", __func__); + mutex_lock(&routing_info.adm_mutex); + mutex_lock(&routing_info.copp_list_mutex); + for (j = 1; j < MAX_SESSIONS; j++) { + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[j][i] != DEVICE_IGNORE) { + rc = adm_close( + routing_info.copp_list[j][i]); + if (rc < 0) { + pr_aud_err("%s: adm close fail copp[%d]" + "session[%d] rc[%d]\n", + __func__, + routing_info.copp_list[j][i], + j, rc); + continue; + } + routing_info.copp_list[j][i] = DEVICE_IGNORE; + rc = 0; + } + } + } + mutex_unlock(&routing_info.copp_list_mutex); + mutex_unlock(&routing_info.adm_mutex); + return rc; +} +EXPORT_SYMBOL(msm_clear_all_session); + +int msm_get_voice_state(void) +{ + pr_debug("voice state %d\n", routing_info.voice_state); + return routing_info.voice_state; +} +EXPORT_SYMBOL(msm_get_voice_state); + +int msm_get_call_state(void) +{ + pr_debug("call state %d\n", routing_info.call_state); + return routing_info.call_state; +} +EXPORT_SYMBOL(msm_get_call_state); + +int msm_set_voice_mute(int dir, int mute) +{ + pr_debug("dir %x mute %x\n", dir, mute); + if (dir == DIR_TX) { + routing_info.tx_mute = mute; + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, + routing_info.voice_tx_dev_id, SESSION_IGNORE); + } else + return -EPERM; + return 0; +} +EXPORT_SYMBOL(msm_set_voice_mute); + +int msm_set_voice_vol(int dir, s32 volume) +{ + if (dir == DIR_TX) { + routing_info.voice_tx_vol = volume; + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, + routing_info.voice_tx_dev_id, + SESSION_IGNORE); + } else if (dir == DIR_RX) { + routing_info.voice_rx_vol = volume; + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, + routing_info.voice_rx_dev_id, + SESSION_IGNORE); + } else + return -EINVAL; + return 0; +} +EXPORT_SYMBOL(msm_set_voice_vol); + +void msm_snddev_register(struct msm_snddev_info *dev_info) +{ + mutex_lock(&session_lock); + if (audio_dev_ctrl.num_dev < AUDIO_DEV_CTL_MAX_DEV) { + audio_dev_ctrl.devs[audio_dev_ctrl.num_dev] = dev_info; + dev_info->dev_volume = 50; /* 50% */ + dev_info->sessions = 0x0; + dev_info->usage_count = 0; + audio_dev_ctrl.num_dev++; + } else + pr_aud_err("%s: device registry max out\n", __func__); + mutex_unlock(&session_lock); +} +EXPORT_SYMBOL(msm_snddev_register); + +int msm_snddev_devcount(void) +{ 
+ return audio_dev_ctrl.num_dev; +} +EXPORT_SYMBOL(msm_snddev_devcount); + +int msm_snddev_query(int dev_id) +{ + if (dev_id <= audio_dev_ctrl.num_dev) + return 0; + return -ENODEV; +} +EXPORT_SYMBOL(msm_snddev_query); + +int msm_snddev_is_set(int popp_id, int copp_id) +{ + return routing_info.mixer_mask[popp_id] & (0x1 << copp_id); +} +EXPORT_SYMBOL(msm_snddev_is_set); + +unsigned short msm_snddev_route_enc(int enc_id) +{ + if (enc_id >= MAX_SESSIONS) + return -EINVAL; + return routing_info.audrec_mixer_mask[enc_id]; +} +EXPORT_SYMBOL(msm_snddev_route_enc); + +unsigned short msm_snddev_route_dec(int popp_id) +{ + if (popp_id >= MAX_SESSIONS) + return -EINVAL; + return routing_info.mixer_mask[popp_id]; +} +EXPORT_SYMBOL(msm_snddev_route_dec); + +/*To check one->many case*/ +int msm_check_multicopp_per_stream(int session_id, + struct route_payload *payload) +{ + int i = 0; + int flag = 0; + pr_debug("%s: session_id=%d\n", __func__, session_id); + mutex_lock(&routing_info.copp_list_mutex); + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[session_id][i] == DEVICE_IGNORE) + continue; + else { + pr_debug("Device enabled port_id = %d\n", + routing_info.copp_list[session_id][i]); + payload->copp_ids[flag++] = + routing_info.copp_list[session_id][i]; + } + } + mutex_unlock(&routing_info.copp_list_mutex); + if (flag > 1) { + pr_debug("Multiple copp per stream case num_copps=%d\n", flag); + } else { + pr_debug("Stream routed to single copp\n"); + } + payload->num_copps = flag; + return flag; +} + +int msm_snddev_set_dec(int popp_id, int copp_id, int set, + int rate, int mode) +{ + int rc = 0, i = 0; + struct route_payload payload; + int topology = DEFAULT_COPP_TOPOLOGY; + + if ((popp_id >= MAX_SESSIONS) || (popp_id <= 0)) { + pr_aud_err("%s: Invalid session id %d\n", __func__, popp_id); + return 0; + } + + mutex_lock(&routing_info.adm_mutex); + if (set) { + if (ctrl_ops->support_opendsp) { + if (ctrl_ops->support_opendsp()) + topology = HTC_COPP_TOPOLOGY; + } + pr_aud_info("%s, topology = 0x%x\n", __func__, topology); + rc = adm_open(copp_id, PLAYBACK, rate, mode, + topology); + if (rc < 0) { + pr_aud_err("%s: adm open fail rc[%d]\n", __func__, rc); + rc = -EINVAL; + mutex_unlock(&routing_info.adm_mutex); + return rc; + } + msm_set_copp_id(popp_id, copp_id); + pr_debug("%s:Session id=%d copp_id=%d\n", + __func__, popp_id, copp_id); + memset(payload.copp_ids, DEVICE_IGNORE, + (sizeof(unsigned int) * AFE_MAX_PORTS)); + rc = msm_check_multicopp_per_stream(popp_id, &payload); + /* Multiple streams per copp is handled, one stream at a time */ + rc = adm_matrix_map(popp_id, PLAYBACK, rc, + payload.copp_ids, copp_id); + if (rc < 0) { + pr_aud_err("%s: matrix map failed rc[%d]\n", + __func__, rc); + adm_close(copp_id); + rc = -EINVAL; + mutex_unlock(&routing_info.adm_mutex); + return rc; + } + } else { + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[popp_id][i] == copp_id) { + rc = adm_close(copp_id); + if (rc < 0) { + pr_aud_err("%s: adm close fail copp[%d]" + "rc[%d]\n", + __func__, copp_id, rc); + rc = -EINVAL; + mutex_unlock(&routing_info.adm_mutex); + return rc; + } + msm_clear_copp_id(popp_id, copp_id); + break; + } + } + } + + if (copp_id == VOICE_PLAYBACK_TX) { + /* Signal uplink playback. 
*/ + rc = voice_start_playback(set); + } + mutex_unlock(&routing_info.adm_mutex); + return rc; +} +EXPORT_SYMBOL(msm_snddev_set_dec); + + +static int check_tx_copp_topology(int session_id) +{ + int cnt; + int ret_val = -ENOENT; + + cnt = adm_tx_topology_tbl.session_cnt; + if (cnt) { + do { + if (adm_tx_topology_tbl.session_id[cnt-1] + == session_id) + ret_val = cnt-1; + } while (--cnt); + } + + return ret_val; +} + +static int add_to_tx_topology_lists(int session_id, int topology) +{ + int idx = 0, tbl_idx; + int ret_val = -ENOSPC; + + mutex_lock(&adm_tx_topology_tbl.lock); + + tbl_idx = check_tx_copp_topology(session_id); + if (tbl_idx == -ENOENT) { + while (adm_tx_topology_tbl.session_id[idx++]) + ; + tbl_idx = idx-1; + } + + if (tbl_idx < MAX_SESSIONS) { + adm_tx_topology_tbl.session_id[tbl_idx] = session_id; + adm_tx_topology_tbl.topolog_id[tbl_idx] = topology; + adm_tx_topology_tbl.session_cnt++; + + ret_val = 0; + } + mutex_unlock(&adm_tx_topology_tbl.lock); + return ret_val; +} + +static void remove_from_tx_topology_lists(int session_id) +{ + int tbl_idx; + + mutex_lock(&adm_tx_topology_tbl.lock); + tbl_idx = check_tx_copp_topology(session_id); + if (tbl_idx != -ENOENT) { + + adm_tx_topology_tbl.session_cnt--; + adm_tx_topology_tbl.session_id[tbl_idx] = 0; + adm_tx_topology_tbl.topolog_id[tbl_idx] = 0; + } + mutex_unlock(&adm_tx_topology_tbl.lock); +} + +int auddev_cfg_tx_copp_topology(int session_id, int cfg) +{ + int ret = 0; + + if (cfg == DEFAULT_COPP_TOPOLOGY) + remove_from_tx_topology_lists(session_id); + else { + switch (cfg) { + case VPM_TX_SM_ECNS_COPP_TOPOLOGY: + case VPM_TX_DM_FLUENCE_COPP_TOPOLOGY: + case HTC_STEREO_RECORD_TOPOLOGY: + ret = add_to_tx_topology_lists(session_id, cfg); + break; + + default: + ret = -ENODEV; + break; + } + } + return ret; +} + +int msm_snddev_set_enc(int popp_id, int copp_id, int set, + int rate, int mode) +{ + int topology; + int tbl_idx; + int rc = 0, i = 0; + mutex_lock(&routing_info.adm_mutex); + if (set) { + mutex_lock(&adm_tx_topology_tbl.lock); + tbl_idx = check_tx_copp_topology(popp_id); + if (tbl_idx == -ENOENT) + topology = DEFAULT_COPP_TOPOLOGY; + else { + topology = adm_tx_topology_tbl.topolog_id[tbl_idx]; + rate = 16000; + } + mutex_unlock(&adm_tx_topology_tbl.lock); + pr_aud_info("%s, topology = 0x%x\n", __func__, topology); + rc = adm_open(copp_id, LIVE_RECORDING, rate, mode, topology); + if (rc < 0) { + pr_aud_err("%s: adm open fail rc[%d]\n", __func__, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 1, + (unsigned int *)&copp_id, copp_id); + if (rc < 0) { + pr_aud_err("%s: matrix map failed rc[%d]\n", __func__, rc); + adm_close(copp_id); + rc = -EINVAL; + goto fail_cmd; + } + msm_set_copp_id(popp_id, copp_id); + } else { + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[popp_id][i] == copp_id) { + rc = adm_close(copp_id); + if (rc < 0) { + pr_aud_err("%s: adm close fail copp[%d]" + "rc[%d]\n", + __func__, copp_id, rc); + rc = -EINVAL; + goto fail_cmd; + } + msm_clear_copp_id(popp_id, copp_id); + break; + } + } + } +fail_cmd: + mutex_unlock(&routing_info.adm_mutex); + return rc; +} +EXPORT_SYMBOL(msm_snddev_set_enc); + +int msm_device_is_voice(int dev_id) +{ + if ((dev_id == routing_info.voice_rx_dev_id) + || (dev_id == routing_info.voice_tx_dev_id)) + return 0; + else + return -EINVAL; +} +EXPORT_SYMBOL(msm_device_is_voice); + +int msm_set_voc_route(struct msm_snddev_info *dev_info, + int stream_type, int dev_id) +{ + int rc = 0; + u64 session_mask = 0; + + if 
(dev_info == NULL) { + pr_aud_err("%s: invalid param\n", __func__); + return -EINVAL; + } + + mutex_lock(&session_lock); + switch (stream_type) { + case AUDIO_ROUTE_STREAM_VOICE_RX: + if (audio_dev_ctrl.voice_rx_dev) + audio_dev_ctrl.voice_rx_dev->sessions &= ~0xFFFF; + + if (!(dev_info->capability & SNDDEV_CAP_RX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + audio_dev_ctrl.voice_rx_dev = dev_info; + if (audio_dev_ctrl.voice_rx_dev) { + session_mask = + ((u64)0x1) << (MAX_BIT_PER_CLIENT * \ + ((int)AUDDEV_CLNT_VOC-1)); + audio_dev_ctrl.voice_rx_dev->sessions |= + session_mask; + } + routing_info.voice_rx_dev_id = dev_id; + break; + case AUDIO_ROUTE_STREAM_VOICE_TX: + if (audio_dev_ctrl.voice_tx_dev) + audio_dev_ctrl.voice_tx_dev->sessions &= ~0xFFFF; + + if (!(dev_info->capability & SNDDEV_CAP_TX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + + audio_dev_ctrl.voice_tx_dev = dev_info; + if (audio_dev_ctrl.voice_rx_dev) { + session_mask = + ((u64)0x1) << (MAX_BIT_PER_CLIENT * \ + ((int)AUDDEV_CLNT_VOC-1)); + audio_dev_ctrl.voice_tx_dev->sessions |= + session_mask; + } + routing_info.voice_tx_dev_id = dev_id; + break; + default: + rc = -EINVAL; + } + mutex_unlock(&session_lock); + return rc; +} +EXPORT_SYMBOL(msm_set_voc_route); + +void msm_release_voc_thread(void) +{ + wake_up(&audio_dev_ctrl.wait); +} +EXPORT_SYMBOL(msm_release_voc_thread); + +int msm_snddev_get_enc_freq(session_id) +{ + return routing_info.enc_freq[session_id].freq; +} +EXPORT_SYMBOL(msm_snddev_get_enc_freq); + +int msm_get_voc_freq(int *tx_freq, int *rx_freq) +{ + *tx_freq = (0 == voc_tx_freq ? routing_info.voice_tx_sample_rate + : voc_tx_freq); + *rx_freq = (0 == voc_rx_freq ? routing_info.voice_rx_sample_rate + : voc_rx_freq); + return 0; +} +EXPORT_SYMBOL(msm_get_voc_freq); + +void msm_set_voc_freq(int tx_freq, int rx_freq) +{ + voc_tx_freq = tx_freq; + voc_rx_freq = rx_freq; +} +EXPORT_SYMBOL(msm_set_voc_freq); + + +int msm_get_voc_route(u32 *rx_id, u32 *tx_id) +{ + int rc = 0; + + if (!rx_id || !tx_id) + return -EINVAL; + + mutex_lock(&session_lock); + if (!audio_dev_ctrl.voice_rx_dev || !audio_dev_ctrl.voice_tx_dev) { + rc = -ENODEV; + mutex_unlock(&session_lock); + return rc; + } + + *rx_id = audio_dev_ctrl.voice_rx_dev->acdb_id; + *tx_id = audio_dev_ctrl.voice_tx_dev->acdb_id; + + mutex_unlock(&session_lock); + + return rc; +} +EXPORT_SYMBOL(msm_get_voc_route); + +struct msm_snddev_info *audio_dev_ctrl_find_dev(u32 dev_id) +{ + struct msm_snddev_info *info; + + if ((audio_dev_ctrl.num_dev - 1) < dev_id) { + info = ERR_PTR(-ENODEV); + goto error; + } + + info = audio_dev_ctrl.devs[dev_id]; +error: + return info; + +} +EXPORT_SYMBOL(audio_dev_ctrl_find_dev); + +int snddev_voice_set_volume(int vol, int path) +{ + if (audio_dev_ctrl.voice_rx_dev + && audio_dev_ctrl.voice_tx_dev) { + if (path) + audio_dev_ctrl.voice_tx_dev->dev_volume = vol; + else + audio_dev_ctrl.voice_rx_dev->dev_volume = vol; + } else + return -ENODEV; + return 0; +} +EXPORT_SYMBOL(snddev_voice_set_volume); + +static int audio_dev_ctrl_get_devices(struct audio_dev_ctrl_state *dev_ctrl, + void __user *arg) +{ + int rc = 0; + u32 index; + struct msm_snd_device_list work_list; + struct msm_snd_device_info *work_tbl; + + if (copy_from_user(&work_list, arg, sizeof(work_list))) { + rc = -EFAULT; + goto error; + } + + if (work_list.num_dev > dev_ctrl->num_dev) { + rc = -EINVAL; + goto error; + } + + work_tbl = kmalloc(work_list.num_dev * + sizeof(struct msm_snd_device_info), 
GFP_KERNEL); + if (!work_tbl) { + rc = -ENOMEM; + goto error; + } + + for (index = 0; index < dev_ctrl->num_dev; index++) { + work_tbl[index].dev_id = index; + work_tbl[index].dev_cap = dev_ctrl->devs[index]->capability; + strlcpy(work_tbl[index].dev_name, dev_ctrl->devs[index]->name, + 64); + } + + if (copy_to_user((void *) (work_list.list), work_tbl, + work_list.num_dev * sizeof(struct msm_snd_device_info))) + rc = -EFAULT; + kfree(work_tbl); +error: + return rc; +} + + +int auddev_register_evt_listner(u32 evt_id, u32 clnt_type, u32 clnt_id, + void (*listner)(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data), + void *private_data) +{ + int rc; + struct msm_snd_evt_listner *callback = NULL; + struct msm_snd_evt_listner *new_cb; + + new_cb = kzalloc(sizeof(struct msm_snd_evt_listner), GFP_KERNEL); + if (!new_cb) { + pr_aud_err("No memory to add new listener node\n"); + return -ENOMEM; + } + + mutex_lock(&session_lock); + new_cb->cb_next = NULL; + new_cb->auddev_evt_listener = listner; + new_cb->evt_id = evt_id; + new_cb->clnt_type = clnt_type; + new_cb->clnt_id = clnt_id; + new_cb->private_data = private_data; + if (event.cb == NULL) { + event.cb = new_cb; + new_cb->cb_prev = NULL; + } else { + callback = event.cb; + for (; ;) { + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + callback->cb_next = new_cb; + new_cb->cb_prev = callback; + } + event.num_listner++; + mutex_unlock(&session_lock); + rc = 0; + return rc; +} +EXPORT_SYMBOL(auddev_register_evt_listner); + +int auddev_unregister_evt_listner(u32 clnt_type, u32 clnt_id) +{ + struct msm_snd_evt_listner *callback = event.cb; + struct msm_snddev_info *info; + u64 session_mask = 0; + int i = 0; + + mutex_lock(&session_lock); + while (callback != NULL) { + if ((callback->clnt_type == clnt_type) + && (callback->clnt_id == clnt_id)) + break; + callback = callback->cb_next; + } + if (callback == NULL) { + mutex_unlock(&session_lock); + return -EINVAL; + } + + if ((callback->cb_next == NULL) && (callback->cb_prev == NULL)) + event.cb = NULL; + else if (callback->cb_next == NULL) + callback->cb_prev->cb_next = NULL; + else if (callback->cb_prev == NULL) { + callback->cb_next->cb_prev = NULL; + event.cb = callback->cb_next; + } else { + callback->cb_prev->cb_next = callback->cb_next; + callback->cb_next->cb_prev = callback->cb_prev; + } + kfree(callback); + + session_mask = (((u64)0x1) << clnt_id) << (MAX_BIT_PER_CLIENT * \ + ((int)clnt_type-1)); + for (i = 0; i < audio_dev_ctrl.num_dev; i++) { + info = audio_dev_ctrl.devs[i]; + info->sessions &= ~session_mask; + } + mutex_unlock(&session_lock); + return 0; +} +EXPORT_SYMBOL(auddev_unregister_evt_listner); + +int msm_snddev_withdraw_freq(u32 session_id, u32 capability, u32 clnt_type) +{ + int i = 0; + struct msm_snddev_info *info; + u64 session_mask = 0; + + if ((clnt_type == AUDDEV_CLNT_VOC) && (session_id != 0)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_DEC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_ENC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + + session_mask = (((u64)0x1) << session_id) << (MAX_BIT_PER_CLIENT * \ + ((int)clnt_type-1)); + + for (i = 0; i < audio_dev_ctrl.num_dev; i++) { + info = audio_dev_ctrl.devs[i]; + if ((info->sessions & session_mask) + && (info->capability & capability)) { + if (!(info->sessions & ~(session_mask))) + info->set_sample_rate = 0; + } + } + if (clnt_type == AUDDEV_CLNT_DEC) + routing_info.dec_freq[session_id].freq + = 
0; + else if (clnt_type == AUDDEV_CLNT_ENC) + routing_info.enc_freq[session_id].freq + = 0; + else if (capability == SNDDEV_CAP_TX) + routing_info.voice_tx_sample_rate = 0; + else + routing_info.voice_rx_sample_rate = 48000; + return 0; +} + +int msm_snddev_request_freq(int *freq, u32 session_id, + u32 capability, u32 clnt_type) +{ + int i = 0; + int rc = 0; + struct msm_snddev_info *info; + u32 set_freq; + u64 session_mask = 0; + u64 clnt_type_mask = 0; + + pr_debug(": clnt_type 0x%08x\n", clnt_type); + + if ((clnt_type == AUDDEV_CLNT_VOC) && (session_id != 0)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_DEC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_ENC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + session_mask = (((u64)0x1) << session_id) << (MAX_BIT_PER_CLIENT * \ + ((int)clnt_type-1)); + clnt_type_mask = (0xFFFF << (MAX_BIT_PER_CLIENT * (clnt_type-1))); + if (!(*freq == 8000) && !(*freq == 11025) && + !(*freq == 12000) && !(*freq == 16000) && + !(*freq == 22050) && !(*freq == 24000) && + !(*freq == 32000) && !(*freq == 44100) && + !(*freq == 48000)) + return -EINVAL; + + for (i = 0; i < audio_dev_ctrl.num_dev; i++) { + info = audio_dev_ctrl.devs[i]; + if ((info->sessions & session_mask) + && (info->capability & capability)) { + rc = 0; + if ((info->sessions & ~clnt_type_mask) + && ((*freq != 8000) && (*freq != 16000) + && (*freq != 48000))) { + if (clnt_type == AUDDEV_CLNT_ENC) { + routing_info.enc_freq[session_id].freq + = 0; + return -EPERM; + } else if (clnt_type == AUDDEV_CLNT_DEC) { + routing_info.dec_freq[session_id].freq + = 0; + return -EPERM; + } + } + if (*freq == info->set_sample_rate) { + rc = info->set_sample_rate; + continue; + } + set_freq = MAX(*freq, info->set_sample_rate); + + + if (clnt_type == AUDDEV_CLNT_DEC) { + routing_info.dec_freq[session_id].evt = 1; + routing_info.dec_freq[session_id].freq + = set_freq; + } else if (clnt_type == AUDDEV_CLNT_ENC) { + routing_info.enc_freq[session_id].evt = 1; + routing_info.enc_freq[session_id].freq + = set_freq; + } else if (capability == SNDDEV_CAP_TX) + routing_info.voice_tx_sample_rate = set_freq; + + rc = set_freq; + info->set_sample_rate = set_freq; + *freq = info->set_sample_rate; + + if (info->opened) { + broadcast_event(AUDDEV_EVT_FREQ_CHG, i, + SESSION_IGNORE); + set_freq = info->dev_ops.set_freq(info, + set_freq); + broadcast_event(AUDDEV_EVT_DEV_RDY, i, + SESSION_IGNORE); + } + } + pr_debug("info->set_sample_rate = %d\n", info->set_sample_rate); + pr_debug("routing_info.enc_freq.freq = %d\n", + routing_info.enc_freq[session_id].freq); + } + return rc; +} +EXPORT_SYMBOL(msm_snddev_request_freq); + +int msm_snddev_enable_sidetone(u32 dev_id, u32 enable, uint16_t gain) +{ + int rc; + struct msm_snddev_info *dev_info; + + pr_debug("dev_id %d enable %d\n", dev_id, enable); + + dev_info = audio_dev_ctrl_find_dev(dev_id); + + if (IS_ERR(dev_info)) { + pr_aud_err("bad dev_id %d\n", dev_id); + rc = -EINVAL; + } else if (!dev_info->dev_ops.enable_sidetone) { + pr_debug("dev %d no sidetone support\n", dev_id); + rc = -EPERM; + } else + rc = dev_info->dev_ops.enable_sidetone(dev_info, enable, gain); + + return rc; +} +EXPORT_SYMBOL(msm_snddev_enable_sidetone); + +int msm_enable_incall_recording(int popp_id, int rec_mode, int rate, + int channel_mode) +{ + int rc = 0; + unsigned int port_id[2]; + port_id[0] = VOICE_RECORD_TX; + port_id[1] = VOICE_RECORD_RX; + + pr_debug("%s: popp_id %d, rec_mode %d, rate %d, channel_mode %d\n", + __func__, popp_id, rec_mode, rate, 
channel_mode); + + mutex_lock(&routing_info.adm_mutex); + + if (rec_mode == VOC_REC_UPLINK) { + rc = afe_start_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[0], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 1, + &port_id[0], port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM matrix map %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[0]); + + } else if (rec_mode == VOC_REC_DOWNLINK) { + rc = afe_start_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[1], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[1]); + + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 1, + &port_id[1], port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM matrix map %d\n", + __func__, rc, port_id[1]); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[1]); + + } else if (rec_mode == VOC_REC_BOTH) { + rc = afe_start_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[0], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[0]); + + rc = afe_start_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[1], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 2, + &port_id[0], port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM matrix map\n", + __func__, rc); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[1]); + } else { + pr_aud_err("%s Unknown rec_mode %d\n", __func__, rec_mode); + + goto fail_cmd; + } + + rc = voice_start_record(rec_mode, 1); + +fail_cmd: + mutex_unlock(&routing_info.adm_mutex); + return rc; +} + +int msm_disable_incall_recording(uint32_t popp_id, uint32_t rec_mode) +{ + int rc = 0; + uint32_t port_id[2]; + port_id[0] = VOICE_RECORD_TX; + port_id[1] = VOICE_RECORD_RX; + + pr_debug("%s: popp_id %d, rec_mode %d\n", __func__, popp_id, rec_mode); + + mutex_lock(&routing_info.adm_mutex); + + rc = voice_start_record(rec_mode, 0); + if (rc < 0) { + pr_aud_err("%s: Error %d stopping record\n", __func__, rc); + + goto fail_cmd; + } + + if (rec_mode == VOC_REC_UPLINK) { + rc = adm_close(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[0]); + + rc = afe_stop_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + + } else if (rec_mode == VOC_REC_DOWNLINK) { + rc = adm_close(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, 
port_id[1]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[1]); + + rc = afe_stop_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + } else if (rec_mode == VOC_REC_BOTH) { + rc = adm_close(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[0]); + + rc = afe_stop_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + + rc = adm_close(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, port_id[1]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[1]); + + rc = afe_stop_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + } else { + pr_aud_err("%s Unknown rec_mode %d\n", __func__, rec_mode); + + goto fail_cmd; + } + +fail_cmd: + mutex_unlock(&routing_info.adm_mutex); + return rc; +} + +static long audio_dev_ctrl_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int rc = 0; + struct audio_dev_ctrl_state *dev_ctrl = file->private_data; + + mutex_lock(&session_lock); + switch (cmd) { + case AUDIO_GET_NUM_SND_DEVICE: + rc = put_user(dev_ctrl->num_dev, (uint32_t __user *) arg); + break; + case AUDIO_GET_SND_DEVICES: + rc = audio_dev_ctrl_get_devices(dev_ctrl, (void __user *) arg); + break; + case AUDIO_ENABLE_SND_DEVICE: { + struct msm_snddev_info *dev_info; + u32 dev_id; + + if (get_user(dev_id, (u32 __user *) arg)) { + rc = -EFAULT; + break; + } + dev_info = audio_dev_ctrl_find_dev(dev_id); + if (IS_ERR(dev_info)) + rc = PTR_ERR(dev_info); + else { + rc = dev_info->dev_ops.open(dev_info); + if (!rc) + dev_info->opened = 1; + wake_up(&audio_dev_ctrl.wait); + } + break; + + } + + case AUDIO_DISABLE_SND_DEVICE: { + struct msm_snddev_info *dev_info; + u32 dev_id; + + if (get_user(dev_id, (u32 __user *) arg)) { + rc = -EFAULT; + break; + } + dev_info = audio_dev_ctrl_find_dev(dev_id); + if (IS_ERR(dev_info)) + rc = PTR_ERR(dev_info); + else { + rc = dev_info->dev_ops.close(dev_info); + dev_info->opened = 0; + } + break; + } + + case AUDIO_ROUTE_STREAM: { + struct msm_audio_route_config route_cfg; + struct msm_snddev_info *dev_info; + + if (copy_from_user(&route_cfg, (void __user *) arg, + sizeof(struct msm_audio_route_config))) { + rc = -EFAULT; + break; + } + pr_debug("%s: route cfg %d %d type\n", __func__, + route_cfg.dev_id, route_cfg.stream_type); + dev_info = audio_dev_ctrl_find_dev(route_cfg.dev_id); + if (IS_ERR(dev_info)) { + pr_aud_err("%s: pass invalid dev_id\n", __func__); + rc = PTR_ERR(dev_info); + break; + } + + switch (route_cfg.stream_type) { + + case AUDIO_ROUTE_STREAM_VOICE_RX: + if (!(dev_info->capability & SNDDEV_CAP_RX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + dev_ctrl->voice_rx_dev = dev_info; + break; + case AUDIO_ROUTE_STREAM_VOICE_TX: + if (!(dev_info->capability & SNDDEV_CAP_TX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + dev_ctrl->voice_tx_dev = dev_info; + break; + } + break; + } + + default: + rc = -EINVAL; + } + mutex_unlock(&session_lock); + return rc; +} + + +static int audio_dev_ctrl_open(struct inode *inode, struct file *file) +{ + pr_debug("open audio_dev_ctrl\n"); + atomic_inc(&audio_dev_ctrl.opened); + file->private_data = &audio_dev_ctrl; + 
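
A minimal user-space sketch of driving the ioctls handled by audio_dev_ctrl_ioctl() above; the AUDIO_* numbers, SNDDEV_CAP_* bits and struct layouts are assumed to come from a linux/msm_audio.h style uapi header that is not part of this patch:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/msm_audio.h>   /* assumed location of the ioctl defines */

    int main(void)
    {
        uint32_t num_dev = 0, dev_id = 0;
        struct msm_audio_route_config route;
        int fd = open("/dev/msm_audio_dev_ctrl", O_RDWR);

        if (fd < 0)
            return 1;

        /* AUDIO_GET_NUM_SND_DEVICE: put_user() of audio_dev_ctrl.num_dev */
        if (ioctl(fd, AUDIO_GET_NUM_SND_DEVICE, &num_dev) == 0)
            printf("%u sound devices registered\n", num_dev);

        /* AUDIO_ENABLE_SND_DEVICE: calls dev_ops.open() and marks it opened */
        if (num_dev > 0)
            ioctl(fd, AUDIO_ENABLE_SND_DEVICE, &dev_id);

        /* AUDIO_ROUTE_STREAM: only accepted when the device advertises both
         * SNDDEV_CAP_RX and SNDDEV_CAP_VOICE */
        route.dev_id = dev_id;
        route.stream_type = AUDIO_ROUTE_STREAM_VOICE_RX;
        ioctl(fd, AUDIO_ROUTE_STREAM, &route);

        close(fd);
        return 0;
    }
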
return 0; +} + +static int audio_dev_ctrl_release(struct inode *inode, struct file *file) +{ + pr_debug("release audio_dev_ctrl\n"); + atomic_dec(&audio_dev_ctrl.opened); + return 0; +} + +static const struct file_operations audio_dev_ctrl_fops = { + .owner = THIS_MODULE, + .open = audio_dev_ctrl_open, + .release = audio_dev_ctrl_release, + .unlocked_ioctl = audio_dev_ctrl_ioctl, +}; + + +struct miscdevice audio_dev_ctrl_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_audio_dev_ctrl", + .fops = &audio_dev_ctrl_fops, +}; + +/* session id is 64 bit routing mask per device + * 0-15 for voice clients + * 16-31 for Decoder clients + * 32-47 for Encoder clients + * 48-63 Do not care + */ +void broadcast_event(u32 evt_id, u32 dev_id, u64 session_id) +{ + int clnt_id = 0, i; + union auddev_evt_data *evt_payload = NULL; + struct msm_snd_evt_listner *callback; + struct msm_snddev_info *dev_info = NULL; + u64 session_mask = 0; + static int pending_sent; + + pr_debug(": evt_id = %d\n", evt_id); + + if ((evt_id != AUDDEV_EVT_START_VOICE) + && (evt_id != AUDDEV_EVT_END_VOICE) + && (evt_id != AUDDEV_EVT_STREAM_VOL_CHG) + && (evt_id != AUDDEV_EVT_VOICE_STATE_CHG)) + dev_info = audio_dev_ctrl_find_dev(dev_id); + +#ifdef CONFIG_MSM8X60_RTAC + update_rtac(evt_id, dev_id, dev_info); +#endif + + if (event.cb != NULL) + callback = event.cb; + else + return; + mutex_lock(&session_lock); + + if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + routing_info.voice_state = dev_id; + + evt_payload = kzalloc(sizeof(union auddev_evt_data), + GFP_KERNEL); + + if (evt_payload == NULL) { + pr_aud_err("%s: fail to allocate evt_payload", __func__); + return; + } + + for (; ;) { + if (!(evt_id & callback->evt_id)) { + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + clnt_id = callback->clnt_id; + memset(evt_payload, 0, sizeof(union auddev_evt_data)); + + if (evt_id == AUDDEV_EVT_START_VOICE) + routing_info.call_state = 1; + if (evt_id == AUDDEV_EVT_END_VOICE) + routing_info.call_state = 0; + + if ((evt_id == AUDDEV_EVT_START_VOICE) + || (evt_id == AUDDEV_EVT_END_VOICE)) + goto skip_check; + if (callback->clnt_type == AUDDEV_CLNT_AUDIOCAL) + goto aud_cal; + + session_mask = (((u64)0x1) << clnt_id) + << (MAX_BIT_PER_CLIENT * \ + ((int)callback->clnt_type-1)); + + if ((evt_id == AUDDEV_EVT_STREAM_VOL_CHG) || \ + (evt_id == AUDDEV_EVT_VOICE_STATE_CHG)) { + pr_debug("AUDDEV_EVT_STREAM_VOL_CHG or\ + AUDDEV_EVT_VOICE_STATE_CHG\n"); + goto volume_strm; + } + if (dev_info) + pr_debug("dev_info->sessions = %llu\n", dev_info->sessions); + else { + pr_aud_err("dev_info is NULL\n"); + break; + } + if ((!session_id && !(dev_info->sessions & session_mask)) || + (session_id && ((dev_info->sessions & session_mask) != + session_id))) { + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + if (evt_id == AUDDEV_EVT_DEV_CHG_VOICE) + goto voc_events; + +volume_strm: + if (callback->clnt_type == AUDDEV_CLNT_DEC) { + pr_debug("AUDDEV_CLNT_DEC\n"); + if (evt_id == AUDDEV_EVT_STREAM_VOL_CHG) { + pr_debug("clnt_id = %d, session_id = %llu\n", + clnt_id, session_id); + if (session_mask != session_id) + goto sent_dec; + else + evt_payload->session_vol = + msm_vol_ctl.volume; + } else if (evt_id == AUDDEV_EVT_FREQ_CHG) { + if (routing_info.dec_freq[clnt_id].evt) { + routing_info.dec_freq[clnt_id].evt + = 0; + goto sent_dec; + } else if (routing_info.dec_freq[clnt_id].freq + == dev_info->set_sample_rate) + goto sent_dec; + else { + 
evt_payload->freq_info.sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.dev_type + = dev_info->capability; + evt_payload->freq_info.acdb_dev_id + = dev_info->acdb_id; + } + } else if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else + evt_payload->routing_id = dev_info->copp_id; + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); +sent_dec: + if ((evt_id != AUDDEV_EVT_STREAM_VOL_CHG) && + (evt_id != AUDDEV_EVT_VOICE_STATE_CHG)) + routing_info.dec_freq[clnt_id].freq + = dev_info->set_sample_rate; + + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + if (callback->clnt_type == AUDDEV_CLNT_ENC) { + pr_debug("AUDDEV_CLNT_ENC\n"); + if (evt_id == AUDDEV_EVT_FREQ_CHG) { + if (routing_info.enc_freq[clnt_id].evt) { + routing_info.enc_freq[clnt_id].evt + = 0; + goto sent_enc; + } else { + evt_payload->freq_info.sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.dev_type + = dev_info->capability; + evt_payload->freq_info.acdb_dev_id + = dev_info->acdb_id; + } + } else if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else { + if (dev_info) + evt_payload->routing_id = dev_info->copp_id; + else + pr_aud_info("dev_info == NULL\n"); + } + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); +sent_enc: + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } +aud_cal: + if (callback->clnt_type == AUDDEV_CLNT_AUDIOCAL) { + pr_debug("AUDDEV_CLNT_AUDIOCAL\n"); + if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else if (!dev_info->sessions) + goto sent_aud_cal; + else { + evt_payload->audcal_info.dev_id = + dev_info->copp_id; + evt_payload->audcal_info.acdb_id = + dev_info->acdb_id; + evt_payload->audcal_info.dev_type = + (dev_info->capability & SNDDEV_CAP_TX) ? + SNDDEV_CAP_TX : SNDDEV_CAP_RX; + evt_payload->audcal_info.sample_rate = + dev_info->set_sample_rate ? 
+ dev_info->set_sample_rate : + dev_info->sample_rate; + } + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); + +sent_aud_cal: + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } +skip_check: +voc_events: + if (callback->clnt_type == AUDDEV_CLNT_VOC) { + pr_debug("AUDDEV_CLNT_VOC\n"); + if (evt_id == AUDDEV_EVT_DEV_RLS) { + if (!pending_sent) + goto sent_voc; + else + pending_sent = 0; + } + if (evt_id == AUDDEV_EVT_REL_PENDING) + pending_sent = 1; + + if (evt_id == AUDDEV_EVT_DEVICE_VOL_MUTE_CHG) { + if (dev_info->capability & SNDDEV_CAP_TX) { + evt_payload->voc_vm_info.dev_type = + SNDDEV_CAP_TX; + evt_payload->voc_vm_info.acdb_dev_id = + dev_info->acdb_id; + evt_payload-> + voc_vm_info.dev_vm_val.mute = + routing_info.tx_mute; + } else { + evt_payload->voc_vm_info.dev_type = + SNDDEV_CAP_RX; + evt_payload->voc_vm_info.acdb_dev_id = + dev_info->acdb_id; + evt_payload-> + voc_vm_info.dev_vm_val.vol = + routing_info.voice_rx_vol; + } + } else if ((evt_id == AUDDEV_EVT_START_VOICE) + || (evt_id == AUDDEV_EVT_END_VOICE)) + memset(evt_payload, 0, + sizeof(union auddev_evt_data)); + else if (evt_id == AUDDEV_EVT_FREQ_CHG) { + if (routing_info.voice_tx_sample_rate + != dev_info->set_sample_rate) { + routing_info.voice_tx_sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.dev_type + = dev_info->capability; + evt_payload->freq_info.acdb_dev_id + = dev_info->acdb_id; + } else + goto sent_voc; + } else if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else { + evt_payload->voc_devinfo.dev_type = + (dev_info->capability & SNDDEV_CAP_TX) ? + SNDDEV_CAP_TX : SNDDEV_CAP_RX; + evt_payload->voc_devinfo.acdb_dev_id = + dev_info->acdb_id; + evt_payload->voc_devinfo.dev_port_id = + dev_info->copp_id; + evt_payload->voc_devinfo.dev_sample = + dev_info->set_sample_rate ? 
+ dev_info->set_sample_rate : + dev_info->sample_rate; + evt_payload->voc_devinfo.dev_id = dev_id; + if (dev_info->capability & SNDDEV_CAP_RX) { + for (i = 0; i < VOC_RX_VOL_ARRAY_NUM; + i++) { + evt_payload-> + voc_devinfo.max_rx_vol[i] = + dev_info->max_voc_rx_vol[i]; + evt_payload + ->voc_devinfo.min_rx_vol[i] = + dev_info->min_voc_rx_vol[i]; + } + } + } + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); + if (evt_id == AUDDEV_EVT_DEV_RLS) + dev_info->sessions &= ~(0xFFFF); +sent_voc: + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + } + kfree(evt_payload); + mutex_unlock(&session_lock); +} +EXPORT_SYMBOL(broadcast_event); + + +void mixer_post_event(u32 evt_id, u32 id) +{ + + pr_debug("evt_id = %d\n", evt_id); + switch (evt_id) { + case AUDDEV_EVT_DEV_CHG_VOICE: /* Called from Voice_route */ + broadcast_event(AUDDEV_EVT_DEV_CHG_VOICE, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_DEV_RDY: + broadcast_event(AUDDEV_EVT_DEV_RDY, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_DEV_RLS: + broadcast_event(AUDDEV_EVT_DEV_RLS, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_REL_PENDING: + broadcast_event(AUDDEV_EVT_REL_PENDING, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_DEVICE_VOL_MUTE_CHG: + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, id, + SESSION_IGNORE); + break; + case AUDDEV_EVT_STREAM_VOL_CHG: + broadcast_event(AUDDEV_EVT_STREAM_VOL_CHG, id, + SESSION_IGNORE); + break; + case AUDDEV_EVT_START_VOICE: + broadcast_event(AUDDEV_EVT_START_VOICE, + id, SESSION_IGNORE); + break; + case AUDDEV_EVT_END_VOICE: + broadcast_event(AUDDEV_EVT_END_VOICE, + id, SESSION_IGNORE); + break; + case AUDDEV_EVT_FREQ_CHG: + broadcast_event(AUDDEV_EVT_FREQ_CHG, id, SESSION_IGNORE); + break; + default: + break; + } +} +EXPORT_SYMBOL(mixer_post_event); + +static int __init audio_dev_ctrl_init(void) +{ + init_waitqueue_head(&audio_dev_ctrl.wait); + + event.cb = NULL; + + atomic_set(&audio_dev_ctrl.opened, 0); + audio_dev_ctrl.num_dev = 0; + audio_dev_ctrl.voice_tx_dev = NULL; + audio_dev_ctrl.voice_rx_dev = NULL; + routing_info.voice_state = VOICE_STATE_INVALID; + routing_info.call_state = 0; + mutex_init(&adm_tx_topology_tbl.lock); + mutex_init(&routing_info.copp_list_mutex); + mutex_init(&routing_info.adm_mutex); + + memset(routing_info.copp_list, DEVICE_IGNORE, + (sizeof(unsigned int) * MAX_SESSIONS * AFE_MAX_PORTS)); + return misc_register(&audio_dev_ctrl_misc); +} + +static void __exit audio_dev_ctrl_exit(void) +{ +} +module_init(audio_dev_ctrl_init); +module_exit(audio_dev_ctrl_exit); + +MODULE_DESCRIPTION("MSM 8K Audio Device Control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_lpa.c b/arch/arm/mach-msm/qdsp6v3/audio_lpa.c new file mode 100644 index 00000000..7845a35b --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_lpa.c @@ -0,0 +1,1435 @@ +/* low power audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_lpa.h" + +#include +#include +#include + +#include +#include + +#define MAX_BUF 4 +#define BUFSZ (655360) + +#define AUDDEC_DEC_PCM 0 + +#define AUDLPA_EVENT_NUM 10 /* Default number of pre-allocated event packets */ + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct audlpa_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audlpa_pmem_region { + struct list_head list; + struct file *file; + int fd; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audlpa_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; +}; + +struct audlpa_dec { + char *name; + int dec_attrb; + long (*ioctl)(struct file *, unsigned int, unsigned long); + int (*set_params)(void *); +}; + +static void audlpa_post_event(struct audio *audio, int type, + union msm_audio_event_payload payload); +static unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr, + unsigned long len, int ref_up); +static void audlpa_async_send_data(struct audio *audio, unsigned needed, + uint32_t token); +static int audlpa_pause(struct audio *audio); +static void audlpa_unmap_pmem_region(struct audio *audio); +static long pcm_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int audlpa_set_pcm_params(void *data); + +struct audlpa_dec audlpa_decs[] = { + {"msm_pcm_lp_dec", AUDDEC_DEC_PCM, &pcm_ioctl, + &audlpa_set_pcm_params}, +}; + +static void lpa_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct audio *audio = (struct audio *) private_data; + int rc = 0; + + switch (evt_id) { + case AUDDEV_EVT_STREAM_VOL_CHG: + audio->volume = evt_payload->session_vol; + pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, " + "enabled = %d\n", __func__, audio->volume, + audio->out_enabled); + if (audio->out_enabled == 1) { + if (audio->ac) { + rc = q6asm_set_volume(audio->ac, audio->volume); + if (rc < 0) { + pr_aud_err("%s: Send Volume command failed" + " rc=%d\n", __func__, rc); + } + } + } + break; + default: + pr_aud_err("%s:ERROR:wrong event\n", __func__); + break; + } +} + +static void audlpa_prevent_sleep(struct audio *audio) +{ + pr_debug("%s:\n", __func__); + wake_lock(&audio->wakelock); +} + +static void audlpa_allow_sleep(struct audio *audio) +{ + pr_debug("%s:\n", __func__); + wake_unlock(&audio->wakelock); +} + +/* must be called with audio->lock held */ +static int audio_enable(struct audio *audio) +{ + pr_aud_info("%s\n", __func__); + + return 
q6asm_run(audio->ac, 0, 0, 0); + +} + +static int audlpa_async_flush(struct audio *audio) +{ + struct audlpa_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + int rc = 0; + + pr_aud_info("%s:out_enabled = %d, drv_status = 0x%x\n", __func__, + audio->out_enabled, audio->drv_status); + if (audio->out_enabled) { + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audlpa_buffer_node, + list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audlpa_post_event(audio, AUDIO_EVENT_WRITE_DONE, + payload); + kfree(buf_node); + } + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & ADRV_STATUS_PAUSE)) { + rc = audlpa_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, + rc); + } + + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_aud_err("%s: flush cmd failed rc=%d\n", __func__, rc); + + audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; + audio->out_needed = 0; + + if (audio->stopped == 0) { + rc = audio_enable(audio); + if (rc < 0) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->out_enabled = 1; + audio->out_needed = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + wake_up(&audio->write_wait); + } + return rc; +} + +/* must be called with audio->lock held */ +static int audio_disable(struct audio *audio) +{ + int rc = 0; + + pr_aud_info("%s:%d %d\n", __func__, audio->opened, audio->out_enabled); + + if (audio->opened) { + audio->out_enabled = 0; + audio->opened = 0; + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("%s: CLOSE cmd failed\n", __func__); + else + pr_debug("%s: rxed CLOSE resp\n", __func__); + audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; + wake_up(&audio->write_wait); + audio->out_needed = 0; + } + return rc; +} +static int audlpa_pause(struct audio *audio) +{ + int rc = 0; + + pr_aud_info("%s, enabled = %d\n", __func__, + audio->out_enabled); + if (audio->out_enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, rc); + + } else + pr_aud_err("%s: Driver not enabled\n", __func__); + return rc; +} + +/* ------------------- dsp --------------------- */ +static void audlpa_async_send_data(struct audio *audio, unsigned needed, + uint32_t token) +{ + unsigned long flags; + struct audio_client *ac; + int rc = 0; + + pr_debug("%s:\n", __func__); + spin_lock_irqsave(&audio->dsp_lock, flags); + + pr_debug("%s: needed = %d, out_needed = %d, token = 0x%x\n", + __func__, needed, audio->out_needed, token); + if (needed && !audio->wflush) { + audio->out_needed = 1; + if (audio->drv_status & ADRV_STATUS_OBUF_GIVEN) { + /* pop one node out of queue */ + union msm_audio_event_payload evt_payload; + struct audlpa_buffer_node *used_buf; + + used_buf = list_first_entry(&audio->out_queue, + struct audlpa_buffer_node, list); + if (token == used_buf->paddr) { + pr_debug("%s, Release: addr: %lx," + " token = 0x%x\n", __func__, + used_buf->paddr, token); + list_del(&used_buf->list); + evt_payload.aio_buf = used_buf->buf; + audlpa_post_event(audio, AUDIO_EVENT_WRITE_DONE, + evt_payload); + kfree(used_buf); + audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; + } + } + } + pr_debug("%s: out_needed = %d, stopped = %d, drv_status = 0x%x\n", + __func__, audio->out_needed, audio->stopped, + audio->drv_status); + if (audio->out_needed && (audio->stopped == 0)) { + 
struct audlpa_buffer_node *next_buf; + struct audio_aio_write_param param; + if (!list_empty(&audio->out_queue)) { + pr_debug("%s: list not empty\n", __func__); + next_buf = list_first_entry(&audio->out_queue, + struct audlpa_buffer_node, list); + if (next_buf) { + pr_debug("%s: Send: addr: %lx\n", __func__, + next_buf->paddr); + ac = audio->ac; + param.paddr = next_buf->paddr; + param.len = next_buf->buf.data_len; + param.msw_ts = 0; + param.lsw_ts = 0; + /* No time stamp valid */ + param.flags = NO_TIMESTAMP; + param.uid = next_buf->paddr; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:q6asm_async_write failed\n", + __func__); + audio->out_needed = 0; + audio->drv_status |= ADRV_STATUS_OBUF_GIVEN; + } + } else if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s: list is empty, reached EOS\n", __func__); + wake_up(&audio->write_wait); + } + } + + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +static int audlpa_events_pending(struct audio *audio) +{ + int empty; + + spin_lock(&audio->event_queue_lock); + empty = !list_empty(&audio->event_queue); + spin_unlock(&audio->event_queue_lock); + return empty || audio->event_abort; +} + +static void audlpa_reset_event_queue(struct audio *audio) +{ + struct audlpa_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock(&audio->event_queue_lock); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audlpa_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audlpa_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock(&audio->event_queue_lock); + + return; +} + +static long audlpa_process_event_req(struct audio *audio, void __user *arg) +{ + long rc; + struct msm_audio_event usr_evt; + struct audlpa_event *drv_evt = NULL; + int timeout; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) + return -EFAULT; + + timeout = (int) usr_evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout( + audio->event_wait, audlpa_events_pending(audio), + msecs_to_jiffies(timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible( + audio->event_wait, audlpa_events_pending(audio)); + } + + if (rc < 0) + return rc; + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock(&audio->event_queue_lock); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audlpa_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt.event_type = drv_evt->event_type; + usr_evt.event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + rc = -1; + spin_unlock(&audio->event_queue_lock); + return rc; + } + spin_unlock(&audio->event_queue_lock); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE || + drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("%s: AUDIO_EVENT_WRITE_DONE completing\n", __func__); + mutex_lock(&audio->lock); + audlpa_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0); + mutex_unlock(&audio->lock); + } + if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt))) + rc = -EFAULT; + + return rc; +} + +static int audlpa_pmem_check(struct audio *audio, + void *vaddr, unsigned long len) +{ + struct 
audlpa_pmem_region *region_elt; + struct audlpa_pmem_region t = { .vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_aud_err("%s: region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + __func__, vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, + region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audlpa_pmem_add(struct audio *audio, + struct msm_audio_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct audlpa_pmem_region *region; + int rc = -EINVAL; + + pr_aud_info("%s:\n", __func__); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = audlpa_pmem_check(audio, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + region->ref_cnt = 0; + pr_debug("%s: add region paddr %lx vaddr %p, len %lu\n", __func__, + region->paddr, region->vaddr, + region->len); + list_add_tail(®ion->list, &audio->pmem_region_queue); + rc = q6asm_memory_map(audio->ac, (uint32_t)paddr, IN, (uint32_t)len, 1); + if (rc < 0) + pr_aud_err("%s: memory map failed\n", __func__); +end: + return rc; +} + +static int audlpa_pmem_remove(struct audio *audio, + struct msm_audio_pmem_info *info) +{ + struct audlpa_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audlpa_pmem_region, list); + + if ((region != NULL) && (region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("%s: region %p in use ref_cnt %d\n", + __func__, region, region->ref_cnt); + break; + } + rc = q6asm_memory_unmap(audio->ac, + (uint32_t)region->paddr, + IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +static int audlpa_pmem_lookup_vaddr(struct audio *audio, void *addr, + unsigned long len, struct audlpa_pmem_region **region) +{ + struct audlpa_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->pmem_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_aud_err("%s: multiple hits for vaddr %p, len %ld\n", __func__, + addr, len); + list_for_each_entry(region_elt, + &audio->pmem_region_queue, list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) + pr_aud_err("%s: \t%p, %ld --> %p\n", __func__, + region_elt->vaddr, region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 
0 : -1; +} + +unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr, + unsigned long len, int ref_up) +{ + struct audlpa_pmem_region *region; + unsigned long paddr; + int ret; + + ret = audlpa_pmem_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_aud_err("%s: lookup (%p, %ld) failed\n", __func__, addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + paddr = region->paddr + (addr - region->vaddr); + return paddr; +} + +/* audio -> lock must be held at this point */ +static int audlpa_aio_buf_add(struct audio *audio, unsigned dir, + void __user *arg) +{ + struct audlpa_buffer_node *buf_node; + + buf_node = kmalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) + return -ENOMEM; + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + return -EFAULT; + } + + buf_node->paddr = audlpa_pmem_fixup( + audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.data_len & 0x1)) { + kfree(buf_node); + return -EINVAL; + } + list_add_tail(&buf_node->list, &audio->out_queue); + pr_debug("%s, Added to list: addr: %lx, length = %d\n", + __func__, buf_node->paddr, buf_node->buf.data_len); + audlpa_async_send_data(audio, 0, 0); + } else { + /* read */ + } + return 0; +} + +static int config(struct audio *audio) +{ + int rc = 0; + if (!audio->out_prefill) { + if (audio->codec_ops.set_params != NULL) { + rc = audio->codec_ops.set_params(audio); + audio->out_prefill = 1; + } + } + return rc; +} + +void q6_audlpa_out_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct audio *audio = (struct audio *) priv; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + pr_debug("%s: ASM_DATA_EVENT_WRITE_DONE, token = 0x%x\n", + __func__, token); + audlpa_async_send_data(audio, 1, token); + break; + case ASM_DATA_EVENT_EOS: + case ASM_DATA_CMDRSP_EOS: + pr_debug("%s: ASM_DATA_CMDRSP_EOS, teos = %d\n", __func__, + audio->teos); + if (audio->teos == 0) { + audio->teos = 1; + wake_up(&audio->write_wait); + } + break; + case ASM_SESSION_CMDRSP_GET_SESSION_TIME: + break; + default: + break; + } +} + +static long pcm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + pr_debug("%s: cmd = %d\n", __func__, cmd); + return -EINVAL; +} + +static int audlpa_set_pcm_params(void *data) +{ + struct audio *audio = (struct audio *)data; + int rc; + + rc = q6asm_media_format_block_pcm(audio->ac, audio->out_sample_rate, + audio->out_channel_mode); + if (rc < 0) + pr_aud_err("%s: Format block pcm failed\n", __func__); + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = -EINVAL; + uint64_t timestamp = 0; + uint64_t temp; + + pr_debug("%s: audio_ioctl() cmd = %d\n", __func__, cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + + pr_aud_info("%s: audio_get_stats command\n", __func__); + memset(&stats, 0, sizeof(stats)); + timestamp = q6asm_get_session_time(audio->ac); + if (timestamp < 0) { + pr_aud_err("%s: Get Session Time return value =%lld\n", + __func__, timestamp); + return -EAGAIN; + } + temp = (timestamp * 2 * audio->out_channel_mode); + temp = temp * (audio->out_sample_rate/1000); + temp = div_u64(temp, 1000); + audio->bytes_consumed = (uint32_t)(temp & 0xFFFFFFFF); + stats.byte_count = audio->bytes_consumed; + stats.unused[0] = (uint32_t)((temp >> 32) & 0xFFFFFFFF); + 
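
The byte-count conversion above implies that the session time from q6asm_get_session_time() is in microseconds and that samples are 16-bit (the factor of 2); the 64-bit result is then split across byte_count and unused[0]. A quick stand-alone check of the arithmetic, assuming 10 s of 48 kHz stereo playback:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* mirrors the driver: ts * 2 * channels * (rate / 1000) / 1000 */
        uint64_t ts_us    = 10ULL * 1000 * 1000;  /* 10 s of session time */
        uint64_t channels = 2;
        uint64_t rate     = 48000;
        uint64_t bytes    = ts_us * 2 * channels * (rate / 1000) / 1000;

        /* 10 s * 48000 samples/s * 2 ch * 2 bytes = 1,920,000 */
        printf("%llu\n", (unsigned long long)bytes);
        return 0;
    }
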
pr_debug("%s: bytes_consumed:lsb = %d, msb = %d," + "timestamp = %lld\n", __func__, + audio->bytes_consumed, stats.unused[0], timestamp); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + + switch (cmd) { + case AUDIO_ENABLE_AUDPP: + break; + + case AUDIO_SET_VOLUME: + break; + + case AUDIO_SET_PAN: + break; + + case AUDIO_SET_EQ: + break; + } + + if (cmd == AUDIO_GET_EVENT) { + pr_debug("%s: AUDIO_GET_EVENT\n", __func__); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audlpa_process_event_req(audio, + (void __user *) arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + return rc; + } + + if (cmd == AUDIO_ABORT_GET_EVENT) { + audio->event_abort = 1; + wake_up(&audio->event_wait); + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + pr_aud_info("%s: AUDIO_START: Session %d\n", __func__, + audio->ac->session); + if (!audio->opened) { + pr_aud_err("%s: Driver not opened\n", __func__); + rc = -EFAULT; + goto fail; + } + rc = config(audio); + if (rc) { + pr_aud_err("%s: Out Configuration failed\n", __func__); + rc = -EFAULT; + goto fail; + } + + rc = audio_enable(audio); + if (rc) { + pr_aud_err("%s: audio enable failed\n", __func__); + rc = -EFAULT; + goto fail; + } else { + struct asm_softpause_params param = { + .enable = SOFT_PAUSE_ENABLE, + .period = SOFT_PAUSE_PERIOD, + .step = SOFT_PAUSE_STEP, + .rampingcurve = SOFT_PAUSE_CURVE_LINEAR, + }; + audio->out_enabled = 1; + audio->out_needed = 1; + rc = q6asm_set_volume(audio->ac, audio->volume); + if (rc < 0) + pr_aud_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + rc = q6asm_set_softpause(audio->ac, ¶m); + if (rc < 0) + pr_aud_err("%s: Send SoftPause Param failed rc=%d\n", + __func__, rc); + rc = q6asm_set_lrgain(audio->ac, 0x2000, 0x2000); + if (rc < 0) + pr_aud_err("%s: Send channel gain failed rc=%d\n", + __func__, rc); + /* disable mute by default */ + rc = q6asm_set_mute(audio->ac, 0); + if (rc < 0) + pr_aud_err("%s: Send mute command failed rc=%d\n", + __func__, rc); + if (!list_empty(&audio->out_queue)) + pr_aud_err("%s: write_list is not empty!!!\n", + __func__); + if (audio->stopped == 1) + audio->stopped = 0; + audlpa_prevent_sleep(audio); + } + break; + + case AUDIO_STOP: + pr_aud_info("%s: AUDIO_STOP: session_id:%d\n", __func__, + audio->ac->session); + audio->stopped = 1; + audlpa_async_flush(audio); + audio->out_enabled = 0; + audio->out_needed = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audlpa_allow_sleep(audio); + break; + + case AUDIO_FLUSH: + pr_aud_info("%s: AUDIO_FLUSH: session_id:%d\n", __func__, + audio->ac->session); + audio->wflush = 1; + if (audio->out_enabled) + rc = audlpa_async_flush(audio); + else + audio->wflush = 0; + audio->wflush = 0; + break; + + case AUDIO_SET_CONFIG:{ + struct msm_audio_config config; + pr_aud_info("%s: AUDIO_SET_CONFIG\n", __func__); + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + pr_aud_err("%s: ERROR: copy from user\n", __func__); + break; + } + if (!((config.channel_count == 1) || + (config.channel_count == 2))) { + rc = -EINVAL; + pr_aud_err("%s: ERROR: config.channel_count == %d\n", + __func__, config.channel_count); + break; + } + + if (!((config.bits == 8) || (config.bits == 16) || + (config.bits == 24))) { + rc = -EINVAL; + pr_aud_err("%s: ERROR: config.bits = %d\n", __func__, + config.bits); + break; + } + audio->out_sample_rate = config.sample_rate; + audio->out_channel_mode = config.channel_count; + audio->out_bits = 
config.bits; + audio->buffer_count = config.buffer_count; + audio->buffer_size = config.buffer_size; + rc = 0; + break; + } + + case AUDIO_GET_CONFIG:{ + struct msm_audio_config config; + config.buffer_count = audio->buffer_count; + config.buffer_size = audio->buffer_size; + config.sample_rate = audio->out_sample_rate; + config.channel_count = audio->out_channel_mode; + config.bits = audio->out_bits; + + config.meta_field = 0; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *) arg, &config, sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + + case AUDIO_PAUSE: + pr_aud_info("%s: AUDIO_PAUSE %ld\n", __func__, arg); + if (arg == 1) { + rc = audlpa_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause FAILED rc=%d\n", __func__, + rc); + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audio_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", + __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->out_enabled = 1; + } + } + } + break; + + case AUDIO_REGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_aud_info("%s: AUDIO_REGISTER_PMEM\n", __func__); + if (copy_from_user(&info, (void *) arg, sizeof(info))) + rc = -EFAULT; + else + rc = audlpa_pmem_add(audio, &info); + break; + } + + case AUDIO_DEREGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_aud_info("%s: AUDIO_DEREGISTER_PMEM\n", __func__); + if (copy_from_user(&info, (void *) arg, sizeof(info))) + rc = -EFAULT; + else + rc = audlpa_pmem_remove(audio, &info); + break; + } + case AUDIO_ASYNC_WRITE: + pr_debug("%s: AUDIO_ASYNC_WRITE\n", __func__); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else + rc = audlpa_aio_buf_add(audio, 1, (void __user *) arg); + break; + + case AUDIO_GET_SESSION_ID: + if (copy_to_user((void *) arg, &audio->ac->session, + sizeof(unsigned short))) + return -EFAULT; + rc = 0; + break; + + default: + rc = audio->codec_ops.ioctl(file, cmd, arg); + } +fail: + mutex_unlock(&audio->lock); + return rc; +} + +/* Only useful in tunnel-mode */ +int audlpa_async_fsync(struct audio *audio) +{ + int rc = 0; + + pr_aud_info("%s:Session %d\n", __func__, audio->ac->session); + + /* Blocking client sends more data */ + mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + mutex_lock(&audio->write_lock); + audio->teos = 0; + + rc = wait_event_interruptible(audio->write_wait, + ((list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped)); + + if (audio->wflush || audio->stopped) + goto flush_event; + + if (rc < 0) { + pr_aud_err("%s: wait event for list_empty failed, rc = %d\n", + __func__, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + + if (rc < 0) { + pr_aud_err("%s: q6asm_cmd failed, rc = %d", __func__, rc); + goto done; + } + rc = wait_event_interruptible_timeout(audio->write_wait, + (audio->teos || audio->wflush || + audio->stopped), 5*HZ); + + if (rc < 0) { + pr_aud_err("%s: wait event for teos failed, rc = %d\n", __func__, + rc); + goto done; + } + + if (audio->teos == 1) { + rc = audio_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->out_enabled = 1; + audio->out_needed = 1; + } + } + +flush_event: + if (audio->stopped || audio->wflush) + rc = -EBUSY; + +done: + mutex_unlock(&audio->write_lock); + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + 
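
Taken together, the AUDIO_GET_CONFIG/AUDIO_SET_CONFIG/AUDIO_START handling above expects a configure-then-start sequence from user space. A hedged sketch, assuming the msm_audio_config layout and ioctl numbers from the same (not shown) uapi header as before, and taking the device node path as a parameter since the misc-device naming for the msm_pcm_lp_dec entry is outside this excerpt:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/msm_audio.h>   /* assumed uapi header */

    static int start_lpa_pcm(const char *node)
    {
        struct msm_audio_config cfg;
        int fd = open(node, O_WRONLY); /* write-only => tunnel mode, per audio_open() */

        if (fd < 0)
            return -1;

        /* AUDIO_SET_CONFIG only accepts 1 or 2 channels and 8/16/24 bits */
        if (ioctl(fd, AUDIO_GET_CONFIG, &cfg) < 0)
            goto err;
        cfg.sample_rate   = 48000;
        cfg.channel_count = 2;
        cfg.bits          = 16;
        if (ioctl(fd, AUDIO_SET_CONFIG, &cfg) < 0)
            goto err;

        /* AUDIO_START runs config() + audio_enable() and then programs
         * volume, soft-pause and channel gain as shown above */
        if (ioctl(fd, AUDIO_START, 0) < 0)
            goto err;

        return fd;
    err:
        close(fd);
        return -1;
    }
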
mutex_unlock(&audio->lock); + + return rc; +} + +int audlpa_fsync(struct file *file, int datasync) +{ + struct audio *audio = file->private_data; + + return audlpa_async_fsync(audio); +} + +static void audlpa_reset_pmem_region(struct audio *audio) +{ + struct audlpa_pmem_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audlpa_pmem_region, list); + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + } + + return; +} + +static void audlpa_unmap_pmem_region(struct audio *audio) +{ + struct audlpa_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_aud_info("%s:\n", __func__); + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audlpa_pmem_region, list); + if (region != NULL) { + pr_aud_info("%s: phy_address = 0x%lx\n", __func__, + region->paddr); + rc = q6asm_memory_unmap(audio->ac, + (uint32_t)region->paddr, IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + } + } +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + pr_aud_info("%s: audio instance 0x%08x freeing, session %d\n", __func__, + (int)audio, audio->ac->session); + + mutex_lock(&audio->lock); + audio->wflush = 1; + if (audio->out_enabled) + audlpa_async_flush(audio); + audio->wflush = 0; + audlpa_unmap_pmem_region(audio); + audio_disable(audio); + msm_clear_session_id(audio->ac->session); + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->ac->session); + q6asm_audio_client_free(audio->ac); + audlpa_reset_pmem_region(audio); +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&audio->suspend_ctl.node); +#endif + audio->opened = 0; + audio->out_enabled = 0; + audio->out_prefill = 0; + audio->event_abort = 1; + wake_up(&audio->event_wait); + audlpa_reset_event_queue(audio); + iounmap(audio->data); + pmem_kfree(audio->phys); + if (audio->stopped == 0) + audlpa_allow_sleep(audio); + wake_lock_destroy(&audio->wakelock); + + mutex_unlock(&audio->lock); +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio); + return 0; +} + +static void audlpa_post_event(struct audio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audlpa_event *e_node = NULL; + + spin_lock(&audio->event_queue_lock); + + pr_debug("%s:\n", __func__); + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audlpa_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audlpa_event), GFP_ATOMIC); + if (!e_node) { + pr_aud_err("%s: No mem to post event %d\n", __func__, type); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock(&audio->event_queue_lock); + wake_up(&audio->event_wait); +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void audlpa_suspend(struct early_suspend *h) +{ + struct audlpa_suspend_ctl *ctl = + container_of(h, struct audlpa_suspend_ctl, node); + union msm_audio_event_payload payload; + + pr_aud_info("%s:\n", __func__); + audlpa_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload); +} + +static void audlpa_resume(struct early_suspend *h) +{ + struct audlpa_suspend_ctl *ctl = + container_of(h, struct audlpa_suspend_ctl, node); + union msm_audio_event_payload payload; + + pr_aud_info("%s:\n", __func__); + audlpa_post_event(ctl->audio, 
AUDIO_EVENT_RESUME, payload); +} +#endif + +#ifdef CONFIG_DEBUG_FS +static ssize_t audlpa_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t audlpa_debug_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct audio *audio = file->private_data; + + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "out_enabled %d\n", audio->out_enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "volume %x\n", audio->volume); + n += scnprintf(buffer + n, debug_bufmax - n, + "sample rate %d\n", + audio->out_sample_rate); + n += scnprintf(buffer + n, debug_bufmax - n, + "channel mode %d\n", + audio->out_channel_mode); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "running %d\n", audio->running); + n += scnprintf(buffer + n, debug_bufmax - n, + "out_needed %d\n", audio->out_needed); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static const struct file_operations audlpa_debug_fops = { + .read = audlpa_debug_read, + .open = audlpa_debug_open, +}; +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = NULL; + int rc, i, dec_attrb = 0; + struct audlpa_event *e_node = NULL; +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_lpa_" + 5]; +#endif + char wake_lock_name[24]; + + /* Allocate audio instance, set to zero */ + audio = kzalloc(sizeof(struct audio), GFP_KERNEL); + if (!audio) { + pr_aud_err("%s: no memory to allocate audio instance\n", __func__); + rc = -ENOMEM; + goto done; + } + + if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { + pr_aud_info("%s: Tunnel Mode playback\n", __func__); + } else { + kfree(audio); + rc = -EACCES; + goto done; + } + + /* Allocate the decoder based on inode minor number*/ + audio->minor_no = iminor(inode); + + if (audio->minor_no >= ARRAY_SIZE(audlpa_decs)) { + pr_aud_err("%s: incorrect inode %d\n", __func__, audio->minor_no); + kfree(audio); + rc = -EINVAL; + goto done; + } + + dec_attrb |= audlpa_decs[audio->minor_no].dec_attrb; + audio->codec_ops.ioctl = audlpa_decs[audio->minor_no].ioctl; + audio->codec_ops.set_params = audlpa_decs[audio->minor_no].set_params; + audio->buffer_size = BUFSZ; + audio->buffer_count = MAX_BUF; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6_audlpa_out_cb, + (void *)audio); + if (!audio->ac) { + pr_aud_err("%s: Could not allocate memory for lpa client\n", + __func__); + rc = -ENOMEM; + goto err; + } + rc = q6asm_open_write(audio->ac, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_info("%s: lpa out open failed\n", __func__); + goto err; + } + + pr_debug("%s: Set mode to AIO session[%d]\n", + __func__, + audio->ac->session); + rc = q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + if (rc < 0) + pr_aud_err("%s: Set IO mode failed\n", __func__); + + + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->write_lock); + 
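/*
 * Illustrative sketch, not part of the original patch: the debugfs read
 * handler above builds its report by accumulating scnprintf() results
 * into n. Unlike snprintf(), scnprintf() returns the number of
 * characters actually stored (never more than the space it was given),
 * so the "buffer + n, debug_bufmax - n" pattern cannot run past the
 * buffer. A minimal userspace imitation of that behaviour:
 */
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int ret;

        if (size == 0)
                return 0;
        va_start(ap, fmt);
        ret = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        /* vsnprintf() reports the would-be length; clamp to what fits. */
        if (ret < 0)
                return 0;
        return ret >= (int)size ? (int)size - 1 : ret;
}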
mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->write_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->pmem_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + init_waitqueue_head(&audio->wait); + init_waitqueue_head(&audio->event_wait); + spin_lock_init(&audio->event_queue_lock); + snprintf(wake_lock_name, sizeof wake_lock_name, "audio_lpa_%x", + audio->ac->session); + wake_lock_init(&audio->wakelock, WAKE_LOCK_SUSPEND, wake_lock_name); + + audio->out_sample_rate = 44100; + audio->out_channel_mode = 2; + audio->out_bits = 16; + audio->volume = 0x2000; + + file->private_data = audio; + audio->opened = 1; + audio->out_enabled = 0; + audio->out_prefill = 0; + audio->bytes_consumed = 0; + + audio->device_events = AUDDEV_EVT_STREAM_VOL_CHG; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + + rc = auddev_register_evt_listner(audio->device_events, + AUDDEV_CLNT_DEC, + audio->ac->session, + lpa_listner, + (void *)audio); + if (rc) { + pr_aud_err("%s: failed to register listner\n", __func__); + goto err; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_lpa_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *) audio, &audlpa_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_aud_info("%s: debugfs_create_file failed\n", __func__); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + audio->suspend_ctl.node.resume = audlpa_resume; + audio->suspend_ctl.node.suspend = audlpa_suspend; + audio->suspend_ctl.audio = audio; + register_early_suspend(&audio->suspend_ctl.node); +#endif + for (i = 0; i < AUDLPA_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audlpa_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_aud_err("%s: event pkt alloc failed\n", __func__); + break; + } + } + pr_aud_info("%s: audio instance 0x%08x created session[%d]\n", __func__, + (int)audio, + audio->ac->session); +done: + return rc; +err: + q6asm_audio_client_free(audio->ac); + iounmap(audio->data); + pmem_kfree(audio->phys); + kfree(audio); + return rc; +} + +static const struct file_operations audio_lpa_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audlpa_fsync, +}; + +static dev_t audlpa_devno; +static struct class *audlpa_class; +struct audlpa_device { + const char *name; + struct device *device; + struct cdev cdev; +}; + +static struct audlpa_device *audlpa_devices; + +static void audlpa_create(struct audlpa_device *adev, const char *name, + struct device *parent, dev_t devt) +{ + struct device *dev; + int rc; + + dev = device_create(audlpa_class, parent, devt, "%s", name); + if (IS_ERR(dev)) + return; + + cdev_init(&adev->cdev, &audio_lpa_fops); + adev->cdev.owner = THIS_MODULE; + + rc = cdev_add(&adev->cdev, devt, 1); + if (rc < 0) { + device_destroy(audlpa_class, devt); + } else { + adev->device = dev; + adev->name = name; + } +} + +static int __init audio_init(void) +{ + int rc; + int n = ARRAY_SIZE(audlpa_decs); + + audlpa_devices = kzalloc(sizeof(struct audlpa_device) * n, GFP_KERNEL); + if (!audlpa_devices) + return -ENOMEM; + + audlpa_class = class_create(THIS_MODULE, "audlpa"); + if (IS_ERR(audlpa_class)) + goto fail_create_class; + + rc = alloc_chrdev_region(&audlpa_devno, 0, n, "msm_audio_lpa"); + if (rc < 0) + goto fail_alloc_region; + + for 
(n = 0; n < ARRAY_SIZE(audlpa_decs); n++) { + audlpa_create(audlpa_devices + n, + audlpa_decs[n].name, NULL, + MKDEV(MAJOR(audlpa_devno), n)); + } + + return 0; + +fail_alloc_region: + class_unregister(audlpa_class); + return rc; +fail_create_class: + kfree(audlpa_devices); + return -ENOMEM; +} + +static void __exit audio_exit(void) +{ + class_unregister(audlpa_class); + kfree(audlpa_devices); +} + +module_init(audio_init); +module_exit(audio_exit); + +MODULE_DESCRIPTION("MSM LPA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_lpa.h b/arch/arm/mach-msm/qdsp6v3/audio_lpa.h new file mode 100644 index 00000000..8e7a22cd --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_lpa.h @@ -0,0 +1,129 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef AUDIO_LPA_H +#define AUDIO_LPA_H + +#include +#include + +#define ADRV_STATUS_OBUF_GIVEN 0x00000001 +#define ADRV_STATUS_IBUF_GIVEN 0x00000002 +#define ADRV_STATUS_FSYNC 0x00000004 +#define ADRV_STATUS_PAUSE 0x00000008 + +#define SOFT_PAUSE_PERIOD 30 /* ramp up/down for 30ms */ +#define SOFT_PAUSE_STEP 2000 /* Step value 2ms or 2000us */ +enum { + SOFT_PAUSE_CURVE_LINEAR = 0, + SOFT_PAUSE_CURVE_EXP, + SOFT_PAUSE_CURVE_LOG, +}; + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +struct audlpa_suspend_ctl { + struct early_suspend node; + struct audio *audio; +}; +#endif + +struct codec_operations { + long (*ioctl)(struct file *, unsigned int, unsigned long); + int (*set_params)(void *); +}; + +struct audio { + spinlock_t dsp_lock; + + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + struct list_head out_queue; /* queue to retain output buffers */ + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + struct audio_client *ac; + + /* configuration to use on next enable */ + uint32_t out_sample_rate; + uint32_t out_channel_mode; + uint32_t out_bits; /* bits per sample (used by PCM decoder) */ + + /* data allocated for various buffers */ + char *data; + int32_t phys; /* physical address of write buffer */ + + uint32_t drv_status; + int wflush; /* Write flush */ + int opened; + int out_enabled; + int out_prefill; + int running; + int stopped; /* set when stopped, cleared on flush */ + int buf_refresh; + int teos; /* valid only if tunnel mode & no data left for decoder */ + +#ifdef CONFIG_HAS_EARLYSUSPEND + struct audlpa_suspend_ctl suspend_ctl; +#endif + + struct wake_lock wakelock; +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + + wait_queue_head_t wait; + struct list_head free_event_queue; + struct list_head event_queue; + wait_queue_head_t event_wait; + spinlock_t event_queue_lock; + struct mutex get_event_lock; + int event_abort; + + uint32_t device_events; + + struct list_head pmem_region_queue; /* protected by lock */ + + int eq_enable; + int eq_needs_commit; + uint32_t volume; + + unsigned int minor_no; + struct codec_operations codec_ops; + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t bytes_consumed; +}; + +#endif /* !AUDIO_LPA_H */ diff --git a/arch/arm/mach-msm/qdsp6v3/audio_mvs.c b/arch/arm/mach-msm/qdsp6v3/audio_mvs.c new file mode 100644 index 00000000..529b5840 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_mvs.c @@ -0,0 +1,998 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Each buffer is 20 ms, queue holds 200 ms of data. 
*/ +#define MVS_MAX_Q_LEN 10 + +/* Length of the DSP frame info header added to the voc packet. */ +#define DSP_FRAME_HDR_LEN 1 + +enum audio_mvs_state_type { + AUDIO_MVS_CLOSED, + AUDIO_MVS_STARTED, + AUDIO_MVS_STOPPED +}; + +struct audio_mvs_buf_node { + struct list_head list; + struct msm_audio_mvs_frame frame; +}; + +struct audio_mvs_info_type { + enum audio_mvs_state_type state; + + uint32_t mvs_mode; + uint32_t rate_type; + uint32_t dtx_mode; + + struct list_head in_queue; + struct list_head free_in_queue; + + struct list_head out_queue; + struct list_head free_out_queue; + + wait_queue_head_t out_wait; + + struct mutex lock; + struct mutex in_lock; + struct mutex out_lock; + + spinlock_t dsp_lock; + + struct wake_lock suspend_lock; + struct wake_lock idle_lock; + + void *memory_chunk; +}; + +static struct audio_mvs_info_type audio_mvs_info; + +static uint32_t audio_mvs_get_rate(uint32_t mvs_mode, uint32_t rate_type) +{ + uint32_t cvs_rate; + + if (mvs_mode == MVS_MODE_AMR_WB) + cvs_rate = rate_type - MVS_AMR_MODE_0660; + else + cvs_rate = rate_type; + + pr_debug("%s: CVS rate is %d for MVS mode %d\n", + __func__, cvs_rate, mvs_mode); + + return cvs_rate; +} + +static void audio_mvs_process_ul_pkt(uint8_t *voc_pkt, + uint32_t pkt_len, + void *private_data) +{ + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = private_data; + unsigned long dsp_flags; + + /* Copy up-link packet into out_queue. */ + spin_lock_irqsave(&audio->dsp_lock, dsp_flags); + + if (!list_empty(&audio->free_out_queue)) { + buf_node = list_first_entry(&audio->free_out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + switch (audio->mvs_mode) { + case MVS_MODE_AMR: + case MVS_MODE_AMR_WB: { + /* Remove the DSP frame info header. Header format: + * Bits 0-3: Frame rate + * Bits 4-7: Frame type + */ + buf_node->frame.frame_type = ((*voc_pkt) & 0xF0) >> 4; + buf_node->frame.frame_rate = ((*voc_pkt) & 0x0F); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->out_queue); + break; + } + + case MVS_MODE_IS127: { + buf_node->frame.frame_type = 0; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->out_queue); + break; + } + + case MVS_MODE_G729A: { + /* G729 frames are 10ms each, but the DSP works with + * 20ms frames and sends two 10ms frames per buffer. + * Extract the two frames and put them in separate + * buffers. + */ + /* Remove the first DSP frame info header. + * Header format: + * Bits 0-1: Frame type + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length of the + * first frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->out_queue); + + /* Get another buffer from the free Q and fill in the + * second frame. + */ + if (!list_empty(&audio->free_out_queue)) { + buf_node = + list_first_entry(&audio->free_out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Remove the second DSP frame info header. 
+ * Header format: + * Bits 0-1: Frame type + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length + * of the first frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->out_queue); + + } else { + /* Drop the second frame. */ + pr_aud_err("%s: UL data dropped, read is slow\n", + __func__); + } + + break; + } + + case MVS_MODE_G711A: { + /* G711 frames are 10ms each, but the DSP works with + * 20ms frames and sends two 10ms frames per buffer. + * Extract the two frames and put them in separate + * buffers. + */ + /* Remove the first DSP frame info header. + * Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length of the + * first frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->out_queue); + + /* Get another buffer from the free Q and fill in the + * second frame. + */ + if (!list_empty(&audio->free_out_queue)) { + buf_node = + list_first_entry(&audio->free_out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Remove the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length + * of the second frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->out_queue); + } else { + /* Drop the second frame. */ + pr_aud_err("%s: UL data dropped, read is slow\n", + __func__); + } + break; + } + + default: { + buf_node->frame.frame_type = 0; + + buf_node->frame.len = pkt_len; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->out_queue); + } + } + } else { + pr_aud_err("%s: UL data dropped, read is slow\n", __func__); + } + + spin_unlock_irqrestore(&audio->dsp_lock, dsp_flags); + + wake_up(&audio->out_wait); +} + +static void audio_mvs_process_dl_pkt(uint8_t *voc_pkt, + uint32_t *pkt_len, + void *private_data) +{ + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = private_data; + unsigned long dsp_flags; + + spin_lock_irqsave(&audio->dsp_lock, dsp_flags); + + if (!list_empty(&audio->in_queue)) { + uint32_t rate_type = audio_mvs_get_rate(audio->mvs_mode, + audio->rate_type); + + buf_node = list_first_entry(&audio->in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + switch (audio->mvs_mode) { + case MVS_MODE_AMR: + case MVS_MODE_AMR_WB: { + /* Add the DSP frame info header. 
Header format: + * Bits 0-3: Frame rate + * Bits 4-7: Frame type + */ + *voc_pkt = ((buf_node->frame.frame_type & 0x0F) << 4) | + (rate_type & 0x0F); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->free_in_queue); + break; + } + + case MVS_MODE_IS127: { + /* Add the DSP frame info header. Header format: + * Bits 0-3: Frame rate + * Bits 4-7: Frame type + */ + *voc_pkt = rate_type & 0x0F; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->free_in_queue); + break; + } + + case MVS_MODE_G729A: { + /* G729 frames are 10ms each but the DSP expects 20ms + * worth of data, so send two 10ms frames per buffer. + */ + /* Add the first DSP frame info header. Header format: + * Bits 0-1: Frame type + */ + *voc_pkt = buf_node->frame.frame_type & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->free_in_queue); + + if (!list_empty(&audio->in_queue)) { + /* Get the second buffer. */ + buf_node = list_first_entry(&audio->in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Add the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + */ + *voc_pkt = buf_node->frame.frame_type & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = *pkt_len + + buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->free_in_queue); + } else { + /* Only 10ms worth of data is available, signal + * erasure frame. + */ + *voc_pkt = MVS_G729A_ERASURE & 0x03; + + *pkt_len = *pkt_len + DSP_FRAME_HDR_LEN; + } + + break; + } + + case MVS_MODE_G711A: { + /* G711 frames are 10ms each but the DSP expects 20ms + * worth of data, so send two 10ms frames per buffer. + */ + /* Add the first DSP frame info header. Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + *voc_pkt = ((rate_type & 0x0F) << 2) | + (buf_node->frame.frame_type & 0x03); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->free_in_queue); + + if (!list_empty(&audio->in_queue)) { + /* Get the second buffer. */ + buf_node = list_first_entry(&audio->in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Add the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + *voc_pkt = ((rate_type & 0x0F) << 2) | + (buf_node->frame.frame_type & 0x03); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = *pkt_len + + buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->free_in_queue); + } else { + /* Only 10ms worth of data is available, signal + * erasure frame. 
+ */ + *voc_pkt = ((rate_type & 0x0F) << 2) | + (MVS_G711A_ERASURE & 0x03); + + *pkt_len = *pkt_len + DSP_FRAME_HDR_LEN; + } + break; + } + + default: { + *pkt_len = buf_node->frame.len; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->free_in_queue); + } + } + } else { + *pkt_len = 0; + + pr_aud_info("%s: No DL data available to send to MVS\n", __func__); + } + + spin_unlock_irqrestore(&audio->dsp_lock, dsp_flags); +} + +static uint32_t audio_mvs_get_media_type(uint32_t mvs_mode, uint32_t rate_type) +{ + uint32_t media_type; + + switch (mvs_mode) { + case MVS_MODE_IS127: + media_type = VSS_MEDIA_ID_EVRC_MODEM; + break; + + case MVS_MODE_AMR: + media_type = VSS_MEDIA_ID_AMR_NB_MODEM; + break; + + case MVS_MODE_LINEAR_PCM: + media_type = VSS_MEDIA_ID_PCM_NB; + break; + + case MVS_MODE_PCM: + media_type = VSS_MEDIA_ID_PCM_NB; + break; + + case MVS_MODE_AMR_WB: + media_type = VSS_MEDIA_ID_AMR_WB_MODEM; + break; + + case MVS_MODE_G729A: + media_type = VSS_MEDIA_ID_G729; + break; + + case MVS_MODE_G711A: + if (rate_type == MVS_G711A_MODE_MULAW) + media_type = VSS_MEDIA_ID_G711_MULAW; + else + media_type = VSS_MEDIA_ID_G711_ALAW; + break; + + default: + media_type = VSS_MEDIA_ID_PCM_NB; + } + + pr_debug("%s: media_type is 0x%x\n", __func__, media_type); + + return media_type; +} + +static uint32_t audio_mvs_get_network_type(uint32_t mvs_mode) +{ + uint32_t network_type; + + switch (mvs_mode) { + case MVS_MODE_IS127: + case MVS_MODE_AMR: + case MVS_MODE_LINEAR_PCM: + case MVS_MODE_PCM: + case MVS_MODE_G729A: + case MVS_MODE_G711A: + network_type = VSS_NETWORK_ID_VOIP_NB; + break; + + case MVS_MODE_AMR_WB: + network_type = VSS_NETWORK_ID_VOIP_WB; + break; + + default: + network_type = VSS_NETWORK_ID_DEFAULT; + } + + pr_debug("%s: network_type is 0x%x\n", __func__, network_type); + + return network_type; +} + +static int audio_mvs_start(struct audio_mvs_info_type *audio) +{ + int rc = 0; + + pr_aud_info("%s\n", __func__); + + /* Prevent sleep. */ + wake_lock(&audio->suspend_lock); + wake_lock(&audio->idle_lock); + + rc = voice_set_voc_path_full(1); + + if (rc == 0) { + voice_register_mvs_cb(audio_mvs_process_ul_pkt, + audio_mvs_process_dl_pkt, + audio); + + voice_config_vocoder( + audio_mvs_get_media_type(audio->mvs_mode, audio->rate_type), + audio_mvs_get_rate(audio->mvs_mode, audio->rate_type), + audio_mvs_get_network_type(audio->mvs_mode), + audio->dtx_mode); + + audio->state = AUDIO_MVS_STARTED; + } else { + pr_aud_err("%s: Error %d setting voc path to full\n", __func__, rc); + } + + return rc; +} + +static int audio_mvs_stop(struct audio_mvs_info_type *audio) +{ + int rc = 0; + + pr_aud_info("%s\n", __func__); + + voice_set_voc_path_full(0); + + audio->state = AUDIO_MVS_STOPPED; + + /* Allow sleep. */ + wake_unlock(&audio->suspend_lock); + wake_unlock(&audio->idle_lock); + + return rc; +} + +static int audio_mvs_open(struct inode *inode, struct file *file) +{ + int rc = 0; + int i; + int offset = 0; + struct audio_mvs_buf_node *buf_node = NULL; + + pr_aud_info("%s\n", __func__); + + mutex_lock(&audio_mvs_info.lock); + + /* Allocate input and output buffers. 
*/ + audio_mvs_info.memory_chunk = kmalloc(2 * MVS_MAX_Q_LEN * + sizeof(struct audio_mvs_buf_node), + GFP_KERNEL); + + if (audio_mvs_info.memory_chunk != NULL) { + for (i = 0; i < MVS_MAX_Q_LEN; i++) { + buf_node = audio_mvs_info.memory_chunk + offset; + + list_add_tail(&buf_node->list, + &audio_mvs_info.free_in_queue); + + offset = offset + sizeof(struct audio_mvs_buf_node); + } + + for (i = 0; i < MVS_MAX_Q_LEN; i++) { + buf_node = audio_mvs_info.memory_chunk + offset; + + list_add_tail(&buf_node->list, + &audio_mvs_info.free_out_queue); + + offset = offset + sizeof(struct audio_mvs_buf_node); + } + + audio_mvs_info.state = AUDIO_MVS_STOPPED; + + file->private_data = &audio_mvs_info; + + } else { + pr_aud_err("%s: No memory for IO buffers\n", __func__); + + rc = -ENOMEM; + } + + mutex_unlock(&audio_mvs_info.lock); + + return rc; +} + +static int audio_mvs_release(struct inode *inode, struct file *file) +{ + struct list_head *ptr = NULL; + struct list_head *next = NULL; + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = file->private_data; + + pr_aud_info("%s\n", __func__); + + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STARTED) + audio_mvs_stop(audio); + + /* Free input and output memory. */ + mutex_lock(&audio->in_lock); + + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + list_for_each_safe(ptr, next, &audio->free_in_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + mutex_unlock(&audio->in_lock); + + + mutex_lock(&audio->out_lock); + + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + list_for_each_safe(ptr, next, &audio->free_out_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + mutex_unlock(&audio->out_lock); + + kfree(audio->memory_chunk); + audio->memory_chunk = NULL; + + audio->state = AUDIO_MVS_CLOSED; + + mutex_unlock(&audio->lock); + + return 0; +} + +static ssize_t audio_mvs_read(struct file *file, + char __user *buf, + size_t count, + loff_t *pos) +{ + int rc = 0; + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = file->private_data; + + pr_debug("%s:\n", __func__); + + rc = wait_event_interruptible_timeout(audio->out_wait, + (!list_empty(&audio->out_queue) || + audio->state == AUDIO_MVS_STOPPED), + 1 * HZ); + + if (rc > 0) { + mutex_lock(&audio->out_lock); + + if ((audio->state == AUDIO_MVS_STARTED) && + (!list_empty(&audio->out_queue))) { + + if (count >= sizeof(struct msm_audio_mvs_frame)) { + buf_node = list_first_entry(&audio->out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + rc = copy_to_user(buf, + &buf_node->frame, + sizeof(struct msm_audio_mvs_frame)); + + if (rc == 0) { + rc = buf_node->frame.len + + sizeof(buf_node->frame.frame_type) + + sizeof(buf_node->frame.len); + } else { + pr_aud_err("%s: Copy to user retuned %d", + __func__, rc); + + rc = -EFAULT; + } + + list_add_tail(&buf_node->list, + &audio->free_out_queue); + } else { + pr_aud_err("%s: Read count %d < sizeof(frame) %d", + __func__, count, + sizeof(struct msm_audio_mvs_frame)); + + rc = -ENOMEM; + } + } else { + pr_aud_err("%s: Read performed in state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->out_lock); + + } else if (rc == 0) { + 
pr_aud_err("%s: No UL data available\n", __func__); + + rc = -ETIMEDOUT; + } else { + pr_aud_err("%s: Read was interrupted\n", __func__); + + rc = -ERESTARTSYS; + } + + return rc; +} + +static ssize_t audio_mvs_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *pos) +{ + int rc = 0; + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = file->private_data; + + pr_debug("%s:\n", __func__); + + mutex_lock(&audio->in_lock); + + if (audio->state == AUDIO_MVS_STARTED) { + if (count <= sizeof(struct msm_audio_mvs_frame)) { + if (!list_empty(&audio->free_in_queue)) { + buf_node = + list_first_entry(&audio->free_in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + rc = copy_from_user(&buf_node->frame, + buf, + count); + + list_add_tail(&buf_node->list, + &audio->in_queue); + } else { + pr_aud_err("%s: No free DL buffs\n", __func__); + } + } else { + pr_aud_err("%s: Write count %d < sizeof(frame) %d", + __func__, count, + sizeof(struct msm_audio_mvs_frame)); + + rc = -ENOMEM; + } + } else { + pr_aud_err("%s: Write performed in invalid state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->in_lock); + + return rc; +} + +static long audio_mvs_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + struct audio_mvs_info_type *audio = file->private_data; + + pr_aud_info("%s:\n", __func__); + + switch (cmd) { + case AUDIO_GET_MVS_CONFIG: { + struct msm_audio_mvs_config config; + + pr_aud_info("%s: IOCTL GET_MVS_CONFIG\n", __func__); + + mutex_lock(&audio->lock); + + config.mvs_mode = audio->mvs_mode; + config.rate_type = audio->rate_type; + config.dtx_mode = audio->dtx_mode; + + mutex_unlock(&audio->lock); + + rc = copy_to_user((void *)arg, &config, sizeof(config)); + if (rc == 0) + rc = sizeof(config); + else + pr_aud_err("%s: Config copy failed %d\n", __func__, rc); + + break; + } + + case AUDIO_SET_MVS_CONFIG: { + struct msm_audio_mvs_config config; + + pr_aud_info("%s: IOCTL SET_MVS_CONFIG\n", __func__); + + rc = copy_from_user(&config, (void *)arg, sizeof(config)); + if (rc == 0) { + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STOPPED) { + audio->mvs_mode = config.mvs_mode; + audio->rate_type = config.rate_type; + audio->dtx_mode = config.dtx_mode; + } else { + pr_aud_err("%s: Set confg called in state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->lock); + } else { + pr_aud_err("%s: Config copy failed %d\n", __func__, rc); + } + + break; + } + + case AUDIO_START: { + pr_aud_info("%s: IOCTL START\n", __func__); + + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STOPPED) { + rc = audio_mvs_start(audio); + + if (rc != 0) + audio_mvs_stop(audio); + } else { + pr_aud_err("%s: Start called in invalid state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->lock); + + break; + } + + case AUDIO_STOP: { + pr_aud_info("%s: IOCTL STOP\n", __func__); + + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STARTED) { + rc = audio_mvs_stop(audio); + } else { + pr_aud_err("%s: Stop called in invalid state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->lock); + + break; + } + + default: { + pr_aud_err("%s: Unknown IOCTL %d\n", __func__, cmd); + } + } + + return rc; +} + +static const struct file_operations audio_mvs_fops = { + .owner = THIS_MODULE, + .open = audio_mvs_open, + .release = audio_mvs_release, + .read = audio_mvs_read, + .write = 
audio_mvs_write, + .unlocked_ioctl = audio_mvs_ioctl +}; + +struct miscdevice audio_mvs_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_mvs", + .fops = &audio_mvs_fops +}; + +static int __init audio_mvs_init(void) +{ + int rc = 0; + + memset(&audio_mvs_info, 0, sizeof(audio_mvs_info)); + + init_waitqueue_head(&audio_mvs_info.out_wait); + + mutex_init(&audio_mvs_info.lock); + mutex_init(&audio_mvs_info.in_lock); + mutex_init(&audio_mvs_info.out_lock); + + spin_lock_init(&audio_mvs_info.dsp_lock); + + INIT_LIST_HEAD(&audio_mvs_info.in_queue); + INIT_LIST_HEAD(&audio_mvs_info.free_in_queue); + INIT_LIST_HEAD(&audio_mvs_info.out_queue); + INIT_LIST_HEAD(&audio_mvs_info.free_out_queue); + + wake_lock_init(&audio_mvs_info.suspend_lock, + WAKE_LOCK_SUSPEND, + "audio_mvs_suspend"); + wake_lock_init(&audio_mvs_info.idle_lock, + WAKE_LOCK_IDLE, + "audio_mvs_idle"); + + rc = misc_register(&audio_mvs_misc); + + return rc; +} + +static void __exit audio_mvs_exit(void){ + pr_aud_info("%s:\n", __func__); + + misc_deregister(&audio_mvs_misc); +} + +module_init(audio_mvs_init); +module_exit(audio_mvs_exit); + +MODULE_DESCRIPTION("MSM MVS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_utils.c b/arch/arm/mach-msm/qdsp6v3/audio_utils.c new file mode 100644 index 00000000..aa641af7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_utils.c @@ -0,0 +1,644 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +static int audio_in_pause(struct q6audio_in *audio) +{ + int rc; + + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + + return rc; +} + +static int audio_in_flush(struct q6audio_in *audio) +{ + int rc; + + pr_debug("%s:session id %d: flush\n", __func__, audio->ac->session); + /* Implicitly issue a pause to the decoder before flushing */ + rc = audio_in_pause(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + return rc; + } + + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) { + pr_aud_err("%s:session id %d: flush cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + return rc; + } + audio->rflush = 1; + audio->wflush = 1; + memset(audio->out_frame_info, 0, sizeof(audio->out_frame_info)); + wake_up(&audio->read_wait); + /* get read_lock to ensure no more waiting read thread */ + mutex_lock(&audio->read_lock); + audio->rflush = 0; + mutex_unlock(&audio->read_lock); + wake_up(&audio->write_wait); + /* get write_lock to ensure no more waiting write thread */ + mutex_lock(&audio->write_lock); + audio->wflush = 0; + mutex_unlock(&audio->write_lock); + pr_debug("%s:session id %d: in_bytes %d\n", __func__, + audio->ac->session, atomic_read(&audio->in_bytes)); + pr_debug("%s:session id %d: in_samples %d\n", __func__, + audio->ac->session, atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 0); + atomic_set(&audio->in_samples, 0); + return 0; +} + +void audio_in_get_dsp_frames(struct q6audio_in *audio, + uint32_t token, uint32_t *payload) +{ + uint32_t index; + + index = token; + pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n", + __func__, audio->ac->session, token, payload[7], + payload[3]); + pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__, + audio->ac->session, payload[4], payload[5]); + pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__, + audio->ac->session, payload[6], payload[8]); + pr_debug("%s:session id %d: enc frame size=0x%8x\n", __func__, + audio->ac->session, payload[2]); + + audio->out_frame_info[index][0] = payload[7]; + audio->out_frame_info[index][1] = payload[3]; + + /* statistics of read */ + atomic_add(payload[2], &audio->in_bytes); + atomic_add(payload[7], &audio->in_samples); + + if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) { + atomic_inc(&audio->out_count); + wake_up(&audio->read_wait); + } +} + +/* must be called with audio->lock held */ +int audio_in_enable(struct q6audio_in *audio) +{ + if (audio->enabled) + return 0; + + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +/* must be called with audio->lock held */ +int audio_in_disable(struct q6audio_in *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s:session id %d: inbytes[%d] insamples[%d]\n", + __func__, audio->ac->session, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("%s:session id %d: Failed to close the\ + session rc=%d\n", __func__, audio->ac->session, + rc); + audio->stopped = 1; + memset(audio->out_frame_info, 0, + sizeof(audio->out_frame_info)); + wake_up(&audio->read_wait); 
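/*
 * Illustrative pthread sketch, not part of the original patch:
 * audio_in_flush() above raises rflush/wflush, wakes every sleeper, and
 * then briefly takes read_lock/write_lock before clearing the flags.
 * Because a reader or writer holds that lock for the whole of its pass,
 * being able to acquire it again means nobody is still acting on
 * pre-flush state. The same handshake expressed in userspace terms:
 */
#include <pthread.h>
#include <stdbool.h>

struct flush_gate {
        pthread_mutex_t lock;     /* held by a worker for its whole pass  */
        pthread_cond_t  wake;     /* workers sleep here waiting for data  */
        bool            flushing; /* analog of audio->rflush / wflush     */
};

static void request_flush(struct flush_gate *g)
{
        pthread_mutex_lock(&g->lock);
        g->flushing = true;
        pthread_cond_broadcast(&g->wake);   /* analog of wake_up(&read_wait) */
        pthread_mutex_unlock(&g->lock);

        /* Re-acquiring the lock means no worker still holds it. */
        pthread_mutex_lock(&g->lock);
        g->flushing = false;
        pthread_mutex_unlock(&g->lock);
}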
+ wake_up(&audio->write_wait); + } + pr_debug("%s:session id %d: enabled[%d]\n", __func__, + audio->ac->session, audio->enabled); + return rc; +} + +int audio_in_buf_alloc(struct q6audio_in *audio) +{ + int rc = 0; + + switch (audio->buf_alloc) { + case NO_BUF_ALLOC: + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, + audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc\ + failed\n", __func__, + audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + } + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + break; + case BUF_ALLOC_IN: + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + break; + case BUF_ALLOC_OUT: + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc\ + failed\n", __func__, + audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + } + break; + default: + pr_debug("%s:session id %d: buf[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + } + + return rc; +} +/* ------------------- device --------------------- */ +long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_FLUSH: { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the read_lock. + * While audio->stopped read threads will always + * exit immediately. 
+ */ + rc = audio_in_flush(audio); + if (rc < 0) + pr_aud_err("%s:session id %d: Flush Fail rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + case AUDIO_PAUSE: { + pr_debug("%s:session id %d: AUDIO_PAUSE\n", __func__, + audio->ac->session); + if (audio->enabled) + audio_in_pause(audio); + break; + } + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, cfg.buffer_size, + cfg.buffer_count); + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + /* Minimum single frame size, + but with in maximum frames number */ + if ((cfg.buffer_size < (audio->min_frame_size+ \ + sizeof(struct meta_out_dsp))) || + (cfg.buffer_count < FRAME_NUM)) { + rc = -EINVAL; + break; + } + audio->str_cfg.buffer_size = cfg.buffer_size; + audio->str_cfg.buffer_count = cfg.buffer_count; + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s: session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + rc = 0; + pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, + audio->str_cfg.buffer_size, + audio->str_cfg.buffer_count); + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &audio->ac->session, + sizeof(unsigned short))) { + rc = -EFAULT; + } + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + /* Restrict the num of frames per buf to coincide with + * default buf size */ + if (cfg.frames_per_buf > audio->max_frames_per_buf) { + rc = -EFAULT; + break; + } + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + audio->buf_cfg.frames_per_buf = cfg.frames_per_buf; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, cfg.meta_info_enable, + cfg.frames_per_buf); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_CONFIG: { + if (copy_to_user((void *)arg, &audio->pcm_cfg, + sizeof(struct msm_audio_config))) + rc = -EFAULT; + break; + + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_aud_err("%s:session id %d: Not sufficient permission to" + "change the record mode\n", __func__, + audio->ac->session); + rc = -EACCES; + break; + } + if ((cfg.buffer_count > PCM_BUF_COUNT) || + (cfg.buffer_count == 1)) + cfg.buffer_count = PCM_BUF_COUNT; + + audio->pcm_cfg.buffer_count = cfg.buffer_count; + audio->pcm_cfg.buffer_size = cfg.buffer_size; + 
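/*
 * Illustrative note, not part of the original patch: ALIGN_BUF_SIZE(),
 * used for the q6asm_audio_client_buf_alloc() calls above and defined
 * later in audio_utils.h as ((size + 4095) & (~4095)), rounds a byte
 * count up to the next 4 KiB boundary. A standalone check:
 */
#include <assert.h>
#include <stddef.h>

static size_t align_buf_size(size_t size)
{
        return (size + 4095) & ~(size_t)4095;
}

/* e.g. 1 -> 4096, 4096 -> 4096, 4097 -> 8192 */
static void align_buf_size_selftest(void)
{
        assert(align_buf_size(1)    == 4096);
        assert(align_buf_size(4096) == 4096);
        assert(align_buf_size(4097) == 8192);
}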
audio->pcm_cfg.channel_count = cfg.channel_count; + audio->pcm_cfg.sample_rate = cfg.sample_rate; + rc = q6asm_audio_client_buf_alloc(IN, audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc failed\n", + __func__, audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + rc = 0; + pr_debug("%s:session id %d: AUDIO_SET_CONFIG %d %d\n", __func__, + audio->ac->session, audio->pcm_cfg.buffer_count, + audio->pcm_cfg.buffer_size); + break; + } + default: + /* call codec specific ioctl */ + rc = audio->enc_ioctl(file, cmd, arg); + } + mutex_unlock(&audio->lock); + return rc; +} + +ssize_t audio_in_read(struct file *file, + char __user *buf, + size_t count, loff_t *pos) +{ + struct q6audio_in *audio = file->private_data; + const char __user *start = buf; + unsigned char *data; + uint32_t offset = 0; + uint32_t size = 0; + int rc = 0; + uint32_t idx; + struct meta_out_dsp meta; + uint32_t bytes_to_copy = 0; + uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : + (sizeof(unsigned char) + + (sizeof(struct meta_out_dsp)*(audio->buf_cfg.frames_per_buf))); + + pr_debug("%s:session id %d: read - %d\n", __func__, audio->ac->session, + count); + if (!audio->enabled) + return -EFAULT; + mutex_lock(&audio->read_lock); + while (count > 0) { + rc = wait_event_interruptible( + audio->read_wait, + ((atomic_read(&audio->out_count) > 0) || + (audio->stopped) || + audio->rflush || audio->eos_rsp)); + + if (rc < 0) + break; + + if ((audio->stopped && !(atomic_read(&audio->out_count))) || + audio->rflush) { + pr_debug("%s:session id %d: driver in stop state or\ + flush,No more buf to read", __func__, + audio->ac->session); + rc = 0;/* End of File */ + break; + } + if (!(atomic_read(&audio->out_count)) && + (audio->eos_rsp == 1) && + (count >= (sizeof(unsigned char) + + sizeof(struct meta_out_dsp)))) { + unsigned char num_of_frames; + pr_aud_info("%s:session id %d: eos %d at output\n", + __func__, audio->ac->session, audio->eos_rsp); + if (buf != start) + break; + num_of_frames = 0xFF; + if (copy_to_user(buf, &num_of_frames, + sizeof(unsigned char))) { + rc = -EFAULT; + break; + } + buf += sizeof(unsigned char); + meta.frame_size = 0xFFFF; + meta.encoded_pcm_samples = 0xFFFF; + meta.msw_ts = 0x00; + meta.lsw_ts = 0x00; + meta.nflags = AUD_EOS_SET; + audio->eos_rsp = 0; + if (copy_to_user(buf, &meta, sizeof(meta))) { + rc = -EFAULT; + break; + } + buf += sizeof(meta); + break; + } + data = (unsigned char *)q6asm_is_cpu_buf_avail(OUT, audio->ac, + &size, &idx); + if ((count >= (size + mfield_size)) && data) { + if (audio->buf_cfg.meta_info_enable) { + if (copy_to_user(buf, + &audio->out_frame_info[idx][0], + sizeof(unsigned char))) { + rc = -EFAULT; + break; + } + bytes_to_copy = + (size + audio->out_frame_info[idx][1]); + /* Number of frames information copied */ + buf += sizeof(unsigned char); + count -= sizeof(unsigned char); + } else { + offset = audio->out_frame_info[idx][1]; + bytes_to_copy = size; + } + + pr_debug("%s:session id %d: offset=%d nr of frames= %d\n", + __func__, audio->ac->session, + audio->out_frame_info[idx][1], + audio->out_frame_info[idx][0]); + + if (copy_to_user(buf, &data[offset], bytes_to_copy)) { + rc = -EFAULT; + break; + } + count -= bytes_to_copy; + buf += bytes_to_copy; + } else { + pr_aud_err("%s:session id %d: short read data[%p]\ + bytesavail[%d]bytesrequest[%d]\n", __func__, + audio->ac->session, + data, size, count); + } + 
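/*
 * Illustrative sketch, not part of the original patch: with
 * meta_info_enable set, a buffer returned by audio_in_read() above
 * begins with a one-byte frame count (0xFF signals EOS), followed by
 * one struct meta_out_dsp per frame (layout mirrored from audio_utils.h
 * later in this patch) and then the encoded frame data. A hypothetical
 * userspace walk over that header; treating the meta entries as packed
 * directly after the count byte is an assumption for illustration only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct meta_out_dsp {
        uint32_t offset_to_frame;
        uint32_t frame_size;
        uint32_t encoded_pcm_samples;
        uint32_t msw_ts;
        uint32_t lsw_ts;
        uint32_t nflags;
} __attribute__((packed));

static void walk_read_buffer(const uint8_t *buf, size_t len)
{
        uint8_t nframes = buf[0];
        const struct meta_out_dsp *meta =
                (const struct meta_out_dsp *)(buf + 1);
        unsigned int i;

        if (nframes == 0xFF) {          /* EOS marker written by the driver */
                printf("EOS\n");
                return;
        }
        for (i = 0; i < nframes && (1 + (i + 1) * sizeof(*meta)) <= len; i++)
                printf("frame %u: %u bytes, %u PCM samples\n", i,
                       (unsigned)meta[i].frame_size,
                       (unsigned)meta[i].encoded_pcm_samples);
}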
atomic_dec(&audio->out_count); + q6asm_read(audio->ac); + break; + } + mutex_unlock(&audio->read_lock); + + pr_debug("%s:session id %d: read: %d bytes\n", __func__, + audio->ac->session, (buf-start)); + if (buf > start) + return buf - start; + return rc; +} + +static int extract_meta_info(char *buf, unsigned long *msw_ts, + unsigned long *lsw_ts, unsigned int *flags) +{ + struct meta_in *meta = (struct meta_in *)buf; + *msw_ts = meta->ntimestamp.highpart; + *lsw_ts = meta->ntimestamp.lowpart; + *flags = meta->nflags; + return 0; +} + +ssize_t audio_in_write(struct file *file, + const char __user *buf, + size_t count, loff_t *pos) +{ + struct q6audio_in *audio = file->private_data; + const char __user *start = buf; + size_t xfer = 0; + char *cpy_ptr; + int rc = 0; + unsigned char *data; + uint32_t size = 0; + uint32_t idx = 0; + uint32_t nflags = 0; + unsigned long msw_ts = 0; + unsigned long lsw_ts = 0; + uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : + sizeof(struct meta_in); + + pr_debug("%s:session id %d: to write[%d]\n", __func__, + audio->ac->session, count); + if (!audio->enabled) + return -EFAULT; + mutex_lock(&audio->write_lock); + + while (count > 0) { + rc = wait_event_interruptible(audio->write_wait, + ((atomic_read(&audio->in_count) > 0) || + (audio->stopped) || + (audio->wflush))); + if (rc < 0) + break; + if (audio->stopped || audio->wflush) { + pr_debug("%s: session id %d: stop or flush\n", __func__, + audio->ac->session); + rc = -EBUSY; + break; + } + data = (unsigned char *)q6asm_is_cpu_buf_avail(IN, audio->ac, + &size, &idx); + if (!data) { + pr_debug("%s:session id %d: No buf available\n", + __func__, audio->ac->session); + continue; + } + cpy_ptr = data; + if (audio->buf_cfg.meta_info_enable) { + if (buf == start) { + /* Processing beginning of user buffer */ + if (copy_from_user(cpy_ptr, buf, mfield_size)) { + rc = -EFAULT; + break; + } + /* Check if EOS flag is set and buffer has + * contains just meta field + */ + extract_meta_info(cpy_ptr, &msw_ts, &lsw_ts, + &nflags); + buf += mfield_size; + if (count == mfield_size) { + /* send the EOS and return */ + pr_debug("%s:session id %d: send EOS\ + 0x%8x\n", __func__, + audio->ac->session, nflags); + break; + } + count -= mfield_size; + } else { + pr_debug("%s:session id %d: continuous\ + buffer\n", __func__, audio->ac->session); + } + } + xfer = (count > (audio->pcm_cfg.buffer_size)) ? 
+ (audio->pcm_cfg.buffer_size) : count; + + if (copy_from_user(cpy_ptr, buf, xfer)) { + rc = -EFAULT; + break; + } + rc = q6asm_write(audio->ac, xfer, msw_ts, lsw_ts, 0x00); + if (rc < 0) { + rc = -EFAULT; + break; + } + atomic_dec(&audio->in_count); + count -= xfer; + buf += xfer; + } + mutex_unlock(&audio->write_lock); + pr_debug("%s:session id %d: eos_condition 0x%8x buf[0x%x]\ + start[0x%x]\n", __func__, audio->ac->session, + nflags, (int) buf, (int) start); + if (nflags & AUD_EOS_SET) { + rc = q6asm_cmd(audio->ac, CMD_EOS); + pr_aud_info("%s:session id %d: eos %d at input\n", __func__, + audio->ac->session, audio->eos_rsp); + } + pr_debug("%s:session id %d: Written %d Avail Buf[%d]", __func__, + audio->ac->session, (buf - start - mfield_size), + atomic_read(&audio->in_count)); + if (!rc) { + if (buf > start) + return buf - start; + } + return rc; +} + +int audio_in_release(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = file->private_data; + pr_aud_info("%s: session id %d\n", __func__, audio->ac->session); + mutex_lock(&audio->lock); + audio_in_disable(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + kfree(audio->enc_cfg); + kfree(audio); + return 0; +} + diff --git a/arch/arm/mach-msm/qdsp6v3/audio_utils.h b/arch/arm/mach-msm/qdsp6v3/audio_utils.h new file mode 100644 index 00000000..da4a520d --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_utils.h @@ -0,0 +1,109 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ +#include + +#define FRAME_NUM (8) + +#define PCM_BUF_COUNT (2) + +#define AUD_EOS_SET 0x01 +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 + +#define NO_BUF_ALLOC 0x00 +#define BUF_ALLOC_IN 0x01 +#define BUF_ALLOC_OUT 0x02 +#define BUF_ALLOC_INOUT 0x03 +#define ALIGN_BUF_SIZE(size) ((size + 4095) & (~4095)) + +struct timestamp{ + unsigned long lowpart; + unsigned long highpart; +} __attribute__ ((packed)); + +struct meta_in{ + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __attribute__ ((packed)); + +struct meta_out_dsp{ + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __attribute__ ((packed)); + +struct meta_out{ + unsigned char num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __attribute__ ((packed)); + +struct q6audio_in{ + spinlock_t dsp_lock; + atomic_t in_bytes; + atomic_t in_samples; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + + struct audio_client *ac; + struct msm_audio_stream_config str_cfg; + void *enc_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + void *codec_cfg; + + /* number of buffers available to read/write */ + atomic_t in_count; + atomic_t out_count; + + /* first idx: num of frames per buf, second idx: offset to frame */ + uint32_t out_frame_info[FRAME_NUM][2]; + int eos_rsp; + int opened; + int enabled; + int stopped; + int feedback; /* Flag indicates whether used + in Non Tunnel mode */ + int rflush; + int wflush; + int buf_alloc; + uint16_t min_frame_size; + uint16_t max_frames_per_buf; + long (*enc_ioctl)(struct file *, unsigned int, unsigned long); +}; + +void audio_in_get_dsp_frames(struct q6audio_in *audio, + uint32_t token, uint32_t *payload); +int audio_in_enable(struct q6audio_in *audio); +int audio_in_disable(struct q6audio_in *audio); +int audio_in_buf_alloc(struct q6audio_in *audio); +long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +ssize_t audio_in_read(struct file *file, char __user *buf, + size_t count, loff_t *pos); +ssize_t audio_in_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos); +int audio_in_release(struct inode *inode, struct file *file); + diff --git a/arch/arm/mach-msm/qdsp6v3/audio_wma.c b/arch/arm/mach-msm/qdsp6v3/audio_wma.c new file mode 100644 index 00000000..f2deb4f5 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_wma.c @@ -0,0 +1,1585 @@ +/* wma audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADRV_STATUS_AIO_INTF 0x00000001 /* AIO interface */ +#define ADRV_STATUS_FSYNC 0x00000008 +#define ADRV_STATUS_PAUSE 0x00000010 + +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 +#define AUDWMA_EOS_SET 0x00000001 + +/* Default number of pre-allocated event packets */ +#define AUDWMA_EVENT_NUM 10 + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct timestamp { + unsigned long lowpart; + unsigned long highpart; +} __attribute__ ((packed)); + +struct meta_in { + unsigned char reserved[18]; + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __attribute__ ((packed)); + +struct meta_out_dsp{ + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __attribute__ ((packed)); + +struct dec_meta_out{ + unsigned int reserved[7]; + unsigned int num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __attribute__ ((packed)); + +/* General meta field to store meta info +locally */ +union meta_data { + struct dec_meta_out meta_out; + struct meta_in meta_in; +} __attribute__ ((packed)); + +struct audwma_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audwma_pmem_region { + struct list_head list; + struct file *file; + int fd; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audwma_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; + unsigned long token; + void *kvaddr; + union meta_data meta_info; +}; + +struct q6audio; + +struct audwma_drv_operations { + void (*out_flush) (struct q6audio *); + void (*in_flush) (struct q6audio *); + int (*fsync)(struct q6audio *); +}; + +#define PCM_BUF_COUNT (2) +/* Buffer with meta */ +#define PCM_BUFSZ_MIN ((4*1024) + sizeof(struct dec_meta_out)) + +/* FRAME_NUM must be a power of two */ +#define FRAME_NUM (2) +#define FRAME_SIZE ((4*1024) + sizeof(struct meta_in)) + +struct q6audio { + atomic_t in_bytes; + atomic_t in_samples; + + struct msm_audio_stream_config str_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + struct msm_audio_wma_config_v2 wma_config; + + struct audio_client *ac; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + struct mutex get_event_lock; + wait_queue_head_t cmd_wait; + wait_queue_head_t write_wait; + wait_queue_head_t event_wait; + spinlock_t dsp_lock; + spinlock_t event_queue_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + struct list_head out_queue; /* queue to retain output buffers */ + struct list_head in_queue; /* queue to retain input buffers */ + 
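+	/*
+	 * Event bookkeeping: audio_open() pre-allocates AUDWMA_EVENT_NUM
+	 * nodes on free_event_queue, audwma_post_event() moves one onto
+	 * event_queue (falling back to a GFP_ATOMIC allocation when the
+	 * free list is empty) and wakes event_wait, and
+	 * audwma_process_event_req() returns the node to free_event_queue
+	 * after copying its payload to userspace.
+	 */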
struct list_head free_event_queue; + struct list_head event_queue; + struct list_head pmem_region_queue; /* protected by lock */ + struct audwma_drv_operations drv_ops; + union msm_audio_event_payload eos_write_payload; + + uint32_t drv_status; + int event_abort; + int eos_rsp; + int eos_flag; + int opened; + int enabled; + int stopped; + int feedback; + int rflush; /* Read flush */ + int wflush; /* Write flush */ +}; + +static int insert_eos_buf(struct q6audio *audio, + struct audwma_buffer_node *buf_node) { + struct dec_meta_out *eos_buf = buf_node->kvaddr; + eos_buf->num_of_frames = 0xFFFFFFFF; + eos_buf->meta_out_dsp[0].offset_to_frame = 0x0; + eos_buf->meta_out_dsp[0].nflags = AUDWMA_EOS_SET; + return sizeof(struct dec_meta_out) + + sizeof(eos_buf->meta_out_dsp[0]); +} + +/* Routine which updates read buffers of driver/dsp, + for flush operation as DSP output might not have proper + value set */ +static int insert_meta_data(struct q6audio *audio, + struct audwma_buffer_node *buf_node) { + struct dec_meta_out *meta_data = buf_node->kvaddr; + meta_data->num_of_frames = 0x0; + meta_data->meta_out_dsp[0].offset_to_frame = 0x0; + meta_data->meta_out_dsp[0].nflags = 0x0; + return sizeof(struct dec_meta_out) + + sizeof(meta_data->meta_out_dsp[0]); +} + +static void extract_meta_info(struct q6audio *audio, + struct audwma_buffer_node *buf_node, int dir) +{ + if (dir) { /* Read */ + if (audio->buf_cfg.meta_info_enable) + memcpy(&buf_node->meta_info.meta_in, + (char *)buf_node->kvaddr, sizeof(struct meta_in)); + else + memset(&buf_node->meta_info.meta_in, + 0, sizeof(struct meta_in)); + pr_debug("i/p: msw_ts 0x%lx lsw_ts 0x%lx nflags 0x%8x\n", + buf_node->meta_info.meta_in.ntimestamp.highpart, + buf_node->meta_info.meta_in.ntimestamp.lowpart, + buf_node->meta_info.meta_in.nflags); + } else { /* Write */ + memcpy((char *)buf_node->kvaddr, + &buf_node->meta_info.meta_out, + sizeof(struct dec_meta_out)); + pr_debug("o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x\n", + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].msw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].lsw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].nflags); + } +} + +static int audwma_pmem_lookup_vaddr(struct q6audio *audio, void *addr, + unsigned long len, struct audwma_pmem_region **region) +{ + struct audwma_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_aud_err("multiple hits for vaddr %p, len %ld\n", addr, len); + list_for_each_entry(region_elt, &audio->pmem_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) + pr_aud_err("\t%p, %ld --> %p\n", region_elt->vaddr, + region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 
0 : -1; +} + +static unsigned long audwma_pmem_fixup(struct q6audio *audio, void *addr, + unsigned long len, int ref_up, void **kvaddr) +{ + struct audwma_pmem_region *region; + unsigned long paddr; + int ret; + + ret = audwma_pmem_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_aud_err("lookup (%p, %ld) failed\n", addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + pr_debug("found region %p ref_cnt %d\n", region, region->ref_cnt); + paddr = region->paddr + (addr - region->vaddr); + /* provide kernel virtual address for accessing meta information */ + if (kvaddr) + *kvaddr = (void *) (region->kvaddr + (addr - region->vaddr)); + return paddr; +} + +static void audwma_post_event(struct q6audio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audwma_event *e_node = NULL; + unsigned long flags; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audwma_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audwma_event), GFP_ATOMIC); + if (!e_node) { + pr_aud_err("No mem to post event %d\n", type); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + wake_up(&audio->event_wait); +} + +static int audwma_enable(struct q6audio *audio) +{ + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +static int audwma_disable(struct q6audio *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s: inbytes[%d] insamples[%d]\n", __func__, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + /* Close the session */ + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("Failed to close the session rc=%d\n", rc); + audio->stopped = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + pr_debug("enabled[%d]\n", audio->enabled); + return rc; +} + +static int audwma_pause(struct q6audio *audio) +{ + int rc = 0; + + pr_aud_info("%s, enabled = %d\n", __func__, + audio->enabled); + if (audio->enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, rc); + + } else + pr_aud_err("%s: Driver not enabled\n", __func__); + return rc; +} + +static int audwma_flush(struct q6audio *audio) +{ + int rc; + + if (audio->enabled) { + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & ADRV_STATUS_PAUSE)) { + rc = audwma_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, + rc); + else + audio->drv_status |= ADRV_STATUS_PAUSE; + } + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_aud_err("%s: flush cmd failed rc=%d\n", __func__, rc); + /* Not in stop state, reenable the stream */ + if (audio->stopped == 0) { + rc = audwma_enable(audio); + if (rc) + pr_aud_err("%s:audio re-enable failed\n", __func__); + else { + audio->enabled = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + } + pr_debug("in_bytes %d\n", atomic_read(&audio->in_bytes)); + pr_debug("in_samples %d\n", atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 
0); + atomic_set(&audio->in_samples, 0); + return 0; +} + +static void audwma_async_read(struct q6audio *audio, + struct audwma_buffer_node *buf_node) +{ + struct audio_client *ac; + struct audio_aio_read_param param; + int rc; + + pr_debug("%s: Send read buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.buf_len); + ac = audio->ac; + /* Provide address so driver can append nr frames information */ + param.paddr = buf_node->paddr + + sizeof(struct dec_meta_out); + param.len = buf_node->buf.buf_len - + sizeof(struct dec_meta_out); + param.uid = param.paddr; + /* Write command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_read(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +static void audwma_async_write(struct q6audio *audio, + struct audwma_buffer_node *buf_node) +{ + int rc; + struct audio_client *ac; + struct audio_aio_write_param param; + + pr_debug("%s: Send write buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.data_len); + + ac = audio->ac; + /* Offset with appropriate meta */ + param.paddr = buf_node->paddr + sizeof(struct meta_in); + param.len = buf_node->buf.data_len - sizeof(struct meta_in); + param.msw_ts = buf_node->meta_info.meta_in.ntimestamp.highpart; + param.lsw_ts = buf_node->meta_info.meta_in.ntimestamp.lowpart; + /* If no meta_info enaled, indicate no time stamp valid */ + if (audio->buf_cfg.meta_info_enable) + param.flags = 0; + else + param.flags = 0xFF00; + param.uid = param.paddr; + /* Read command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +/* Write buffer to DSP / Handle Ack from DSP */ +static void audwma_async_write_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwma_buffer_node *used_buf; + + /* No active flush in progress */ + if (audio->wflush) + return; + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->out_queue)); + used_buf = list_first_entry(&audio->out_queue, + struct audwma_buffer_node, list); + if (token == used_buf->token) { + list_del(&used_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + pr_debug("consumed buffer\n"); + event_payload.aio_buf = used_buf->buf; + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(used_buf); + if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s: list is empty, reached EOS in\ + Tunnel\n", __func__); + wake_up(&audio->write_wait); + } + } else { + pr_aud_err("expected=%lx ret=%x\n", used_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +/* Read buffer from DSP / Handle Ack from DSP */ +static void audwma_async_read_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwma_buffer_node *filled_buf; + + /* No active flush in progress */ + if (audio->rflush) + return; + + /* Statistics of read */ + atomic_add(payload[2], &audio->in_bytes); + atomic_add(payload[7], &audio->in_samples); + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->in_queue)); + filled_buf = list_first_entry(&audio->in_queue, + struct audwma_buffer_node, list); + if (token == (filled_buf->token)) { + list_del(&filled_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, 
flags); + event_payload.aio_buf = filled_buf->buf; + /* Read done Buffer due to flush/normal condition + after EOS event, so append EOS buffer */ + if (audio->eos_rsp == 0x1) { + event_payload.aio_buf.data_len = + insert_eos_buf(audio, filled_buf); + /* Reset flag back to indicate eos intimated */ + audio->eos_rsp = 0; + } else { + filled_buf->meta_info.meta_out.num_of_frames = + payload[7]; + pr_debug("nr of frames 0x%8x\n", + filled_buf->meta_info.meta_out.num_of_frames); + event_payload.aio_buf.data_len = payload[2] + \ + payload[3] + \ + sizeof(struct dec_meta_out); + extract_meta_info(audio, filled_buf, 0); + audio->eos_rsp = 0; + } + audwma_post_event(audio, AUDIO_EVENT_READ_DONE, event_payload); + kfree(filled_buf); + } else { + pr_aud_err("expected=%lx ret=%x\n", filled_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +static void q6_audwma_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio *audio = (struct q6audio *)priv; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + pr_debug("%s:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", + __func__, token); + audwma_async_write_ack(audio, token, payload); + break; + case ASM_DATA_EVENT_READ_DONE: + pr_debug("%s:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", + __func__, token); + audwma_async_read_ack(audio, token, payload); + break; + case ASM_DATA_CMDRSP_EOS: + /* EOS Handle */ + pr_debug("%s:ASM_DATA_CMDRSP_EOS\n", __func__); + if (audio->feedback) { /* Non-Tunnel mode */ + audio->eos_rsp = 1; + /* propagate input EOS i/p buffer, + after receiving DSP acknowledgement */ + if (audio->eos_flag && + (audio->eos_write_payload.aio_buf.buf_addr)) { + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + audio->eos_flag = 0; + } + } else { /* Tunnel mode */ + audio->eos_rsp = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + break; + default: + pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); + break; + } +} + +/* ------------------- device --------------------- */ +static void audwma_async_out_flush(struct q6audio *audio) +{ + struct audwma_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + unsigned long flags; + + pr_debug("%s\n", __func__); + /* EOS followed by flush, EOS response not guranteed, free EOS i/p + buffer */ + spin_lock_irqsave(&audio->dsp_lock, flags); + if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) { + pr_debug("%s: EOS followed by flush received,acknowledge eos"\ + " i/p buffer immediately\n", __func__); + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audwma_buffer_node, list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate WRITE_DONE during flush\n", __func__); + } +} + +static void audwma_async_in_flush(struct q6audio *audio) +{ + struct audwma_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + + pr_debug("%s\n", __func__); + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audwma_buffer_node, 
list); + list_del(&buf_node->list); + /* Forcefull send o/p eos buffer after flush, if no eos response + * received by dsp even after sending eos command */ + if ((audio->eos_rsp != 1) && audio->eos_flag) { + pr_debug("%s: send eos on o/p buffer during flush\n",\ + __func__); + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + audio->eos_flag = 0; + } else { + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_meta_data(audio, buf_node); + } + audwma_post_event(audio, AUDIO_EVENT_READ_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate READ_DONE during flush\n", __func__); + } +} + +static void audwma_ioport_reset(struct q6audio *audio) +{ + if (audio->drv_status & ADRV_STATUS_AIO_INTF) { + /* If fsync is in progress, make sure + * return value of fsync indicates + * abort due to flush + */ + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("fsync in progress\n"); + audio->drv_ops.out_flush(audio); + } else + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + } +} + +static int audwma_events_pending(struct q6audio *audio) +{ + unsigned long flags; + int empty; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + empty = !list_empty(&audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return empty || audio->event_abort; +} + +static void audwma_reset_event_queue(struct q6audio *audio) +{ + unsigned long flags; + struct audwma_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwma_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audwma_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + return; +} + +static long audwma_process_event_req(struct q6audio *audio, void __user * arg) +{ + long rc; + struct msm_audio_event usr_evt; + struct audwma_event *drv_evt = NULL; + int timeout; + unsigned long flags; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) + return -EFAULT; + + timeout = (int)usr_evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout(audio->event_wait, + audwma_events_pending + (audio), + msecs_to_jiffies + (timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible(audio->event_wait, + audwma_events_pending(audio)); + } + + if (rc < 0) + return rc; + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwma_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt.event_type = drv_evt->event_type; + usr_evt.event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + pr_aud_err("Unexpected path\n"); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return -EPERM; + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) { + pr_debug("posted AUDIO_EVENT_WRITE_DONE to user\n"); + mutex_lock(&audio->write_lock); + audwma_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + 
drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->write_lock); + } else if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("posted AUDIO_EVENT_READ_DONE to user\n"); + mutex_lock(&audio->read_lock); + audwma_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->read_lock); + } + + /* Some read buffer might be held up in DSP,release all + Once EOS indicated*/ + if (audio->eos_rsp && !list_empty(&audio->in_queue)) { + pr_debug("Send flush command to release read buffers"\ + "held up in DSP\n"); + audwma_flush(audio); + } + + if (copy_to_user(arg, &usr_evt, sizeof(usr_evt))) + rc = -EFAULT; + + return rc; +} + +static int audwma_pmem_check(struct q6audio *audio, + void *vaddr, unsigned long len) +{ + struct audwma_pmem_region *region_elt; + struct audwma_pmem_region t = {.vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_aud_err("region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audwma_pmem_add(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct audwma_pmem_region *region; + int rc = -EINVAL; + + pr_debug("%s:\n", __func__); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = audwma_pmem_check(audio, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + region->ref_cnt = 0; + pr_debug("add region paddr %lx vaddr %p, len %lu kvaddr %lx\n", + region->paddr, region->vaddr, region->len, region->kvaddr); + list_add_tail(®ion->list, &audio->pmem_region_queue); + + rc = q6asm_memory_map(audio->ac, (uint32_t) paddr, IN, (uint32_t) len, + 1); + if (rc < 0) + pr_aud_err("%s: memory map failed\n", __func__); +end: + return rc; +} + +static int audwma_pmem_remove(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + struct audwma_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_debug("info fd %d vaddr %p\n", info->fd, info->vaddr); + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwma_pmem_region, list); + + if ((region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("region %p in use ref_cnt %d\n", + region, region->ref_cnt); + break; + } + pr_debug("remove region fd %d vaddr %p\n", + info->fd, info->vaddr); + rc = q6asm_memory_unmap(audio->ac, + (uint32_t) region->paddr, IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +/* audio -> lock must be held at this point */ +static int audwma_aio_buf_add(struct q6audio *audio, unsigned dir, + void __user *arg) +{ + unsigned long flags; + struct audwma_buffer_node *buf_node; + + buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) + 
return -ENOMEM; + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + return -EFAULT; + } + + pr_debug("node %p dir %x buf_addr %p buf_len %d data_len \ + %d\n", buf_node, dir, buf_node->buf.buf_addr, + buf_node->buf.buf_len, buf_node->buf.data_len); + + buf_node->paddr = audwma_pmem_fixup(audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1, + &buf_node->kvaddr); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (!audio->feedback && !buf_node->buf.data_len)) { + kfree(buf_node); + return -EINVAL; + } + extract_meta_info(audio, buf_node, 1); + /* Not a EOS buffer */ + if (!(buf_node->meta_info.meta_in.nflags & AUDWMA_EOS_SET)) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwma_async_write(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->out_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + if (buf_node->meta_info.meta_in.nflags & AUDWMA_EOS_SET) { + if (!audio->wflush) { + pr_debug("%s:Send EOS cmd at i/p\n", __func__); + /* Driver will forcefully post writedone event + once eos ack recived from DSP*/ + audio->eos_write_payload.aio_buf =\ + buf_node->buf; + audio->eos_flag = 1; + audio->eos_rsp = 0; + q6asm_cmd(audio->ac, CMD_EOS); + kfree(buf_node); + } else { /* Flush in progress, send back i/p EOS buffer + as is */ + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(buf_node); + } + } + } else { + /* read */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.buf_len < PCM_BUFSZ_MIN)) { + kfree(buf_node); + return -EINVAL; + } + /* No EOS reached */ + if (!audio->eos_rsp) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwma_async_read(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->in_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + /* EOS reached at input side fake all upcoming read buffer to + indicate the same */ + else { + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + event_payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + pr_debug("%s: propagate READ_DONE as EOS done\n",\ + __func__); + audwma_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(buf_node); + } + } + return 0; +} + +/* TBD: Only useful in tunnel-mode */ +int audwma_async_fsync(struct q6audio *audio) +{ + int rc = 0; + + /* Blocking client sends more data */ + mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + pr_aud_info("%s:\n", __func__); + + mutex_lock(&audio->write_lock); + audio->eos_rsp = 0; + + rc = wait_event_interruptible(audio->write_wait, + (list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped); + + if (rc < 0) { + pr_aud_err("%s: wait event for list_empty failed, rc = %d\n", + __func__, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + + if (rc < 0) + pr_aud_err("%s: q6asm_cmd failed, rc = %d", __func__, rc); + + rc = wait_event_interruptible(audio->write_wait, + (audio->eos_rsp || audio->wflush || + audio->stopped)); + + if (rc < 0) { + pr_aud_err("%s: wait event for eos_rsp failed, rc = %d\n", __func__, + rc); + goto done; + } + + if (audio->eos_rsp == 1) { + rc = audwma_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 
1; + } + } + + if (audio->stopped || audio->wflush) + rc = -EBUSY; + +done: + mutex_unlock(&audio->write_lock); + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + return rc; +} + +int audwma_fsync(struct file *file, int datasync) +{ + struct q6audio *audio = file->private_data; + + if (!audio->enabled || audio->feedback) + return -EINVAL; + + return audio->drv_ops.fsync(audio); +} + +static void audwma_reset_pmem_region(struct q6audio *audio) +{ + struct audwma_pmem_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwma_pmem_region, list); + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + } + + return; +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t audwma_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t audwma_debug_read(struct file *file, char __user * buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct q6audio *audio = file->private_data; + + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "enabled %d\n", audio->enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "feedback %d\n", audio->feedback); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "rflush %d\n", audio->rflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "inqueue empty %d\n", list_empty(&audio->in_queue)); + n += scnprintf(buffer + n, debug_bufmax - n, + "outqueue empty %d\n", list_empty(&audio->out_queue)); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static const struct file_operations audwma_debug_fops = { + .read = audwma_debug_read, + .open = audwma_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + if (cmd == AUDIO_GET_EVENT) { + pr_debug("AUDIO_GET_EVENT\n"); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audwma_process_event_req(audio, + (void __user *)arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + return rc; + } + + if (cmd == AUDIO_ASYNC_WRITE) { + mutex_lock(&audio->write_lock); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else { + if (audio->enabled) + rc = audwma_aio_buf_add(audio, 1, + (void __user *)arg); + else + rc = -EPERM; + } + mutex_unlock(&audio->write_lock); + return rc; + } + + if (cmd == AUDIO_ASYNC_READ) { + mutex_lock(&audio->read_lock); + if ((audio->feedback) && (audio->enabled)) + rc = audwma_aio_buf_add(audio, 0, + (void __user *)arg); + else + rc = -EPERM; + mutex_unlock(&audio->read_lock); + return rc; + } + + if (cmd == AUDIO_ABORT_GET_EVENT) { + audio->event_abort = 1; + 
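+		/*
+		 * Waking event_wait here lets a reader blocked in
+		 * AUDIO_GET_EVENT observe event_abort via
+		 * audwma_events_pending() and return -ENODEV.
+		 */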
wake_up(&audio->event_wait); + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: { + struct asm_wma_cfg wma_cfg; + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_aud_err("pcm output block config failed\n"); + break; + } + } + wma_cfg.format_tag = audio->wma_config.format_tag; + wma_cfg.ch_cfg = audio->wma_config.numchannels; + wma_cfg.sample_rate = audio->wma_config.samplingrate; + wma_cfg.avg_bytes_per_sec = audio->wma_config.avgbytespersecond; + wma_cfg.block_align = audio->wma_config.block_align; + wma_cfg.valid_bits_per_sample = + audio->wma_config.validbitspersample; + wma_cfg.ch_mask = audio->wma_config.channelmask; + wma_cfg.encode_opt = audio->wma_config.encodeopt; + /* Configure Media format block */ + rc = q6asm_media_format_block_wma(audio->ac, &wma_cfg); + if (rc < 0) { + pr_aud_err("cmd media format block failed\n"); + break; + } + rc = audwma_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + case AUDIO_STOP: { + pr_debug("AUDIO_STOP\n"); + audio->stopped = 1; + audwma_flush(audio); + audio->enabled = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + if (rc < 0) { + pr_aud_err("Audio Stop procedure failed rc=%d\n", rc); + break; + } + break; + } + case AUDIO_PAUSE: { + pr_debug("AUDIO_PAUSE %ld\n", arg); + if (arg == 1) { + rc = audwma_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause FAILED rc=%d\n", __func__, + rc); + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audwma_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", + __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + } + break; + } + case AUDIO_FLUSH: { + pr_debug("AUDIO_FLUSH\n"); + audio->rflush = 1; + audio->wflush = 1; + /* Flush DSP */ + rc = audwma_flush(audio); + /* Flush input / Output buffer in software*/ + audwma_ioport_reset(audio); + if (rc < 0) { + pr_aud_err("AUDIO_FLUSH interrupted\n"); + rc = -EINTR; + } else { + audio->rflush = 0; + audio->wflush = 0; + } + audio->eos_flag = 0; + audio->eos_rsp = 0; + break; + } + case AUDIO_REGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_REGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwma_pmem_add(audio, &info); + break; + } + case AUDIO_DEREGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_DEREGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwma_pmem_remove(audio, &info); + break; + } + case AUDIO_GET_WMA_CONFIG_V2: { + if (copy_to_user((void *)arg, &audio->wma_config, + sizeof(struct msm_audio_wma_config_v2))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_WMA_CONFIG_V2: { + if (copy_from_user(&audio->wma_config, (void *)arg, + sizeof(struct msm_audio_wma_config_v2))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + 
pr_debug("GET STREAM CFG %d %d\n", cfg.buffer_size, + cfg.buffer_count); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + pr_debug("SET STREAM CONFIG\n"); + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config cfg; + if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_aud_err("Not sufficient permission to" + "change the playback mode\n"); + rc = -EACCES; + break; + } + if ((config.buffer_count > PCM_BUF_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + audio->pcm_cfg.buffer_count = config.buffer_count; + audio->pcm_cfg.buffer_size = config.buffer_size; + audio->pcm_cfg.channel_count = config.channel_count; + audio->pcm_cfg.sample_rate = config.sample_rate; + rc = 0; + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]", __func__, + audio->ac->session, cfg.meta_info_enable); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *)arg, &audio->ac->session, + sizeof(unsigned short))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct q6audio *audio = file->private_data; + mutex_lock(&audio->lock); + audwma_disable(audio); + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + audwma_reset_pmem_region(audio); + audio->event_abort = 1; + wake_up(&audio->event_wait); + audwma_reset_event_queue(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + mutex_destroy(&audio->lock); + mutex_destroy(&audio->read_lock); + mutex_destroy(&audio->write_lock); + mutex_destroy(&audio->get_event_lock); +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio); + pr_aud_info("%s: wma_decoder success\n", __func__); + return 0; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio *audio = NULL; + int rc = 0; + int i; + struct audwma_event *e_node = NULL; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_wma_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("Could not allocate memory for wma decode driver\n"); + return -ENOMEM; + } + + /* Settings will 
be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.sample_rate = 48000; + audio->pcm_cfg.channel_count = 2; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audwma_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + /* Only AIO interface */ + if (file->f_flags & O_NONBLOCK) { + pr_debug("set to aio interface\n"); + audio->drv_status |= ADRV_STATUS_AIO_INTF; + audio->drv_ops.out_flush = audwma_async_out_flush; + audio->drv_ops.in_flush = audwma_async_in_flush; + audio->drv_ops.fsync = audwma_async_fsync; + q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + } else { + pr_aud_err("SIO interface not supported\n"); + rc = -EACCES; + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_WMA_V9); + if (rc < 0) { + pr_aud_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open WMA decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_WMA_V9); + if (rc < 0) { + pr_aud_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_aud_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + spin_lock_init(&audio->event_queue_lock); + init_waitqueue_head(&audio->cmd_wait); + init_waitqueue_head(&audio->write_wait); + init_waitqueue_head(&audio->event_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->in_queue); + INIT_LIST_HEAD(&audio->pmem_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + + audio->drv_ops.out_flush(audio); + audio->opened = 1; + file->private_data = audio; + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_wma_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audwma_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + for (i = 0; i < AUDWMA_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audwma_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_aud_err("event pkt alloc failed\n"); + break; + } + } + pr_aud_info("%s:wma decoder open success, session_id = %d\n", __func__, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_wma_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audwma_fsync, +}; + +struct miscdevice audwma_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_wma", + .fops = &audio_wma_fops, +}; + +static int __init audio_wma_init(void) +{ 
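+	/*
+	 * Rough userspace usage sketch for the AIO interface registered
+	 * here (illustrative only; the device node name is assumed to
+	 * follow the miscdevice name, and error handling is omitted):
+	 *
+	 *	int fd = open("/dev/msm_wma", O_RDWR | O_NONBLOCK);
+	 *	struct msm_audio_wma_config_v2 cfg;
+	 *	ioctl(fd, AUDIO_GET_WMA_CONFIG_V2, &cfg);
+	 *	// fill in format_tag, numchannels, samplingrate, ...
+	 *	ioctl(fd, AUDIO_SET_WMA_CONFIG_V2, &cfg);
+	 *	ioctl(fd, AUDIO_START, 0);
+	 *	// register pmem-backed buffers with AUDIO_REGISTER_PMEM,
+	 *	// queue them with AUDIO_ASYNC_WRITE, then reap completions
+	 *	// via AUDIO_GET_EVENT
+	 */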
+ return misc_register(&audwma_misc); +} + +device_initcall(audio_wma_init); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_wmapro.c b/arch/arm/mach-msm/qdsp6v3/audio_wmapro.c new file mode 100644 index 00000000..d0e67af8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_wmapro.c @@ -0,0 +1,1644 @@ +/* wmapro audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADRV_STATUS_AIO_INTF 0x00000001 /* AIO interface */ +#define ADRV_STATUS_FSYNC 0x00000008 +#define ADRV_STATUS_PAUSE 0x00000010 + +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 +#define AUDWMAPRO_EOS_SET 0x00000001 + +/* Default number of pre-allocated event packets */ +#define AUDWMAPRO_EVENT_NUM 10 + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct timestamp { + unsigned long lowpart; + unsigned long highpart; +} __attribute__ ((packed)); + +struct meta_in { + unsigned char reserved[18]; + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __attribute__ ((packed)); + +struct meta_out_dsp{ + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __attribute__ ((packed)); + +struct dec_meta_out{ + unsigned int reserved[7]; + unsigned int num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __attribute__ ((packed)); + +/* General meta field to store meta info +locally */ +union meta_data { + struct dec_meta_out meta_out; + struct meta_in meta_in; +} __attribute__ ((packed)); + +struct audwmapro_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audwmapro_pmem_region { + struct list_head list; + struct file *file; + int fd; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audwmapro_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; + unsigned long token; + void *kvaddr; + union meta_data meta_info; +}; + +struct q6audio; + +struct audwmapro_drv_operations { + void (*out_flush) (struct q6audio *); + void (*in_flush) (struct q6audio *); + int 
(*fsync)(struct q6audio *); +}; + +#define PCM_BUF_COUNT (2) +/* Buffer with meta */ +#define PCM_BUFSZ_MIN ((4*1024) + sizeof(struct dec_meta_out)) + +/* FRAME_NUM must be a power of two */ +#define FRAME_NUM (2) +#define FRAME_SIZE ((4*1024) + sizeof(struct meta_in)) + +struct q6audio { + atomic_t in_bytes; + atomic_t in_samples; + + struct msm_audio_stream_config str_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + struct msm_audio_wmapro_config wmapro_config; + + struct audio_client *ac; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + struct mutex get_event_lock; + wait_queue_head_t cmd_wait; + wait_queue_head_t write_wait; + wait_queue_head_t event_wait; + spinlock_t dsp_lock; + spinlock_t event_queue_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + struct list_head out_queue; /* queue to retain output buffers */ + struct list_head in_queue; /* queue to retain input buffers */ + struct list_head free_event_queue; + struct list_head event_queue; + struct list_head pmem_region_queue; /* protected by lock */ + struct audwmapro_drv_operations drv_ops; + union msm_audio_event_payload eos_write_payload; + + uint32_t drv_status; + int event_abort; + int eos_rsp; + int eos_flag; + int opened; + int enabled; + int stopped; + int feedback; + int rflush; /* Read flush */ + int wflush; /* Write flush */ +}; + +static int insert_eos_buf(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) { + struct dec_meta_out *eos_buf = buf_node->kvaddr; + eos_buf->num_of_frames = 0xFFFFFFFF; + eos_buf->meta_out_dsp[0].offset_to_frame = 0x0; + eos_buf->meta_out_dsp[0].nflags = AUDWMAPRO_EOS_SET; + return sizeof(struct dec_meta_out) + + sizeof(eos_buf->meta_out_dsp[0]); +} + +/* Routine which updates read buffers of driver/dsp, + for flush operation as DSP output might not have proper + value set */ +static int insert_meta_data(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) { + struct dec_meta_out *meta_data = buf_node->kvaddr; + meta_data->num_of_frames = 0x0; + meta_data->meta_out_dsp[0].offset_to_frame = 0x0; + meta_data->meta_out_dsp[0].nflags = 0x0; + return sizeof(struct dec_meta_out) + + sizeof(meta_data->meta_out_dsp[0]); +} + +static void extract_meta_info(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node, int dir) +{ + if (dir) { /* Read */ + if (audio->buf_cfg.meta_info_enable) + memcpy(&buf_node->meta_info.meta_in, + (char *)buf_node->kvaddr, sizeof(struct meta_in)); + else + memset(&buf_node->meta_info.meta_in, + 0, sizeof(struct meta_in)); + pr_debug("i/p: msw_ts 0x%lx lsw_ts 0x%lx nflags 0x%8x\n", + buf_node->meta_info.meta_in.ntimestamp.highpart, + buf_node->meta_info.meta_in.ntimestamp.lowpart, + buf_node->meta_info.meta_in.nflags); + } else { /* Write */ + memcpy((char *)buf_node->kvaddr, + &buf_node->meta_info.meta_out, + sizeof(struct dec_meta_out)); + pr_debug("o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x\n", + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].msw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].lsw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].nflags); + } +} + +static int audwmapro_pmem_lookup_vaddr(struct q6audio *audio, void *addr, + unsigned long len, struct audwmapro_pmem_region **region) +{ + struct audwmapro_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if 
(addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_aud_err("multiple hits for vaddr %p, len %ld\n", addr, len); + list_for_each_entry(region_elt, &audio->pmem_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) + pr_aud_err("\t%p, %ld --> %p\n", region_elt->vaddr, + region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 0 : -1; +} + +static unsigned long audwmapro_pmem_fixup(struct q6audio *audio, void *addr, + unsigned long len, int ref_up, void **kvaddr) +{ + struct audwmapro_pmem_region *region; + unsigned long paddr; + int ret; + + ret = audwmapro_pmem_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_aud_err("lookup (%p, %ld) failed\n", addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + pr_debug("found region %p ref_cnt %d\n", region, region->ref_cnt); + paddr = region->paddr + (addr - region->vaddr); + /* provide kernel virtual address for accessing meta information */ + if (kvaddr) + *kvaddr = (void *) (region->kvaddr + (addr - region->vaddr)); + return paddr; +} + +static void audwmapro_post_event(struct q6audio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audwmapro_event *e_node = NULL; + unsigned long flags; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audwmapro_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audwmapro_event), GFP_ATOMIC); + if (!e_node) { + pr_aud_err("No mem to post event %d\n", type); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + wake_up(&audio->event_wait); +} + +static int audwmapro_enable(struct q6audio *audio) +{ + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +static int audwmapro_disable(struct q6audio *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s: inbytes[%d] insamples[%d]\n", __func__, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + /* Close the session */ + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("Failed to close the session rc=%d\n", rc); + audio->stopped = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + pr_debug("enabled[%d]\n", audio->enabled); + return rc; +} + +static int audwmapro_pause(struct q6audio *audio) +{ + int rc = 0; + + pr_aud_info("%s, enabled = %d\n", __func__, + audio->enabled); + if (audio->enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, rc); + + } else + pr_aud_err("%s: Driver not enabled\n", __func__); + return rc; +} + +static int audwmapro_flush(struct q6audio *audio) +{ + int rc; + + if (audio->enabled) { + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & 
ADRV_STATUS_PAUSE)) { + rc = audwmapro_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, + rc); + else + audio->drv_status |= ADRV_STATUS_PAUSE; + } + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_aud_err("%s: flush cmd failed rc=%d\n", __func__, rc); + /* Not in stop state, reenable the stream */ + if (audio->stopped == 0) { + rc = audwmapro_enable(audio); + if (rc) + pr_aud_err("%s:audio re-enable failed\n", __func__); + else { + audio->enabled = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + } + pr_debug("in_bytes %d\n", atomic_read(&audio->in_bytes)); + pr_debug("in_samples %d\n", atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 0); + atomic_set(&audio->in_samples, 0); + return 0; +} + +static void audwmapro_async_read(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) +{ + struct audio_client *ac; + struct audio_aio_read_param param; + int rc; + + pr_debug("%s: Send read buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.buf_len); + ac = audio->ac; + /* Provide address so driver can append nr frames information */ + param.paddr = buf_node->paddr + + sizeof(struct dec_meta_out); + param.len = buf_node->buf.buf_len - + sizeof(struct dec_meta_out); + param.uid = param.paddr; + /* Write command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_read(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +static void audwmapro_async_write(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) +{ + int rc; + struct audio_client *ac; + struct audio_aio_write_param param; + + pr_debug("%s: Send write buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.data_len); + + ac = audio->ac; + /* Offset with appropriate meta */ + param.paddr = buf_node->paddr + sizeof(struct meta_in); + param.len = buf_node->buf.data_len - sizeof(struct meta_in); + param.msw_ts = buf_node->meta_info.meta_in.ntimestamp.highpart; + param.lsw_ts = buf_node->meta_info.meta_in.ntimestamp.lowpart; + /* If no meta_info enaled, indicate no time stamp valid */ + if (audio->buf_cfg.meta_info_enable) + param.flags = 0; + else + param.flags = 0xFF00; + param.uid = param.paddr; + /* Read command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +/* Write buffer to DSP / Handle Ack from DSP */ +static void audwmapro_async_write_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwmapro_buffer_node *used_buf; + + /* No active flush in progress */ + if (audio->wflush) + return; + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->out_queue)); + used_buf = list_first_entry(&audio->out_queue, + struct audwmapro_buffer_node, list); + if (token == used_buf->token) { + list_del(&used_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + pr_debug("consumed buffer\n"); + event_payload.aio_buf = used_buf->buf; + audwmapro_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(used_buf); + if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s: list is empty, reached EOS in\ + Tunnel\n", __func__); + wake_up(&audio->write_wait); + } + } else { + pr_aud_err("expected=%lx ret=%x\n", used_buf->token, token); + 
spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +/* Read buffer from DSP / Handle Ack from DSP */ +static void audwmapro_async_read_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwmapro_buffer_node *filled_buf; + + /* No active flush in progress */ + if (audio->rflush) + return; + + /* Statistics of read */ + atomic_add(payload[2], &audio->in_bytes); + atomic_add(payload[7], &audio->in_samples); + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->in_queue)); + filled_buf = list_first_entry(&audio->in_queue, + struct audwmapro_buffer_node, list); + if (token == (filled_buf->token)) { + list_del(&filled_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + event_payload.aio_buf = filled_buf->buf; + /* Read done Buffer due to flush/normal condition + after EOS event, so append EOS buffer */ + if (audio->eos_rsp == 0x1) { + event_payload.aio_buf.data_len = + insert_eos_buf(audio, filled_buf); + /* Reset flag back to indicate eos intimated */ + audio->eos_rsp = 0; + } else { + filled_buf->meta_info.meta_out.num_of_frames = + payload[7]; + pr_debug("nr of frames 0x%8x\n", + filled_buf->meta_info.meta_out.num_of_frames); + event_payload.aio_buf.data_len = payload[2] + \ + payload[3] + \ + sizeof(struct dec_meta_out); + extract_meta_info(audio, filled_buf, 0); + audio->eos_rsp = 0; + } + audwmapro_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(filled_buf); + } else { + pr_aud_err("expected=%lx ret=%x\n", filled_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +static void q6_audwmapro_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio *audio = (struct q6audio *)priv; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + pr_debug("%s:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", + __func__, token); + audwmapro_async_write_ack(audio, token, payload); + break; + case ASM_DATA_EVENT_READ_DONE: + pr_debug("%s:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", + __func__, token); + audwmapro_async_read_ack(audio, token, payload); + break; + case ASM_DATA_CMDRSP_EOS: + /* EOS Handle */ + pr_debug("%s:ASM_DATA_CMDRSP_EOS\n", __func__); + if (audio->feedback) { /* Non-Tunnel mode */ + audio->eos_rsp = 1; + /* propagate input EOS i/p buffer, + after receiving DSP acknowledgement */ + if (audio->eos_flag && + (audio->eos_write_payload.aio_buf.buf_addr)) { + audwmapro_post_event(audio, + AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + audio->eos_flag = 0; + } + } else { /* Tunnel mode */ + audio->eos_rsp = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + break; + default: + pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); + break; + } +} + +/* ------------------- device --------------------- */ +static void audwmapro_async_out_flush(struct q6audio *audio) +{ + struct audwmapro_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + unsigned long flags; + + pr_debug("%s\n", __func__); + /* EOS followed by flush, EOS response not guranteed, free EOS i/p + buffer */ + spin_lock_irqsave(&audio->dsp_lock, flags); + if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) { + pr_debug("%s: EOS followed by flush received,acknowledge eos"\ + " i/p buffer immediately\n", __func__); + audwmapro_post_event(audio, 
AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audwmapro_buffer_node, list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audwmapro_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate WRITE_DONE during flush\n", __func__); + } +} + +static void audwmapro_async_in_flush(struct q6audio *audio) +{ + struct audwmapro_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + + pr_debug("%s\n", __func__); + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audwmapro_buffer_node, list); + list_del(&buf_node->list); + /* Forcefull send o/p eos buffer after flush, if no eos response + * received by dsp even after sending eos command */ + if ((audio->eos_rsp != 1) && audio->eos_flag) { + pr_debug("%s: send eos on o/p buffer during flush\n",\ + __func__); + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + audio->eos_flag = 0; + } else { + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_meta_data(audio, buf_node); + } + audwmapro_post_event(audio, AUDIO_EVENT_READ_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate READ_DONE during flush\n", __func__); + } +} + +static void audwmapro_ioport_reset(struct q6audio *audio) +{ + if (audio->drv_status & ADRV_STATUS_AIO_INTF) { + /* If fsync is in progress, make sure + * return value of fsync indicates + * abort due to flush + */ + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("fsync in progress\n"); + audio->drv_ops.out_flush(audio); + } else + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + } +} + +static int audwmapro_events_pending(struct q6audio *audio) +{ + unsigned long flags; + int empty; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + empty = !list_empty(&audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return empty || audio->event_abort; +} + +static void audwmapro_reset_event_queue(struct q6audio *audio) +{ + unsigned long flags; + struct audwmapro_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwmapro_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audwmapro_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + return; +} + +static long audwmapro_process_event_req(struct q6audio *audio, + void __user *arg) +{ + long rc; + struct msm_audio_event usr_evt; + struct audwmapro_event *drv_evt = NULL; + int timeout; + unsigned long flags; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) + return -EFAULT; + + timeout = (int)usr_evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout(audio->event_wait, + audwmapro_events_pending + (audio), + msecs_to_jiffies + (timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible(audio->event_wait, + audwmapro_events_pending(audio)); + } + + 
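+	/*
+	 * wait_event_interruptible_timeout() returns 0 on timeout (mapped to
+	 * -ETIMEDOUT above) and a negative value if interrupted by a signal;
+	 * wait_event_interruptible() returns 0 or -ERESTARTSYS. A negative
+	 * rc is therefore propagated to the caller unchanged below.
+	 */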
if (rc < 0) + return rc; + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwmapro_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt.event_type = drv_evt->event_type; + usr_evt.event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + pr_aud_err("Unexpected path\n"); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return -EPERM; + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) { + pr_debug("posted AUDIO_EVENT_WRITE_DONE to user\n"); + mutex_lock(&audio->write_lock); + audwmapro_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->write_lock); + } else if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("posted AUDIO_EVENT_READ_DONE to user\n"); + mutex_lock(&audio->read_lock); + audwmapro_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->read_lock); + } + + /* Some read buffer might be held up in DSP,release all + Once EOS indicated*/ + if (audio->eos_rsp && !list_empty(&audio->in_queue)) { + pr_debug("Send flush command to release read buffers"\ + " held up in DSP\n"); + audwmapro_flush(audio); + } + + if (copy_to_user(arg, &usr_evt, sizeof(usr_evt))) + rc = -EFAULT; + + return rc; +} + +static int audwmapro_pmem_check(struct q6audio *audio, + void *vaddr, unsigned long len) +{ + struct audwmapro_pmem_region *region_elt; + struct audwmapro_pmem_region t = {.vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_aud_err("region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audwmapro_pmem_add(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct audwmapro_pmem_region *region; + int rc = -EINVAL; + + pr_debug("%s\n", __func__); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = audwmapro_pmem_check(audio, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + region->ref_cnt = 0; + pr_debug("add region paddr %lx vaddr %p, len %lu kvaddr %lx\n", + region->paddr, region->vaddr, region->len, region->kvaddr); + list_add_tail(®ion->list, &audio->pmem_region_queue); + + rc = q6asm_memory_map(audio->ac, (uint32_t) paddr, IN, (uint32_t) len, + 1); + if (rc < 0) + pr_aud_err("%s: memory map failed\n", __func__); +end: + return rc; +} + +static int audwmapro_pmem_remove(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + struct audwmapro_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_debug("info fd %d vaddr %p\n", info->fd, 
info->vaddr); + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwmapro_pmem_region, list); + + if ((region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("region %p in use ref_cnt %d\n", + region, region->ref_cnt); + break; + } + pr_debug("remove region fd %d vaddr %p\n", + info->fd, info->vaddr); + rc = q6asm_memory_unmap(audio->ac, + (uint32_t) region->paddr, IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +/* audio -> lock must be held at this point */ +static int audwmapro_aio_buf_add(struct q6audio *audio, unsigned dir, + void __user *arg) +{ + unsigned long flags; + struct audwmapro_buffer_node *buf_node; + + buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) + return -ENOMEM; + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + return -EFAULT; + } + + pr_debug("node %p dir %x buf_addr %p buf_len %d data_len \ + %d\n", buf_node, dir, buf_node->buf.buf_addr, + buf_node->buf.buf_len, buf_node->buf.data_len); + + buf_node->paddr = audwmapro_pmem_fixup(audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1, + &buf_node->kvaddr); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (!audio->feedback && !buf_node->buf.data_len)) { + kfree(buf_node); + return -EINVAL; + } + extract_meta_info(audio, buf_node, 1); + /* Not a EOS buffer */ + if (!(buf_node->meta_info.meta_in.nflags & AUDWMAPRO_EOS_SET)) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwmapro_async_write(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->out_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + if (buf_node->meta_info.meta_in.nflags & AUDWMAPRO_EOS_SET) { + if (!audio->wflush) { + pr_debug("%s:Send EOS cmd at i/p\n", __func__); + /* Driver will forcefully post writedone event + once eos ack recived from DSP*/ + audio->eos_write_payload.aio_buf =\ + buf_node->buf; + audio->eos_flag = 1; + audio->eos_rsp = 0; + q6asm_cmd(audio->ac, CMD_EOS); + kfree(buf_node); + } else { /* Flush in progress, send back i/p EOS buffer + as is */ + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + audwmapro_post_event(audio, + AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(buf_node); + } + } + } else { + /* read */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.buf_len < PCM_BUFSZ_MIN)) { + kfree(buf_node); + return -EINVAL; + } + /* No EOS reached */ + if (!audio->eos_rsp) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwmapro_async_read(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->in_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + /* EOS reached at input side fake all upcoming read buffer to + indicate the same */ + else { + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + event_payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + pr_debug("%s: propagate READ_DONE as EOS done\n",\ + __func__); + audwmapro_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(buf_node); + } + } + return 0; +} + +/* TBD: Only useful in tunnel-mode */ +int audwmapro_async_fsync(struct q6audio *audio) +{ + int rc = 0; + + /* Blocking client sends more data */ + 
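+	/*
+	 * Tunnel-mode EOS handshake: set ADRV_STATUS_FSYNC so further
+	 * AUDIO_ASYNC_WRITE calls return -EBUSY, wait for out_queue to drain,
+	 * issue CMD_EOS to the DSP, then wait for the EOS response (or for a
+	 * flush/stop, which aborts the sync with -EBUSY). The status bit is
+	 * cleared again on the exit path.
+	 */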
mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + pr_aud_info("%s:\n", __func__); + + mutex_lock(&audio->write_lock); + audio->eos_rsp = 0; + + rc = wait_event_interruptible(audio->write_wait, + (list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped); + + if (rc < 0) { + pr_aud_err("%s: wait event for list_empty failed, rc = %d\n", + __func__, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + + if (rc < 0) + pr_aud_err("%s: q6asm_cmd failed, rc = %d", __func__, rc); + + rc = wait_event_interruptible(audio->write_wait, + (audio->eos_rsp || audio->wflush || + audio->stopped)); + + if (rc < 0) { + pr_aud_err("%s: wait event for eos_rsp failed, rc = %d\n", __func__, + rc); + goto done; + } + + if (audio->eos_rsp == 1) { + rc = audwmapro_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + + if (audio->stopped || audio->wflush) + rc = -EBUSY; + +done: + mutex_unlock(&audio->write_lock); + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + return rc; +} + +int audwmapro_fsync(struct file *file, int datasync) +{ + struct q6audio *audio = file->private_data; + + if (!audio->enabled || audio->feedback) + return -EINVAL; + + return audio->drv_ops.fsync(audio); +} + +static void audwmapro_reset_pmem_region(struct q6audio *audio) +{ + struct audwmapro_pmem_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwmapro_pmem_region, list); + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + } + + return; +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t audwmapro_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t audwmapro_debug_read(struct file *file, char __user * buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct q6audio *audio = file->private_data; + + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "enabled %d\n", audio->enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "feedback %d\n", audio->feedback); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. 
Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "rflush %d\n", audio->rflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "inqueue empty %d\n", list_empty(&audio->in_queue)); + n += scnprintf(buffer + n, debug_bufmax - n, + "outqueue empty %d\n", list_empty(&audio->out_queue)); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static const struct file_operations audwmapro_debug_fops = { + .read = audwmapro_debug_read, + .open = audwmapro_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + if (cmd == AUDIO_GET_EVENT) { + pr_debug("AUDIO_GET_EVENT\n"); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audwmapro_process_event_req(audio, + (void __user *)arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + return rc; + } + + if (cmd == AUDIO_ASYNC_WRITE) { + mutex_lock(&audio->write_lock); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else { + if (audio->enabled) + rc = audwmapro_aio_buf_add(audio, 1, + (void __user *)arg); + else + rc = -EPERM; + } + mutex_unlock(&audio->write_lock); + return rc; + } + + if (cmd == AUDIO_ASYNC_READ) { + mutex_lock(&audio->read_lock); + if ((audio->feedback) && (audio->enabled)) + rc = audwmapro_aio_buf_add(audio, 0, + (void __user *)arg); + else + rc = -EPERM; + mutex_unlock(&audio->read_lock); + return rc; + } + + if (cmd == AUDIO_ABORT_GET_EVENT) { + audio->event_abort = 1; + wake_up(&audio->event_wait); + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: { + struct asm_wmapro_cfg wmapro_cfg; + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_aud_err("pcm output block config failed\n"); + break; + } + } + if ((audio->wmapro_config.formattag == 0x162) || + (audio->wmapro_config.formattag == 0x166)) { + wmapro_cfg.format_tag = audio->wmapro_config.formattag; + } else { + pr_aud_err("%s:AUDIO_START failed: formattag = %d\n", + __func__, audio->wmapro_config.formattag); + rc = -EINVAL; + break; + } + if ((audio->wmapro_config.numchannels == 1) || + (audio->wmapro_config.numchannels == 2)) { + wmapro_cfg.ch_cfg = audio->wmapro_config.numchannels; + } else { + pr_aud_err("%s:AUDIO_START failed: channels = %d\n", + __func__, audio->wmapro_config.numchannels); + rc = -EINVAL; + break; + } + if ((audio->wmapro_config.samplingrate <= 48000) || + (audio->wmapro_config.samplingrate > 0)) { + wmapro_cfg.sample_rate = + audio->wmapro_config.samplingrate; + } else { + pr_aud_err("%s:AUDIO_START failed: sample_rate = %d\n", + __func__, audio->wmapro_config.samplingrate); + rc = -EINVAL; + break; + } + wmapro_cfg.avg_bytes_per_sec = + audio->wmapro_config.avgbytespersecond; + if ((audio->wmapro_config.asfpacketlength <= 13376) || + (audio->wmapro_config.asfpacketlength > 0)) { + wmapro_cfg.block_align = + audio->wmapro_config.asfpacketlength; + } else { + pr_aud_err("%s:AUDIO_START failed: block_align = %d\n", + 
__func__, audio->wmapro_config.asfpacketlength); + rc = -EINVAL; + break; + } + if (audio->wmapro_config.validbitspersample == 16) { + wmapro_cfg.valid_bits_per_sample = + audio->wmapro_config.validbitspersample; + } else { + pr_aud_err("%s:AUDIO_START failed: bitspersample = %d\n", + __func__, + audio->wmapro_config.validbitspersample); + rc = -EINVAL; + break; + } + if ((audio->wmapro_config.channelmask == 4) || + (audio->wmapro_config.channelmask == 3)) { + wmapro_cfg.ch_mask = audio->wmapro_config.channelmask; + } else { + pr_aud_err("%s:AUDIO_START failed: channel_mask = %d\n", + __func__, audio->wmapro_config.channelmask); + rc = -EINVAL; + break; + } + wmapro_cfg.encode_opt = audio->wmapro_config.encodeopt; + wmapro_cfg.adv_encode_opt = + audio->wmapro_config.advancedencodeopt; + wmapro_cfg.adv_encode_opt2 = + audio->wmapro_config.advancedencodeopt2; + /* Configure Media format block */ + rc = q6asm_media_format_block_wmapro(audio->ac, &wmapro_cfg); + if (rc < 0) { + pr_aud_err("cmd media format block failed\n"); + break; + } + rc = audwmapro_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + case AUDIO_STOP: { + pr_debug("AUDIO_STOP\n"); + audio->stopped = 1; + audwmapro_flush(audio); + audio->enabled = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + if (rc < 0) { + pr_aud_err("Audio Stop procedure failed rc=%d\n", rc); + break; + } + break; + } + case AUDIO_PAUSE: { + pr_debug("AUDIO_PAUSE %ld\n", arg); + if (arg == 1) { + rc = audwmapro_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause FAILED rc=%d\n", __func__, + rc); + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audwmapro_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", + __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + } + break; + } + case AUDIO_FLUSH: { + pr_debug("AUDIO_FLUSH\n"); + audio->rflush = 1; + audio->wflush = 1; + /* Flush DSP */ + rc = audwmapro_flush(audio); + /* Flush input / Output buffer in software*/ + audwmapro_ioport_reset(audio); + if (rc < 0) { + pr_aud_err("AUDIO_FLUSH interrupted\n"); + rc = -EINTR; + } else { + audio->rflush = 0; + audio->wflush = 0; + } + audio->eos_flag = 0; + audio->eos_rsp = 0; + break; + } + case AUDIO_REGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_REGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwmapro_pmem_add(audio, &info); + break; + } + case AUDIO_DEREGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_DEREGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwmapro_pmem_remove(audio, &info); + break; + } + case AUDIO_GET_WMAPRO_CONFIG: { + if (copy_to_user((void *)arg, &audio->wmapro_config, + sizeof(struct msm_audio_wmapro_config))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_WMAPRO_CONFIG: { + if (copy_from_user(&audio->wmapro_config, (void *)arg, + sizeof(struct msm_audio_wmapro_config))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + 
cfg.buffer_count = audio->str_cfg.buffer_count; + pr_debug("GET STREAM CFG %d %d\n", cfg.buffer_size, + cfg.buffer_count); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + pr_debug("SET STREAM CONFIG\n"); + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config cfg; + if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_aud_err("Not sufficient permission to" + "change the playback mode\n"); + rc = -EACCES; + break; + } + if ((config.buffer_count > PCM_BUF_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + audio->pcm_cfg.buffer_count = config.buffer_count; + audio->pcm_cfg.buffer_size = config.buffer_size; + audio->pcm_cfg.channel_count = config.channel_count; + audio->pcm_cfg.sample_rate = config.sample_rate; + rc = 0; + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]", __func__, + audio->ac->session, cfg.meta_info_enable); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *)arg, &audio->ac->session, + sizeof(unsigned short))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct q6audio *audio = file->private_data; + mutex_lock(&audio->lock); + audwmapro_disable(audio); + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + audwmapro_reset_pmem_region(audio); + audio->event_abort = 1; + wake_up(&audio->event_wait); + audwmapro_reset_event_queue(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + mutex_destroy(&audio->lock); + mutex_destroy(&audio->read_lock); + mutex_destroy(&audio->write_lock); + mutex_destroy(&audio->get_event_lock); +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio); + pr_aud_info("%s: wmapro decoder success\n", __func__); + return 0; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio *audio = NULL; + int rc = 0; + int i; + struct audwmapro_event *e_node = NULL; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_wmapro_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("Could not allocate memory 
for wma decode driver\n"); + return -ENOMEM; + } + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.sample_rate = 48000; + audio->pcm_cfg.channel_count = 2; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audwmapro_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + /* Only AIO interface */ + if (file->f_flags & O_NONBLOCK) { + pr_debug("set to aio interface\n"); + audio->drv_status |= ADRV_STATUS_AIO_INTF; + audio->drv_ops.out_flush = audwmapro_async_out_flush; + audio->drv_ops.in_flush = audwmapro_async_in_flush; + audio->drv_ops.fsync = audwmapro_async_fsync; + q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + } else { + pr_aud_err("SIO interface not supported\n"); + rc = -EACCES; + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_WMA_V10PRO); + if (rc < 0) { + pr_aud_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open WMA decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_WMA_V10PRO); + if (rc < 0) { + pr_aud_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_aud_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + spin_lock_init(&audio->event_queue_lock); + init_waitqueue_head(&audio->cmd_wait); + init_waitqueue_head(&audio->write_wait); + init_waitqueue_head(&audio->event_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->in_queue); + INIT_LIST_HEAD(&audio->pmem_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + + audio->drv_ops.out_flush(audio); + audio->opened = 1; + file->private_data = audio; + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_wmapro_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audwmapro_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + for (i = 0; i < AUDWMAPRO_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audwmapro_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_aud_err("event pkt alloc failed\n"); + break; + } + } + pr_aud_info("%s:wmapro decoder open success, session_id = %d\n", __func__, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_wmapro_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audwmapro_fsync, +}; + +struct miscdevice audwmapro_misc = { + .minor = 
MISC_DYNAMIC_MINOR, + .name = "msm_wmapro", + .fops = &audio_wmapro_fops, +}; + +static int __init audio_wmapro_init(void) +{ + return misc_register(&audwmapro_misc); +} + +device_initcall(audio_wmapro_init); diff --git a/arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c b/arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c new file mode 100644 index 00000000..00ba57e5 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c @@ -0,0 +1,1901 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "timpani_profile_8x60.h" + +#ifdef CONFIG_MACH_VIGOR +#include "timpani_profile_8x60_vigor.h" +#else +#include "timpani_profile_8x60_lead.h" +#endif + +#include +#include "snddev_mi2s.h" +#include + +#include "snddev_virtual.h" + +#ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_hsed_config; +static void snddev_hsed_config_modify_setting(int type); +static void snddev_hsed_config_restore_setting(void); +#endif + +/* define the value for BT_SCO */ + + +#define SNDDEV_GPIO_MIC2_ANCR_SEL 294 + +static struct q6v2audio_analog_ops default_audio_ops; +static struct q6v2audio_analog_ops *audio_ops = &default_audio_ops; + +void speaker_enable(int en) +{ + if (audio_ops->speaker_enable) + audio_ops->speaker_enable(en); +} + +void headset_enable(int en) +{ + if (audio_ops->headset_enable) + audio_ops->headset_enable(en); +} + +void handset_enable(int en) +{ + if (audio_ops->handset_enable) + audio_ops->handset_enable(en); +} + +void headset_speaker_enable(int en) +{ + if (audio_ops->headset_speaker_enable) + audio_ops->headset_speaker_enable(en); +} + +void int_mic_enable(int en) +{ + if (audio_ops->int_mic_enable) + audio_ops->int_mic_enable(en); +} + +void back_mic_enable(int en) +{ + if (audio_ops->back_mic_enable) + audio_ops->back_mic_enable(en); +} + +void ext_mic_enable(int en) +{ + if (audio_ops->ext_mic_enable) + audio_ops->ext_mic_enable(en); +} + +void stereo_mic_enable(int en) +{ + if (audio_ops->stereo_mic_enable) + audio_ops->stereo_mic_enable(en); +} + +void usb_headset_enable(int en) +{ + if (audio_ops->usb_headset_enable) + audio_ops->usb_headset_enable(en); +} + +void fm_headset_enable(int en) +{ + if (audio_ops->fm_headset_enable) + audio_ops->fm_headset_enable(en); +} + +void fm_speaker_enable(int en) +{ + if (audio_ops->fm_speaker_enable) + audio_ops->fm_speaker_enable(en); +} + +void voltage_on(int on) +{ + if (audio_ops->voltage_on) + audio_ops->voltage_on(on); +} + +static struct regulator *s3; +static struct regulator *mvs; + +static void msm_snddev_enable_dmic_power(void) +{ + int ret; + + pr_aud_err("%s", __func__); + s3 = regulator_get(NULL, "8058_s3"); 
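+	/*
+	 * DMIC power-up sequence: program the "8058_s3" supply to 1.8 V and
+	 * enable it, then enable the "8901_mvs0" regulator, and finally
+	 * switch on the stereo mic path. The error labels below unwind
+	 * whatever has already been enabled.
+	 */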
+ if (IS_ERR(s3)) + return; + + ret = regulator_set_voltage(s3, 1800000, 1800000); + if (ret) { + pr_aud_err("%s: error setting voltage\n", __func__); + goto fail_s3; + } + + ret = regulator_enable(s3); + if (ret) { + pr_aud_err("%s: error enabling regulator\n", __func__); + goto fail_s3; + } + + mvs = regulator_get(NULL, "8901_mvs0"); + if (IS_ERR(mvs)) + goto fail_mvs0_get; + + ret = regulator_enable(mvs); + if (ret) { + pr_aud_err("%s: error setting regulator\n", __func__); + goto fail_mvs0_enable; + } + + stereo_mic_enable(1); + return; + +fail_mvs0_enable: + regulator_put(mvs); + mvs = NULL; +fail_mvs0_get: + regulator_disable(s3); +fail_s3: + regulator_put(s3); + s3 = NULL; +} + +static void msm_snddev_disable_dmic_power(void) +{ + int ret; + pr_aud_err("%s", __func__); + + if (mvs) { + ret = regulator_disable(mvs); + if (ret < 0) + pr_aud_err("%s: error disabling vreg mvs\n", __func__); + regulator_put(mvs); + mvs = NULL; + } + + if (s3) { + ret = regulator_disable(s3); + if (ret < 0) + pr_aud_err("%s: error disabling regulator s3\n", __func__); + regulator_put(s3); + s3 = NULL; + } + stereo_mic_enable(0); + +} + +static void msm_snddev_dmic_power(int en) +{ + pr_aud_err("%s", __func__); + if (en) + msm_snddev_enable_dmic_power(); + else + msm_snddev_disable_dmic_power(); +} + +/* GPIO_CLASS_D0_EN */ +#define SNDDEV_GPIO_CLASS_D0_EN 227 + +/* GPIO_CLASS_D1_EN */ +#define SNDDEV_GPIO_CLASS_D1_EN 229 + +#define PM8901_MPP_3 (2) /* PM8901 MPP starts from 0 */ +static void config_class_d1_gpio(int enable) +{ + int rc; + + if (enable) { + rc = gpio_request(SNDDEV_GPIO_CLASS_D1_EN, "CLASSD1_EN"); + if (rc) { + pr_aud_err("%s: spkr pamp gpio %d request" + "failed\n", __func__, SNDDEV_GPIO_CLASS_D1_EN); + return; + } + gpio_direction_output(SNDDEV_GPIO_CLASS_D1_EN, 1); + gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D1_EN, 1); + } else { + gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D1_EN, 0); + gpio_free(SNDDEV_GPIO_CLASS_D1_EN); + } +} + +static void config_class_d0_gpio(int enable) +{ + int rc; + + if (enable) { + rc = pm8901_mpp_config_digital_out(PM8901_MPP_3, + PM8901_MPP_DIG_LEVEL_MSMIO, 1); + + if (rc) { + pr_aud_err("%s: CLASS_D0_EN failed\n", __func__); + return; + } + + rc = gpio_request(SNDDEV_GPIO_CLASS_D0_EN, "CLASSD0_EN"); + + if (rc) { + pr_aud_err("%s: spkr pamp gpio pm8901 mpp3 request" + "failed\n", __func__); + pm8901_mpp_config_digital_out(PM8901_MPP_3, + PM8901_MPP_DIG_LEVEL_MSMIO, 0); + return; + } + + gpio_direction_output(SNDDEV_GPIO_CLASS_D0_EN, 1); + gpio_set_value(SNDDEV_GPIO_CLASS_D0_EN, 1); + + } else { + pm8901_mpp_config_digital_out(PM8901_MPP_3, + PM8901_MPP_DIG_LEVEL_MSMIO, 0); + gpio_set_value(SNDDEV_GPIO_CLASS_D0_EN, 0); + gpio_free(SNDDEV_GPIO_CLASS_D0_EN); + } +} + +void msm_snddev_poweramp_on(void) +{ + + pr_debug("%s: enable stereo spkr amp\n", __func__); + config_class_d0_gpio(1); + config_class_d1_gpio(1); +} + +void msm_snddev_poweramp_off(void) +{ + + pr_debug("%s: disable stereo spkr amp\n", __func__); + config_class_d0_gpio(0); + config_class_d1_gpio(0); + msleep(30); +} + + +static void msm_snddev_enable_dmic_sec_power(void) +{ + pr_aud_err("%s", __func__); + msm_snddev_enable_dmic_power(); + +#ifdef CONFIG_PMIC8058_OTHC + pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_ALWAYS_ON); +#endif +} + +static void msm_snddev_disable_dmic_sec_power(void) +{ + pr_aud_err("%s", __func__); + msm_snddev_disable_dmic_power(); + +#ifdef CONFIG_PMIC8058_OTHC + pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_OFF); +#endif +} + +static void 
msm_snddev_dmic_sec_power(int en) +{ + pr_aud_err("%s", __func__); + if (en) + msm_snddev_enable_dmic_sec_power(); + else + msm_snddev_disable_dmic_sec_power(); +} + +static struct adie_codec_action_unit iearpiece_48KHz_osr256_actions[] = + EAR_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iearpiece_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iearpiece_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(iearpiece_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iearpiece_profile = { + .path_type = ADIE_CODEC_RX, + .settings = iearpiece_settings, + .setting_sz = ARRAY_SIZE(iearpiece_settings), +}; + +static struct snddev_icodec_data snddev_iearpiece_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "handset_rx", + .copp_id = 0, + .profile = &iearpiece_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = handset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_RECEIVER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_RECEIVER, + .default_aic3254_id = PLAYBACK_RECEIVER, +}; + +static struct platform_device msm_iearpiece_device = { + .name = "snddev_icodec", + .id = 0, + .dev = { .platform_data = &snddev_iearpiece_data }, +}; + +static struct adie_codec_action_unit iearpiece_hac_48KHz_osr256_actions[] = + EAR_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iearpiece_hac_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iearpiece_hac_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(iearpiece_hac_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iearpiece_hac_profile = { + .path_type = ADIE_CODEC_RX, + .settings = iearpiece_hac_settings, + .setting_sz = ARRAY_SIZE(iearpiece_hac_settings), +}; + +static struct snddev_icodec_data snddev_ihac_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "hac_rx", + .copp_id = 0, + .profile = &iearpiece_hac_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = handset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_RECEIVER, + .aic3254_voc_id = HAC, + .default_aic3254_id = PLAYBACK_RECEIVER, +}; + +static struct platform_device msm_ihac_device = { + .name = "snddev_icodec", + .id = 38, + .dev = { .platform_data = &snddev_ihac_data }, +}; + +static struct adie_codec_action_unit imic_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry imic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = imic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(imic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile imic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = imic_settings, + .setting_sz = ARRAY_SIZE(imic_settings), +}; + +static struct snddev_icodec_data snddev_imic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "handset_tx", + .copp_id = 1, + .profile = &imic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECOGNITION_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECOGNITION_IMIC, +}; + +static struct platform_device msm_imic_device = { + .name = "snddev_icodec", + .id = 1, + .dev = { .platform_data = &snddev_imic_data }, +}; + +static struct snddev_icodec_data snddev_nomic_headset_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "nomic_headset_tx", + .copp_id = 1, + .profile = &imic_profile, + .channel_mode = 1, + 
.default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_HEADSET, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_nomic_headset_tx_device = { + .name = "snddev_icodec", + .id = 40, + .dev = { .platform_data = &snddev_nomic_headset_data }, +}; + +static struct adie_codec_action_unit headset_ab_cpls_48KHz_osr256_actions[] = + HEADSET_AB_CPLS_48000_OSR_256; + +static struct adie_codec_hwsetting_entry headset_ab_cpls_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = headset_ab_cpls_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_ab_cpls_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_ab_cpls_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_ab_cpls_settings, + .setting_sz = ARRAY_SIZE(headset_ab_cpls_settings), +}; + +static struct snddev_icodec_data snddev_ihs_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_stereo_rx", + .copp_id = 0, + .profile = &headset_ab_cpls_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_EMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_headset_stereo_device = { + .name = "snddev_icodec", + .id = 34, + .dev = { .platform_data = &snddev_ihs_stereo_rx_data }, +}; + +static struct snddev_icodec_data snddev_nomic_ihs_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "nomic_headset_stereo_rx", + .copp_id = 0, + .profile = &headset_ab_cpls_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_nomic_headset_stereo_device = { + .name = "snddev_icodec", + .id = 39, + .dev = { .platform_data = &snddev_nomic_ihs_stereo_rx_data }, +}; + +static struct adie_codec_action_unit headset_anc_48KHz_osr256_actions[] = + ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256; + +static struct adie_codec_hwsetting_entry headset_anc_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = headset_anc_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_anc_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_anc_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_anc_settings, + .setting_sz = ARRAY_SIZE(headset_anc_settings), +}; + +static struct snddev_icodec_data snddev_anc_headset_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE | SNDDEV_CAP_ANC), + .name = "anc_headset_stereo_rx", + .copp_id = PRIMARY_I2S_RX, + .profile = &headset_anc_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, +}; + +static struct platform_device msm_anc_headset_device = { + .name = "snddev_icodec", + .id = 51, + .dev = { .platform_data = &snddev_anc_headset_data }, +}; + +static struct adie_codec_action_unit ispkr_stereo_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_stereo_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ispkr_stereo_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_stereo_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_stereo_profile = { + 
.path_type = ADIE_CODEC_RX, + .settings = ispkr_stereo_settings, + .setting_sz = ARRAY_SIZE(ispkr_stereo_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_stereo_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "speaker_stereo_rx", + .copp_id = 0, + .profile = &ispkr_stereo_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_stereo_device = { + .name = "snddev_icodec", + .id = 2, + .dev = { .platform_data = &snddev_ispkr_stereo_data }, +}; + +static struct adie_codec_action_unit idmic_mono_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry idmic_mono_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = idmic_mono_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(idmic_mono_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile idmic_mono_profile = { + .path_type = ADIE_CODEC_TX, + .settings = idmic_mono_settings, + .setting_sz = ARRAY_SIZE(idmic_mono_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "speaker_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &idmic_mono_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_SPEAKER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_ispkr_mic_device = { + .name = "snddev_icodec", + .id = 3, + .dev = { .platform_data = &snddev_ispkr_mic_data }, +}; + +static struct adie_codec_action_unit handset_dual_mic_endfire_8KHz_osr256_actions[] = + DMIC1_PRI_STEREO_8000_OSR_256; + +static struct adie_codec_action_unit spk_dual_mic_endfire_8KHz_osr256_actions[] = + DMIC1_PRI_STEREO_8000_OSR_256; + +static struct adie_codec_hwsetting_entry handset_dual_mic_endfire_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = handset_dual_mic_endfire_8KHz_osr256_actions, + .action_sz = ARRAY_SIZE(handset_dual_mic_endfire_8KHz_osr256_actions), + } +}; + +static struct adie_codec_hwsetting_entry spk_dual_mic_endfire_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = spk_dual_mic_endfire_8KHz_osr256_actions, + .action_sz = ARRAY_SIZE(spk_dual_mic_endfire_8KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile handset_dual_mic_endfire_profile = { + .path_type = ADIE_CODEC_TX, + .settings = handset_dual_mic_endfire_settings, + .setting_sz = ARRAY_SIZE(handset_dual_mic_endfire_settings), +}; + +static struct adie_codec_dev_profile spk_dual_mic_endfire_profile = { + .path_type = ADIE_CODEC_TX, + .settings = spk_dual_mic_endfire_settings, + .setting_sz = ARRAY_SIZE(spk_dual_mic_endfire_settings), +}; + +static struct snddev_icodec_data snddev_dual_mic_endfire_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "handset_dual_mic_endfire_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &handset_dual_mic_endfire_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_power, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_hs_dual_mic_endfire_device = { + .name = "snddev_icodec", + .id = 14, + 
.dev = { .platform_data = &snddev_dual_mic_endfire_data }, +}; + +static struct snddev_icodec_data snddev_dual_mic_spkr_endfire_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "speaker_dual_mic_endfire_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &spk_dual_mic_endfire_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_power, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_SPEAKER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_spkr_dual_mic_endfire_device = { + .name = "snddev_icodec", + .id = 15, + .dev = { .platform_data = &snddev_dual_mic_spkr_endfire_data }, +}; + +static struct adie_codec_action_unit dual_mic_broadside_8osr256_actions[] = + HS_DMIC2_STEREO_8000_OSR_256; + +static struct adie_codec_hwsetting_entry dual_mic_broadside_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = dual_mic_broadside_8osr256_actions, + .action_sz = ARRAY_SIZE(dual_mic_broadside_8osr256_actions), + } +}; + +static struct adie_codec_dev_profile dual_mic_broadside_profile = { + .path_type = ADIE_CODEC_TX, + .settings = dual_mic_broadside_settings, + .setting_sz = ARRAY_SIZE(dual_mic_broadside_settings), +}; + +static struct snddev_icodec_data snddev_hs_dual_mic_broadside_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "handset_dual_mic_broadside_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &dual_mic_broadside_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_sec_power, +}; + +static struct platform_device msm_hs_dual_mic_broadside_device = { + .name = "snddev_icodec", + .id = 21, + .dev = { .platform_data = &snddev_hs_dual_mic_broadside_data }, +}; + +static struct snddev_icodec_data snddev_spkr_dual_mic_broadside_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "speaker_dual_mic_broadside_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &dual_mic_broadside_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_sec_power, +}; + +static struct platform_device msm_spkr_dual_mic_broadside_device = { + .name = "snddev_icodec", + .id = 18, + .dev = { .platform_data = &snddev_spkr_dual_mic_broadside_data }, +}; + +static struct snddev_hdmi_data snddev_hdmi_stereo_rx_data = { + .capability = SNDDEV_CAP_RX , + .name = "hdmi_stereo_rx", + .copp_id = HDMI_RX, + .channel_mode = 0, + .default_sample_rate = 48000, +}; + +static struct platform_device msm_snddev_hdmi_stereo_rx_device = { + .name = "snddev_hdmi", + .id = 0, + .dev = { .platform_data = &snddev_hdmi_stereo_rx_data }, +}; + +static struct snddev_mi2s_data snddev_mi2s_fm_tx_data = { + .capability = SNDDEV_CAP_TX , + .name = "fmradio_stereo_tx", + .copp_id = MI2S_TX, + .channel_mode = 2, /* stereo */ + .sd_lines = MI2S_SD3, /* sd3 */ + .sample_rate = 48000, +}; + +static struct platform_device msm_mi2s_fm_tx_device = { + .name = "snddev_mi2s", + .id = 0, + .dev = { .platform_data = &snddev_mi2s_fm_tx_data }, +}; + +static struct adie_codec_action_unit ifmradio_speaker_osr256_actions[] = + AUXPGA_SPEAKER_RX; + +static struct adie_codec_hwsetting_entry ifmradio_speaker_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ifmradio_speaker_osr256_actions, + .action_sz = ARRAY_SIZE(ifmradio_speaker_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ifmradio_speaker_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ifmradio_speaker_settings, + .setting_sz = 
ARRAY_SIZE(ifmradio_speaker_settings), +}; + +static struct snddev_mi2s_data snddev_mi2s_fm_rx_data = { + .capability = SNDDEV_CAP_RX , + .name = "fmradio_stereo_rx", + .copp_id = MI2S_RX, + .channel_mode = 2, /* stereo */ + .sd_lines = MI2S_SD3, /* sd3 */ + .sample_rate = 48000, +}; + +static struct platform_device msm_mi2s_fm_rx_device = { + .name = "snddev_mi2s", + .id = 1, + .dev = { .platform_data = &snddev_mi2s_fm_rx_data }, +}; + +static struct snddev_icodec_data snddev_ifmradio_speaker_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_FM), + .name = "fmradio_speaker_rx", + .copp_id = 0, + .profile = &ifmradio_speaker_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = fm_speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = FM_OUT_SPEAKER, + .aic3254_voc_id = FM_OUT_SPEAKER, + .default_aic3254_id = FM_OUT_SPEAKER, +}; + +static struct platform_device msm_ifmradio_speaker_device = { + .name = "snddev_icodec", + .id = 9, + .dev = { .platform_data = &snddev_ifmradio_speaker_data }, +}; + +static struct adie_codec_action_unit ifmradio_headset_osr256_actions[] = + AUXPGA_HEADSET_AB_CPLS_RX_48000; + +static struct adie_codec_hwsetting_entry ifmradio_headset_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ifmradio_headset_osr256_actions, + .action_sz = ARRAY_SIZE(ifmradio_headset_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ifmradio_headset_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ifmradio_headset_settings, + .setting_sz = ARRAY_SIZE(ifmradio_headset_settings), +}; + +static struct snddev_icodec_data snddev_ifmradio_headset_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_FM), + .name = "fmradio_headset_rx", + .copp_id = 0, + .profile = &ifmradio_headset_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = fm_headset_enable, + .voltage_on = voltage_on, + .aic3254_id = FM_OUT_HEADSET, + .aic3254_voc_id = FM_OUT_HEADSET, + .default_aic3254_id = FM_OUT_HEADSET, +}; + +static struct platform_device msm_ifmradio_headset_device = { + .name = "snddev_icodec", + .id = 10, + .dev = { .platform_data = &snddev_ifmradio_headset_data }, +}; + +static struct adie_codec_action_unit iheadset_mic_tx_osr256_actions[] = + HS_AMIC2_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iheadset_mic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iheadset_mic_tx_osr256_actions, + .action_sz = ARRAY_SIZE(iheadset_mic_tx_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iheadset_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = iheadset_mic_tx_settings, + .setting_sz = ARRAY_SIZE(iheadset_mic_tx_settings), +}; + +static struct snddev_icodec_data snddev_headset_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "headset_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &iheadset_mic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = VOICERECOGNITION_EMIC, + .aic3254_voc_id = CALL_UPLINK_EMIC_HEADSET, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_headset_mic_device = { + .name = "snddev_icodec", + .id = 33, + .dev = { .platform_data = &snddev_headset_mic_data }, +}; + +static struct adie_codec_action_unit + ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions[] = + SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry + ihs_stereo_speaker_stereo_rx_settings[] = { + { + .freq_plan = 48000, + .osr = 
256, + .actions = ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions, + .action_sz = + ARRAY_SIZE(ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ihs_stereo_speaker_stereo_rx_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ihs_stereo_speaker_stereo_rx_settings, + .setting_sz = ARRAY_SIZE(ihs_stereo_speaker_stereo_rx_settings), +}; + +static struct snddev_icodec_data snddev_ihs_stereo_speaker_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_stereo_speaker_stereo_rx", + .copp_id = 0, + .profile = &ihs_stereo_speaker_stereo_rx_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = headset_speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = RING_HEADSET_SPEAKER, + .aic3254_voc_id = RING_HEADSET_SPEAKER, + .default_aic3254_id = RING_HEADSET_SPEAKER, +}; + +static struct platform_device msm_ihs_stereo_speaker_stereo_rx_device = { + .name = "snddev_icodec", + .id = 22, + .dev = { .platform_data = &snddev_ihs_stereo_speaker_stereo_rx_data }, +}; + +static struct snddev_ecodec_data snddev_bt_sco_earpiece_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "bt_sco_rx", + .copp_id = PCM_RX, + .channel_mode = 1, +}; + +static struct snddev_ecodec_data snddev_bt_sco_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "bt_sco_tx", + .copp_id = PCM_TX, + .channel_mode = 1, +}; + +struct platform_device msm_bt_sco_earpiece_device = { + .name = "msm_snddev_ecodec", + .id = 0, + .dev = { .platform_data = &snddev_bt_sco_earpiece_data }, +}; + +struct platform_device msm_bt_sco_mic_device = { + .name = "msm_snddev_ecodec", + .id = 1, + .dev = { .platform_data = &snddev_bt_sco_mic_data }, +}; + +static struct adie_codec_action_unit itty_mono_tx_actions[] = + TTY_HEADSET_MONO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry itty_mono_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = itty_mono_tx_actions, + .action_sz = ARRAY_SIZE(itty_mono_tx_actions), + }, +}; + +static struct adie_codec_dev_profile itty_mono_tx_profile = { + .path_type = ADIE_CODEC_TX, + .settings = itty_mono_tx_settings, + .setting_sz = ARRAY_SIZE(itty_mono_tx_settings), +}; + +static struct snddev_icodec_data snddev_itty_mono_tx_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE | SNDDEV_CAP_TTY), + .name = "tty_headset_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &itty_mono_tx_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = TTY_IN_FULL, + .aic3254_voc_id = TTY_IN_FULL, + .default_aic3254_id = TTY_IN_FULL, +}; + +static struct platform_device msm_itty_mono_tx_device = { + .name = "snddev_icodec", + .id = 16, + .dev = { .platform_data = &snddev_itty_mono_tx_data }, +}; + +static struct adie_codec_action_unit itty_mono_rx_actions[] = + TTY_HEADSET_MONO_RX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry itty_mono_rx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = itty_mono_rx_actions, + .action_sz = ARRAY_SIZE(itty_mono_rx_actions), + }, +}; + +static struct adie_codec_dev_profile itty_mono_rx_profile = { + .path_type = ADIE_CODEC_RX, + .settings = itty_mono_rx_settings, + .setting_sz = ARRAY_SIZE(itty_mono_rx_settings), +}; + +static struct snddev_icodec_data snddev_itty_mono_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE | SNDDEV_CAP_TTY), + .name = "tty_headset_mono_rx", + .copp_id = PRIMARY_I2S_RX, + .profile = 
&itty_mono_rx_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = TTY_OUT_FULL, + .aic3254_voc_id = TTY_OUT_FULL, + .default_aic3254_id = TTY_OUT_FULL, +}; + +static struct platform_device msm_itty_mono_rx_device = { + .name = "snddev_icodec", + .id = 17, + .dev = { .platform_data = &snddev_itty_mono_rx_data }, +}; + +#if 1 //HTC created device +static struct adie_codec_action_unit bmic_tx_48KHz_osr256_actions[] = + AMIC_SEC_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry bmic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = bmic_tx_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(bmic_tx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile bmic_tx_profile = { + .path_type = ADIE_CODEC_TX, + .settings = bmic_tx_settings, + .setting_sz = ARRAY_SIZE(bmic_tx_settings), +}; + +static struct snddev_icodec_data snddev_bmic_tx_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "back_mic_tx", + .copp_id = 1, + .profile = &bmic_tx_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = back_mic_enable, + .aic3254_id = VOICERECORD_EMIC, + .aic3254_voc_id = VOICERECORD_EMIC, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_bmic_tx_device = { + .name = "snddev_icodec", + .id = 50, /* FIX ME */ + .dev = { .platform_data = &snddev_bmic_tx_data }, +}; + +static struct adie_codec_action_unit headset_mono_ab_cpls_48KHz_osr256_actions[] = + HEADSET_AB_CPLS_48000_OSR_256; /* FIX ME */ + +static struct adie_codec_hwsetting_entry headset_mono_ab_cpls_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = headset_mono_ab_cpls_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_mono_ab_cpls_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_mono_ab_cpls_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_mono_ab_cpls_settings, + .setting_sz = ARRAY_SIZE(headset_mono_ab_cpls_settings), +}; + +static struct snddev_icodec_data snddev_ihs_mono_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_mono_rx", + .copp_id = 0, + .profile = &headset_mono_ab_cpls_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_EMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_headset_mono_ab_cpls_device = { + .name = "snddev_icodec", + .id = 35, /* FIX ME */ + .dev = { .platform_data = &snddev_ihs_mono_rx_data }, +}; + +static struct adie_codec_action_unit ihs_ispk_stereo_rx_48KHz_osr256_actions[] = + SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ihs_ispk_stereo_rx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ihs_ispk_stereo_rx_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ihs_ispk_stereo_rx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ihs_ispk_stereo_rx_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ihs_ispk_stereo_rx_settings, + .setting_sz = ARRAY_SIZE(ihs_ispk_stereo_rx_settings), +}; + +static struct snddev_icodec_data snddev_ihs_ispk_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_speaker_stereo_rx", + .copp_id = 0, + .profile = &ihs_ispk_stereo_rx_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = 
headset_speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = RING_HEADSET_SPEAKER, + .aic3254_voc_id = RING_HEADSET_SPEAKER, + .default_aic3254_id = RING_HEADSET_SPEAKER, +}; + +static struct platform_device msm_iheadset_ispeaker_rx_device = { + .name = "snddev_icodec", + .id = 36, /* FIX ME */ + .dev = { .platform_data = &snddev_ihs_ispk_stereo_rx_data }, +}; + +static struct adie_codec_action_unit idual_mic_48KHz_osr256_actions[] = + DUAL_MIC_STEREO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry idual_mic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = idual_mic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(idual_mic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile idual_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = idual_mic_settings, + .setting_sz = ARRAY_SIZE(idual_mic_settings), +}; + +static struct snddev_icodec_data snddev_idual_mic_endfire_real_stereo_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "dual_mic_stereo_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &idual_mic_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = stereo_mic_enable, + .aic3254_id = VIDEORECORD_IMIC, + .aic3254_voc_id = VOICERECORD_EMIC, /* FIX ME */ + .default_aic3254_id = VIDEORECORD_IMIC, +}; + +static struct platform_device msm_real_stereo_tx_device = { + .name = "snddev_icodec", + .id = 26, /* FIX ME */ + .dev = { .platform_data = &snddev_idual_mic_endfire_real_stereo_data }, +}; + + + +static struct adie_codec_action_unit iusb_headset_stereo_rx_48KHz_osr256_actions[] = + SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iusb_headset_stereo_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iusb_headset_stereo_rx_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(iusb_headset_stereo_rx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iusb_headset_stereo_profile = { + .path_type = ADIE_CODEC_RX, + .settings = iusb_headset_stereo_settings, + .setting_sz = ARRAY_SIZE(iusb_headset_stereo_settings), +}; + +static struct snddev_icodec_data snddev_iusb_headset_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "usb_headset_stereo_rx", + .copp_id = 0, + .profile = &iusb_headset_stereo_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = usb_headset_enable, + .voltage_on = voltage_on, + .aic3254_id = USB_AUDIO, + .aic3254_voc_id = USB_AUDIO, + .default_aic3254_id = USB_AUDIO, +}; + +static struct platform_device msm_iusb_headset_rx_device = { + .name = "snddev_icodec", + .id = 27, /* FIX ME */ + .dev = { .platform_data = &snddev_iusb_headset_stereo_rx_data }, +}; + +static struct adie_codec_action_unit ispkr_mono_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_mono_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ispkr_mono_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_mono_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_mono_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ispkr_mono_settings, + .setting_sz = ARRAY_SIZE(ispkr_mono_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_mono_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "speaker_mono_rx", + .copp_id = 0, + .profile = &ispkr_mono_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + 
.aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_mono_device = { + .name = "snddev_icodec", + .id = 28, + .dev = { .platform_data = &snddev_ispkr_mono_data }, +}; + +static struct adie_codec_action_unit camcorder_imic_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_imic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_imic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_imic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_imic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_imic_settings, + .setting_sz = ARRAY_SIZE(camcorder_imic_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_imic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_mono_tx", + .copp_id = 1, + .profile = &camcorder_imic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECOGNITION_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECOGNITION_IMIC, +}; + +static struct platform_device msm_camcorder_imic_device = { + .name = "snddev_icodec", + .id = 53, + .dev = { .platform_data = &snddev_camcorder_imic_data }, +}; + +static struct adie_codec_action_unit camcorder_idual_mic_48KHz_osr256_actions[] = + DUAL_MIC_STEREO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_idual_mic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_idual_mic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_idual_mic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_idual_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_idual_mic_settings, + .setting_sz = ARRAY_SIZE(camcorder_idual_mic_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_imic_stereo_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_stereo_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &camcorder_idual_mic_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = stereo_mic_enable, + .aic3254_id = VIDEORECORD_IMIC, + .aic3254_voc_id = VOICERECORD_EMIC, /* FIX ME */ + .default_aic3254_id = VIDEORECORD_IMIC, +}; + +static struct platform_device msm_camcorder_imic_stereo_device = { + .name = "snddev_icodec", + .id = 54, /* FIX ME */ + .dev = { .platform_data = &snddev_camcorder_imic_stereo_data }, +}; + +static struct adie_codec_action_unit camcorder_idual_mic_rev_48KHz_osr256_actions[] = + DUAL_MIC_STEREO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_idual_mic_rev_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_idual_mic_rev_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_idual_mic_rev_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_idual_mic_rev_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_idual_mic_rev_settings, + .setting_sz = ARRAY_SIZE(camcorder_idual_mic_rev_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_imic_stereo_rev_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_stereo_rev_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &camcorder_idual_mic_rev_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = 
stereo_mic_enable, + .aic3254_id = VIDEORECORD_IMIC, + .aic3254_voc_id = VOICERECORD_EMIC, /* FIX ME */ + .default_aic3254_id = VIDEORECORD_IMIC, +}; + +static struct platform_device msm_camcorder_imic_stereo_rev_device = { + .name = "snddev_icodec", + .id = 55, + .dev = { .platform_data = &snddev_camcorder_imic_stereo_rev_data }, +}; + +static struct adie_codec_action_unit camcorder_iheadset_mic_tx_osr256_actions[] = + HS_AMIC2_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_iheadset_mic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_iheadset_mic_tx_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_iheadset_mic_tx_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_iheadset_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_iheadset_mic_tx_settings, + .setting_sz = ARRAY_SIZE(camcorder_iheadset_mic_tx_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_headset_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_headset_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &camcorder_iheadset_mic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = VOICERECOGNITION_EMIC, + .aic3254_voc_id = CALL_UPLINK_EMIC_HEADSET, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_camcorder_headset_mic_device = { + .name = "snddev_icodec", + .id = 56, + .dev = { .platform_data = &snddev_camcorder_headset_mic_data }, +}; + +static struct adie_codec_action_unit vr_iearpiece_48KHz_osr256_actions[] = + EAR_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry vr_iearpiece_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = vr_iearpiece_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(vr_iearpiece_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile vr_iearpiece_profile = { + .path_type = ADIE_CODEC_RX, + .settings = vr_iearpiece_settings, + .setting_sz = ARRAY_SIZE(vr_iearpiece_settings), +}; + +static struct snddev_icodec_data snddev_vr_iearpiece_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "vr_handset_mono_tx", + .copp_id = 0, + .profile = &vr_iearpiece_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = handset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_RECEIVER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_RECEIVER, + .default_aic3254_id = PLAYBACK_RECEIVER, +}; + +static struct platform_device msm_vr_iearpiece_device = { + .name = "snddev_icodec", + .id = 57, + .dev = { .platform_data = &snddev_vr_iearpiece_data }, +}; + +static struct adie_codec_action_unit vr_iheadset_mic_tx_osr256_actions[] = + HS_AMIC2_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry vr_iheadset_mic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = vr_iheadset_mic_tx_osr256_actions, + .action_sz = ARRAY_SIZE(vr_iheadset_mic_tx_osr256_actions), + } +}; + +static struct adie_codec_dev_profile vr_iheadset_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = vr_iheadset_mic_tx_settings, + .setting_sz = ARRAY_SIZE(vr_iheadset_mic_tx_settings), +}; + +static struct snddev_icodec_data snddev_vr_headset_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "vr_headset_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &vr_iheadset_mic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = 
VOICERECOGNITION_EMIC, + .aic3254_voc_id = CALL_UPLINK_EMIC_HEADSET, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_vr_headset_mic_device = { + .name = "snddev_icodec", + .id = 58, + .dev = { .platform_data = &snddev_vr_headset_mic_data }, +}; + +static struct adie_codec_action_unit ispkr_mono_alt_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_mono_alt_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ispkr_mono_alt_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_mono_alt_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_mono_alt_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ispkr_mono_alt_settings, + .setting_sz = ARRAY_SIZE(ispkr_mono_alt_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_mono_alt_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "speaker_mono_alt_rx", + .copp_id = 0, + .profile = &ispkr_mono_alt_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_mono_alt_device = { + .name = "snddev_icodec", + .id = 59, + .dev = { .platform_data = &snddev_ispkr_mono_alt_data }, +}; + + +static struct adie_codec_action_unit imic_note_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry imic_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = imic_note_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(imic_note_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile imic_note_profile = { + .path_type = ADIE_CODEC_TX, + .settings = imic_note_settings, + .setting_sz = ARRAY_SIZE(imic_note_settings), +}; + +static struct snddev_icodec_data snddev_imic_note_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "imic_note_tx", + .copp_id = 1, + .profile = &imic_note_profile, + .channel_mode = 2, + .default_sample_rate = 16000, + .pamp_on = stereo_mic_enable, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_imic_note_device = { + .name = "snddev_icodec", + .id = 60, + .dev = { .platform_data = &snddev_imic_note_data }, +}; + +static struct adie_codec_action_unit ispkr_note_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = ispkr_note_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_note_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_note_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ispkr_note_settings, + .setting_sz = ARRAY_SIZE(ispkr_note_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_note_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "ispkr_note_rx", + .copp_id = 0, + .profile = &ispkr_note_profile, + .channel_mode = 2, + .default_sample_rate = 16000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_note_device = { + .name = "snddev_icodec", + .id = 61, + .dev = { .platform_data 
= &snddev_ispkr_note_data }, +}; + +static struct adie_codec_action_unit emic_note_16KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry emic_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = emic_note_16KHz_osr256_actions, + .action_sz = ARRAY_SIZE(emic_note_16KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile emic_note_profile = { + .path_type = ADIE_CODEC_TX, + .settings = emic_note_settings, + .setting_sz = ARRAY_SIZE(emic_note_settings), +}; + +static struct snddev_icodec_data snddev_emic_note_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "emic_note_tx", + .copp_id = 1, + .profile = &emic_note_profile, + .channel_mode = 1, + .default_sample_rate = 16000, + .pamp_on = ext_mic_enable, + .aic3254_id = VOICERECORD_EMIC, + .aic3254_voc_id = VOICERECORD_EMIC, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_emic_note_device = { + .name = "snddev_icodec", + .id = 62, + .dev = { .platform_data = &snddev_emic_note_data }, +}; + +static struct adie_codec_action_unit headset_note_48KHz_osr256_actions[] = + HEADSET_AB_CPLS_48000_OSR_256; + +static struct adie_codec_hwsetting_entry headset_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = headset_note_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_note_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_note_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_note_settings, + .setting_sz = ARRAY_SIZE(headset_note_settings), +}; + +static struct snddev_icodec_data snddev_ihs_note_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_note_rx", + .copp_id = 0, + .profile = &headset_note_profile, + .channel_mode = 2, + .default_sample_rate = 16000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_EMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_headset_note_device = { + .name = "snddev_icodec", + .id = 63, + .dev = { .platform_data = &snddev_ihs_note_data }, +}; + + + +#endif + +#ifdef CONFIG_DEBUG_FS +static struct adie_codec_action_unit + ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions[] = + HPH_PRI_D_LEG_STEREO; + +static struct adie_codec_hwsetting_entry + ihs_stereo_rx_class_d_legacy_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = + ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE + (ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions), + } +}; + +static struct adie_codec_action_unit + ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions[] = + HPH_PRI_AB_LEG_STEREO; + +static struct adie_codec_hwsetting_entry + ihs_stereo_rx_class_ab_legacy_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = + ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE + (ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions), + } +}; + +static void snddev_hsed_config_modify_setting(int type) +{ + struct platform_device *device; + struct snddev_icodec_data *icodec_data; + + device = &msm_headset_stereo_device; + icodec_data = (struct snddev_icodec_data *)device->dev.platform_data; + + if (icodec_data) { + if (type == 1) { + icodec_data->voltage_on = NULL; + icodec_data->profile->settings = + ihs_stereo_rx_class_d_legacy_settings; + icodec_data->profile->setting_sz = + ARRAY_SIZE(ihs_stereo_rx_class_d_legacy_settings); + } else 
if (type == 2) { + icodec_data->voltage_on = NULL; + icodec_data->profile->settings = + ihs_stereo_rx_class_ab_legacy_settings; + icodec_data->profile->setting_sz = + ARRAY_SIZE(ihs_stereo_rx_class_ab_legacy_settings); + } + } +} + +static void snddev_hsed_config_restore_setting(void) +{ + struct platform_device *device; + struct snddev_icodec_data *icodec_data; + + device = &msm_headset_stereo_device; + icodec_data = (struct snddev_icodec_data *)device->dev.platform_data; + + if (icodec_data) { + icodec_data->voltage_on = voltage_on; + icodec_data->profile->settings = headset_ab_cpls_settings; + icodec_data->profile->setting_sz = + ARRAY_SIZE(headset_ab_cpls_settings); + } +} + +static ssize_t snddev_hsed_config_debug_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char *lb_str = filp->private_data; + char cmd; + + if (get_user(cmd, ubuf)) + return -EFAULT; + + if (!strcmp(lb_str, "msm_hsed_config")) { + switch (cmd) { + case '0': + snddev_hsed_config_restore_setting(); + break; + + case '1': + snddev_hsed_config_modify_setting(1); + break; + + case '2': + snddev_hsed_config_modify_setting(2); + break; + + default: + break; + } + } + return cnt; +} + +static int snddev_hsed_config_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations snddev_hsed_config_debug_fops = { + .open = snddev_hsed_config_debug_open, + .write = snddev_hsed_config_debug_write +}; +#endif + +static struct snddev_virtual_data snddev_uplink_rx_data = { + .capability = SNDDEV_CAP_RX, + .name = "uplink_rx", + .copp_id = VOICE_PLAYBACK_TX, +}; + +static struct platform_device msm_uplink_rx_device = { + .name = "snddev_virtual", + .dev = { .platform_data = &snddev_uplink_rx_data }, +}; + +static struct platform_device *snd_devices_common[] __initdata = { + &msm_uplink_rx_device, +}; + +static struct platform_device *snd_devices_surf[] __initdata = { + &msm_iearpiece_device, + &msm_imic_device, + &msm_ispkr_stereo_device, + &msm_snddev_hdmi_stereo_rx_device, + &msm_headset_mic_device, + &msm_ispkr_mic_device, + &msm_bt_sco_earpiece_device, + &msm_bt_sco_mic_device, + &msm_headset_stereo_device, + &msm_itty_mono_tx_device, + &msm_itty_mono_rx_device, + &msm_mi2s_fm_tx_device, + &msm_mi2s_fm_rx_device, + &msm_ifmradio_speaker_device, + &msm_ifmradio_headset_device, + &msm_hs_dual_mic_endfire_device, + &msm_spkr_dual_mic_endfire_device, + &msm_hs_dual_mic_broadside_device, + &msm_spkr_dual_mic_broadside_device, + &msm_ihs_stereo_speaker_stereo_rx_device, + &msm_headset_mono_ab_cpls_device, + &msm_iheadset_ispeaker_rx_device, + &msm_bmic_tx_device, + &msm_anc_headset_device, + &msm_real_stereo_tx_device, + &msm_ihac_device, + &msm_nomic_headset_tx_device, + &msm_nomic_headset_stereo_device, + &msm_iusb_headset_rx_device, + &msm_ispkr_mono_device, + &msm_camcorder_imic_device, + &msm_camcorder_imic_stereo_device, + &msm_camcorder_imic_stereo_rev_device, + &msm_camcorder_headset_mic_device, + &msm_vr_iearpiece_device, + &msm_vr_headset_mic_device, + &msm_ispkr_mono_alt_device, + &msm_imic_note_device, + &msm_ispkr_note_device, + &msm_emic_note_device, + &msm_headset_note_device, +}; + +void htc_8x60_register_analog_ops(struct q6v2audio_analog_ops *ops) +{ + audio_ops = ops; +} + +void __init msm_snddev_init(void) +{ + int rc, i; + int dev_id; + + platform_add_devices(snd_devices_surf, ARRAY_SIZE(snd_devices_surf)); +#ifdef CONFIG_DEBUG_FS + debugfs_hsed_config = debugfs_create_file("msm_hsed_config", + 
S_IFREG | S_IRUGO, NULL, + (void *) "msm_hsed_config", &snddev_hsed_config_debug_fops); +#endif + + for (i = 0, dev_id = 0; i < ARRAY_SIZE(snd_devices_common); i++) + snd_devices_common[i]->id = dev_id++; + + platform_add_devices(snd_devices_common, + ARRAY_SIZE(snd_devices_common)); + + rc = gpio_request(SNDDEV_GPIO_CLASS_D1_EN, "CLASSD1_EN"); + if (rc) { + pr_aud_err("%s: spkr pamp gpio %d request" + "failed\n", __func__, SNDDEV_GPIO_CLASS_D1_EN); + } else { + gpio_direction_output(SNDDEV_GPIO_CLASS_D1_EN, 0); + gpio_free(SNDDEV_GPIO_CLASS_D1_EN); + } +} diff --git a/arch/arm/mach-msm/qdsp6v3/dsp_debug.c b/arch/arm/mach-msm/qdsp6v3/dsp_debug.c new file mode 100644 index 00000000..8bf7fee3 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/dsp_debug.c @@ -0,0 +1,202 @@ +/* arch/arm/mach-msm/qdsp6/dsp_dump.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../proc_comm.h" +#include +#include "dsp_debug.h" + +static wait_queue_head_t dsp_wait; +static int dsp_has_crashed; +static int dsp_wait_count; + +static atomic_t dsp_crash_count = ATOMIC_INIT(0); +dsp_state_cb cb_ptr; + +void q6audio_dsp_not_responding(void) +{ + if (cb_ptr) + cb_ptr(DSP_STATE_CRASHED); + if (atomic_add_return(1, &dsp_crash_count) != 1) { + pr_aud_err("q6audio_dsp_not_responding() \ + - parking additional crasher...\n"); + for (;;) + msleep(1000); + } + if (dsp_wait_count) { + dsp_has_crashed = 1; + wake_up(&dsp_wait); + + while (dsp_has_crashed != 2) + wait_event(dsp_wait, dsp_has_crashed == 2); + } else { + pr_aud_err("q6audio_dsp_not_responding() - no waiter?\n"); + } + if (cb_ptr) + cb_ptr(DSP_STATE_CRASH_DUMP_DONE); + + BUG(); +} + +static int dsp_open(struct inode *inode, struct file *file) +{ + return 0; +} + +#define DSP_NMI_ADDR 0x28800010 + +static ssize_t dsp_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + char cmd[32]; + void __iomem *ptr; + + if (count >= sizeof(cmd)) + return -EINVAL; + if (copy_from_user(cmd, buf, count)) + return -EFAULT; + cmd[count] = 0; + + if ((count > 1) && (cmd[count-1] == '\n')) + cmd[count-1] = 0; + + if (!strcmp(cmd, "wait-for-crash")) { + while (!dsp_has_crashed) { + int res; + dsp_wait_count++; + res = wait_event_interruptible(dsp_wait, + dsp_has_crashed); + if (res < 0) { + dsp_wait_count--; + return res; + } + } + /* assert DSP NMI */ + ptr = ioremap(DSP_NMI_ADDR, 0x16); + if (!ptr) { + pr_aud_err("Unable to map DSP NMI\n"); + return -EFAULT; + } + writel(0x1, (void *)ptr); + iounmap(ptr); + } else if (!strcmp(cmd, "boom")) { + q6audio_dsp_not_responding(); + } else if (!strcmp(cmd, "continue-crash")) { + dsp_has_crashed = 2; + wake_up(&dsp_wait); + } else { + pr_aud_err("[%s:%s] unknown dsp_debug command: %s\n", __MM_FILE__, + __func__, cmd); + } + + return count; +} + +#define DSP_RAM_BASE 0x46700000 +#define DSP_RAM_SIZE 0x2000000 + +static unsigned copy_ok_count; + +static ssize_t dsp_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) 
+{ + size_t actual = 0; + size_t mapsize = PAGE_SIZE; + unsigned addr; + void __iomem *ptr; + + if (*pos >= DSP_RAM_SIZE) + return 0; + + if (*pos & (PAGE_SIZE - 1)) + return -EINVAL; + + addr = (*pos + DSP_RAM_BASE); + + /* don't blow up if we're unaligned */ + if (addr & (PAGE_SIZE - 1)) + mapsize *= 2; + + while (count >= PAGE_SIZE) { + ptr = ioremap(addr, mapsize); + if (!ptr) { + pr_aud_err("[%s:%s] map error @ %x\n", __MM_FILE__, + __func__, addr); + return -EFAULT; + } + if (copy_to_user(buf, ptr, PAGE_SIZE)) { + iounmap(ptr); + pr_aud_err("[%s:%s] copy error @ %p\n", __MM_FILE__, + __func__, buf); + return -EFAULT; + } + copy_ok_count += PAGE_SIZE; + iounmap(ptr); + addr += PAGE_SIZE; + buf += PAGE_SIZE; + actual += PAGE_SIZE; + count -= PAGE_SIZE; + } + + *pos += actual; + return actual; +} + +static int dsp_release(struct inode *inode, struct file *file) +{ + return 0; +} + +int dsp_debug_register(dsp_state_cb ptr) +{ + if (ptr == NULL) + return -EINVAL; + cb_ptr = ptr; + + return 0; +} + +static const struct file_operations dsp_fops = { + .owner = THIS_MODULE, + .open = dsp_open, + .read = dsp_read, + .write = dsp_write, + .release = dsp_release, +}; + +static struct miscdevice dsp_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "dsp_debug", + .fops = &dsp_fops, +}; + + +static int __init dsp_init(void) +{ + init_waitqueue_head(&dsp_wait); + return misc_register(&dsp_misc); +} + +device_initcall(dsp_init); diff --git a/arch/arm/mach-msm/qdsp6v3/dsp_debug.h b/arch/arm/mach-msm/qdsp6v3/dsp_debug.h new file mode 100644 index 00000000..1a73dd42 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/dsp_debug.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __DSP_DEBUG_H_ +#define __DSP_DEBUG_H_ + +typedef int (*dsp_state_cb)(int state); +int dsp_debug_register(dsp_state_cb ptr); + +#define DSP_STATE_CRASHED 0x0 +#define DSP_STATE_CRASH_DUMP_DONE 0x1 + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/evrc_in.c b/arch/arm/mach-msm/qdsp6v3/evrc_in.c new file mode 100644 index 00000000..5d446358 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/evrc_in.c @@ -0,0 +1,337 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((23+sizeof(struct meta_out_dsp)) * 10)) + +void q6asm_evrc_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode - %d\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ +static long evrc_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_evrc_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + /* rate_modulation_cmd set to zero + currently not configurable from user space */ + rc = q6asm_enc_cfg_blk_evrc(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate, 0); + + if (rc < 0) { + pr_aud_err("%s:session 
id %d: cmd evrc media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_EVRC_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_evrc_enc_config))) + rc = -EFAULT; + break; + } + case AUDIO_SET_EVRC_ENC_CONFIG: { + struct msm_audio_evrc_enc_config cfg; + struct msm_audio_evrc_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_evrc_enc_config))) { + rc = -EFAULT; + break; + } + + if (cfg.min_bit_rate > 4 || + cfg.min_bit_rate < 1 || + (cfg.min_bit_rate == 2)) { + pr_aud_err("%s:session id %d: invalid min bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + if (cfg.max_bit_rate > 4 || + cfg.max_bit_rate < 1 || + (cfg.max_bit_rate == 2)) { + pr_aud_err("%s:session id %d: invalid max bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->min_bit_rate = cfg.min_bit_rate; + enc_cfg->max_bit_rate = cfg.max_bit_rate; + pr_debug("%s:session id %d: min_bit_rate= 0x%x\ + max_bit_rate=0x%x\n", __func__, + audio->ac->session, enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate); + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int evrc_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_evrc_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for evrc\ + driver\n", __func__, audio->ac->session); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_evrc_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for aac\ + config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 23; + audio->max_frames_per_buf 
= 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->min_bit_rate = 4; + enc_cfg->max_bit_rate = 4; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_evrc_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s:session id %d: Could not allocate memory for audio\ + client\n", __func__, audio->ac->session); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open evrc encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_EVRC, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_EVRC); + if (rc < 0) { + pr_aud_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, + audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = evrc_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = evrc_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_evrc_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_evrc_in", + .fops = &audio_in_fops, +}; + +static int __init evrc_in_init(void) +{ + return misc_register(&audio_evrc_in_misc); +} + +device_initcall(evrc_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/fm.c b/arch/arm/mach-msm/qdsp6v3/fm.c new file mode 100644 index 00000000..8bbd4d55 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/fm.c @@ -0,0 +1,259 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Based on the mp3 native driver in arch/arm/mach-msm/qdsp5v2/audio_mp3.c + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * All source code in this file is licensed under the following license except + * where indicated. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SESSION_ID_FM (MAX_SESSIONS + 1) +#define FM_ENABLE 0x1 +#define FM_DISABLE 0x0 +#define FM_COPP 0x7 + +struct audio { + struct mutex lock; + + int opened; + int enabled; + int running; + + uint16_t fm_source; + uint16_t fm_src_copp_id; + uint16_t fm_dest; + uint16_t fm_dst_copp_id; + uint16_t dec_id; + uint32_t device_events; + uint16_t volume; +}; + +static struct audio fm_audio; +static int fm_audio_enable(struct audio *audio) +{ + if (audio->enabled) + return 0; + + pr_aud_info("%s: fm dest= %08x fm_source = %08x\n", __func__, + audio->fm_dst_copp_id, audio->fm_src_copp_id); + + /* do afe loopback here */ + + if (audio->fm_dest && audio->fm_source) { + if (afe_loopback(FM_ENABLE, audio->fm_dst_copp_id, + audio->fm_src_copp_id) < 0) { + pr_aud_err("%s: afe_loopback failed\n", __func__); + } + + audio->running = 1; + } + + audio->enabled = 1; + return 0; +} + +static void fm_audio_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct audio *audio = (struct audio *) private_data; + switch (evt_id) { + case AUDDEV_EVT_DEV_RDY: + pr_aud_info("%s :AUDDEV_EVT_DEV_RDY\n", __func__); + if (evt_payload->routing_id == FM_COPP) { + audio->fm_source = 1; + audio->fm_src_copp_id = FM_COPP; + } else { + audio->fm_dest = 1; + audio->fm_dst_copp_id = evt_payload->routing_id; + } + + if (audio->enabled && + audio->fm_dest && + audio->fm_source && !audio->running) { + + afe_loopback(FM_ENABLE, audio->fm_dst_copp_id, + audio->fm_src_copp_id); + audio->running = 1; + } + break; + case AUDDEV_EVT_DEV_RLS: + pr_aud_info("%s: AUDDEV_EVT_DEV_RLS\n", __func__); + if (evt_payload->routing_id == audio->fm_src_copp_id) + audio->fm_source = 0; + else + audio->fm_dest = 0; + if (audio->running + && (!audio->fm_dest && !audio->fm_source)) { + afe_loopback(FM_DISABLE, audio->fm_dst_copp_id, + audio->fm_src_copp_id); + audio->running = 0; + } else { + pr_aud_err("%s: device switch happened\n", __func__); + } + break; + case AUDDEV_EVT_STREAM_VOL_CHG: + pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG\n", __func__); + if (audio->fm_source) { + audio->volume = evt_payload->session_vol; + afe_loopback_gain(audio->fm_src_copp_id, + audio->volume); + } + break; + + default: + pr_aud_err("%s: ERROR:wrong event %08x\n", __func__, evt_id); + break; + } +} + +static int fm_audio_disable(struct audio *audio) +{ + + /* break the AFE loopback here */ + afe_loopback(FM_DISABLE, audio->fm_dst_copp_id, audio->fm_src_copp_id); + return 0; +} + +static long fm_audio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = -EINVAL; + + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + pr_aud_info("%s: AUDIO_START\n", __func__); + rc = fm_audio_enable(audio); + break; + case AUDIO_STOP: + pr_aud_info("%s: AUDIO_STOP\n", __func__); + rc = fm_audio_disable(audio); + audio->running = 0; + audio->enabled = 0; + break; + case AUDIO_GET_SESSION_ID: + if 
(copy_to_user((void *) arg, &audio->dec_id, + sizeof(unsigned short))) + rc = -EFAULT; + else + rc = 0; + break; + default: + rc = -EINVAL; + pr_aud_err("%s: Un supported IOCTL\n", __func__); + } + mutex_unlock(&audio->lock); + return rc; +} + +static int fm_audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + pr_debug("audio instance 0x%08x freeing\n", (int)audio); + mutex_lock(&audio->lock); + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id); + fm_audio_disable(audio); + audio->running = 0; + audio->enabled = 0; + audio->opened = 0; + mutex_unlock(&audio->lock); + return 0; +} + +static int fm_audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &fm_audio; + int rc = 0; + + + if (audio->opened) + return -EPERM; + + /* Allocate the decoder */ + audio->dec_id = SESSION_ID_FM; + + audio->running = 0; + audio->fm_source = 0; + audio->fm_dest = 0; + + audio->device_events = AUDDEV_EVT_DEV_RDY + |AUDDEV_EVT_DEV_RLS| + AUDDEV_EVT_STREAM_VOL_CHG; + + rc = auddev_register_evt_listner(audio->device_events, + AUDDEV_CLNT_DEC, + audio->dec_id, + fm_audio_listner, + (void *)audio); + + if (rc) { + pr_aud_err("%s: failed to register listnet\n", __func__); + goto event_err; + } + + audio->opened = 1; + file->private_data = audio; + +event_err: + return rc; +} + +static const struct file_operations audio_fm_fops = { + .owner = THIS_MODULE, + .open = fm_audio_open, + .release = fm_audio_release, + .unlocked_ioctl = fm_audio_ioctl, +}; + +struct miscdevice audio_fm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_fm", + .fops = &audio_fm_fops, +}; + +static int __init fm_audio_init(void) +{ + struct audio *audio = &fm_audio; + + mutex_init(&audio->lock); + return misc_register(&audio_fm_misc); +} + +device_initcall(fm_audio_init); + +MODULE_DESCRIPTION("MSM FM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/pcm_in.c b/arch/arm/mach-msm/qdsp6v3/pcm_in.c new file mode 100644 index 00000000..7282e2de --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/pcm_in.c @@ -0,0 +1,504 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_BUF 4 +#define BUFSZ (480 * 8) +#define BUFFER_SIZE_MULTIPLE 4 +#define MIN_BUFFER_SIZE 160 + +#define VOC_REC_NONE 0xFF + +struct pcm { + struct mutex lock; + struct mutex read_lock; + wait_queue_head_t wait; + spinlock_t dsp_lock; + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t rec_mode; + uint32_t in_frame_info[MAX_BUF][2]; + atomic_t in_count; + atomic_t in_enabled; + atomic_t in_opened; + atomic_t in_stopped; +}; + +static atomic_t pcm_opened = ATOMIC_INIT(0); + +static void pcm_in_get_dsp_buffers(struct pcm*, + uint32_t token, uint32_t *payload); + +void pcm_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct pcm *pcm = (struct pcm *) priv; + unsigned long flags; + + spin_lock_irqsave(&pcm->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + pcm_in_get_dsp_buffers(pcm, token, payload); + break; + default: + break; + } + spin_unlock_irqrestore(&pcm->dsp_lock, flags); +} + +static void pcm_in_get_dsp_buffers(struct pcm *pcm, + uint32_t token, uint32_t *payload) +{ + pcm->in_frame_info[token][0] = payload[7]; + pcm->in_frame_info[token][1] = payload[3]; + if (atomic_read(&pcm->in_count) <= pcm->buffer_count) + atomic_inc(&pcm->in_count); + wake_up(&pcm->wait); +} + +static int pcm_in_enable(struct pcm *pcm) +{ + if (atomic_read(&pcm->in_enabled)) + return 0; + return q6asm_run(pcm->ac, 0, 0, 0); +} + +static int pcm_in_disable(struct pcm *pcm) +{ + int rc = 0; + + if (atomic_read(&pcm->in_opened)) { + atomic_set(&pcm->in_enabled, 0); + atomic_set(&pcm->in_opened, 0); + rc = q6asm_cmd(pcm->ac, CMD_CLOSE); + + atomic_set(&pcm->in_stopped, 1); + memset(pcm->in_frame_info, 0, + sizeof(char) * pcm->buffer_count * 2); + wake_up(&pcm->wait); + } + return rc; +} + +static int config(struct pcm *pcm) +{ + int rc = 0; + + pr_debug("%s: pcm prefill, buffer_size = %d\n", __func__, + pcm->buffer_size); + rc = q6asm_audio_client_buf_alloc(OUT, pcm->ac, + pcm->buffer_size, pcm->buffer_count); + if (rc < 0) { + pr_aud_err("Audio Start: Buffer Allocation failed \ + rc = %d\n", rc); + goto fail; + } + + rc = q6asm_enc_cfg_blk_pcm(pcm->ac, pcm->sample_rate, + pcm->channel_count); + if (rc < 0) { + pr_aud_err("%s: cmd media format block failed", __func__); + goto fail; + } +fail: + return rc; +} + +static long pcm_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pcm *pcm = file->private_data; + int rc = 0; + + mutex_lock(&pcm->lock); + switch (cmd) { + case AUDIO_SET_VOLUME: + break; + case AUDIO_GET_STATS: { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + rc = -EFAULT; + break; + } + case AUDIO_START: { + int cnt = 0; + if (atomic_read(&pcm->in_enabled)) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = config(pcm); + if (rc) { + pr_aud_err("%s: IN Configuration failed\n", __func__); + rc = -EFAULT; + break; + } + + rc = pcm_in_enable(pcm); + if (rc) { + pr_aud_err("%s: In Enable failed\n", __func__); + rc = -EFAULT; + break; + } + + atomic_set(&pcm->in_enabled, 1); + + while (cnt++ < pcm->buffer_count) + q6asm_read(pcm->ac); + pr_aud_info("%s: AUDIO_START session id[%d]\n", __func__, + pcm->ac->session); + + if (pcm->rec_mode != 
VOC_REC_NONE) + msm_enable_incall_recording(pcm->ac->session, + pcm->rec_mode, pcm->sample_rate, pcm->channel_count); + + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &pcm->ac->session, + sizeof(unsigned short))) + rc = -EFAULT; + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + pr_debug("%s: SET_CONFIG: buffer_size:%d channel_count:%d" + "sample_rate:%d, buffer_count:%d\n", __func__, + config.buffer_size, config.channel_count, + config.sample_rate, config.buffer_count); + + if (!config.channel_count || config.channel_count > 2) { + rc = -EINVAL; + break; + } + + if (config.sample_rate < 8000 || config.sample_rate > 48000) { + rc = -EINVAL; + break; + } + + if ((config.buffer_size % (config.channel_count * + BUFFER_SIZE_MULTIPLE)) || + (config.buffer_size < MIN_BUFFER_SIZE)) { + pr_aud_err("%s: Buffer Size should be multiple of " + "[4 * no. of channels] and greater than 160\n", + __func__); + rc = -EINVAL; + break; + } + + pcm->sample_rate = config.sample_rate; + pcm->channel_count = config.channel_count; + pcm->buffer_size = config.buffer_size; + pcm->buffer_count = config.buffer_count; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = pcm->buffer_size; + config.buffer_count = pcm->buffer_count; + config.sample_rate = pcm->sample_rate; + config.channel_count = pcm->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *) arg, &config, sizeof(config))) + rc = -EFAULT; + break; + } + case AUDIO_ENABLE_AUDPRE: { + + uint16_t enable_mask; + + if (copy_from_user(&enable_mask, (void *) arg, + sizeof(enable_mask))) { + rc = -EFAULT; + break; + } + if (enable_mask & FLUENCE_ENABLE) + rc = auddev_cfg_tx_copp_topology(pcm->ac->session, + VPM_TX_DM_FLUENCE_COPP_TOPOLOGY); + else if (enable_mask & STEREO_RECORD_ENABLE) + rc = auddev_cfg_tx_copp_topology(pcm->ac->session, + HTC_STEREO_RECORD_TOPOLOGY); + else + rc = auddev_cfg_tx_copp_topology(pcm->ac->session, + DEFAULT_COPP_TOPOLOGY); + break; + } + case AUDIO_SET_INCALL: { + if (copy_from_user(&pcm->rec_mode, + (void *) arg, + sizeof(pcm->rec_mode))) { + rc = -EFAULT; + pr_aud_err("%s: Error copying in-call mode\n", __func__); + break; + } + + if (pcm->rec_mode != VOC_REC_UPLINK && + pcm->rec_mode != VOC_REC_DOWNLINK && + pcm->rec_mode != VOC_REC_BOTH) { + rc = -EINVAL; + pcm->rec_mode = VOC_REC_NONE; + + pr_aud_err("%s: Invalid %d in-call rec_mode\n", + __func__, pcm->rec_mode); + break; + } + + pr_debug("%s: In-call rec_mode %d\n", __func__, pcm->rec_mode); + break; + } + + + default: + rc = -EINVAL; + break; + } + mutex_unlock(&pcm->lock); + return rc; +} + +static int pcm_in_open(struct inode *inode, struct file *file) +{ + struct pcm *pcm; + int rc = 0; + struct timespec ts; + struct rtc_time tm; + + if (atomic_cmpxchg(&pcm_opened, 0, 1) != 0) { + rc = -EBUSY; + return rc; + } + + pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); + if (!pcm) + return -ENOMEM; + + pcm->channel_count = 1; + pcm->sample_rate = 8000; + pcm->buffer_size = BUFSZ; + pcm->buffer_count = MAX_BUF; + + pcm->ac = q6asm_audio_client_alloc((app_cb)pcm_in_cb, (void *)pcm); + if (!pcm->ac) { + pr_aud_err("%s: Could not allocate memory\n", __func__); + rc = -ENOMEM; + goto fail; + } + + mutex_init(&pcm->lock); + mutex_init(&pcm->read_lock); + spin_lock_init(&pcm->dsp_lock); 
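+ /* descriptive note (editor's addition): pcm->wait is woken from the DSP read-done callback once a capture buffer is ready for pcm_in_read() */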
+ init_waitqueue_head(&pcm->wait); + + rc = q6asm_open_read(pcm->ac, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s: Cmd Open Failed\n", __func__); + goto fail; + } + + atomic_set(&pcm->in_stopped, 0); + atomic_set(&pcm->in_enabled, 0); + atomic_set(&pcm->in_count, 0); + atomic_set(&pcm->in_opened, 1); + + pcm->rec_mode = VOC_REC_NONE; + + file->private_data = pcm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_aud_info1("[ATS][start_recording][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + pr_aud_info("%s: pcm in open session id[%d]\n", __func__, pcm->ac->session); + + return 0; +fail: + if (pcm->ac) + q6asm_audio_client_free(pcm->ac); + kfree(pcm); + atomic_set(&pcm_opened, 0); + return rc; +} + +static ssize_t pcm_in_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct pcm *pcm = file->private_data; + const char __user *start = buf; + void *data; + uint32_t offset = 0; + uint32_t size = 0; + uint32_t idx; + int rc = 0; + int len = 0; + + if (!atomic_read(&pcm->in_enabled)) + return -EFAULT; + mutex_lock(&pcm->read_lock); + while (count > 0) { + rc = wait_event_timeout(pcm->wait, + (atomic_read(&pcm->in_count) || + atomic_read(&pcm->in_stopped)), 5 * HZ); + if (!rc) { + pr_aud_err("%s: wait_event_timeout failed\n", __func__); + goto fail; + } + + if (atomic_read(&pcm->in_stopped) && + !atomic_read(&pcm->in_count)) { + mutex_unlock(&pcm->read_lock); + return 0; + } + + data = q6asm_is_cpu_buf_avail(OUT, pcm->ac, &size, &idx); + if (count >= size) + len = size; + else { + len = count; + pr_aud_err("%s: short read data[%p]bytesavail[%d]" + "bytesrequest[%d]" + "bytesrejected%d]\n",\ + __func__, data, size, + count, (size - count)); + } + if ((len) && data) { + offset = pcm->in_frame_info[idx][1]; + if (copy_to_user(buf, data+offset, len)) { + pr_aud_err("%s copy_to_user failed len[%d]\n", + __func__, len); + rc = -EFAULT; + goto fail; + } + count -= len; + buf += len; + } + atomic_dec(&pcm->in_count); + memset(&pcm->in_frame_info[idx], 0, + sizeof(uint32_t) * 2); + + rc = q6asm_read(pcm->ac); + if (rc < 0) { + pr_aud_err("%s q6asm_read faile\n", __func__); + goto fail; + } + rmb(); + break; + } + rc = buf-start; +fail: + mutex_unlock(&pcm->read_lock); + return rc; +} + +static int pcm_in_release(struct inode *inode, struct file *file) +{ + int rc = 0; + struct timespec ts; + struct rtc_time tm; + struct pcm *pcm = file->private_data; + + if (pcm == NULL) { + pr_aud_err("%s: Nothing need to be released.\n", __func__); + return 0; + } + if (pcm->ac) { + mutex_lock(&pcm->lock); + + if ((pcm->rec_mode != VOC_REC_NONE) && atomic_read(&pcm->in_enabled)) { + msm_disable_incall_recording(pcm->ac->session, pcm->rec_mode); + + pcm->rec_mode = VOC_REC_NONE; + } + + + /* remove this session from topology list */ + auddev_cfg_tx_copp_topology(pcm->ac->session, + DEFAULT_COPP_TOPOLOGY); + mutex_unlock(&pcm->lock); + } + + rc = pcm_in_disable(pcm); + if (pcm->ac) { + pr_aud_info("[%s:%s] release session id[%d]\n", __MM_FILE__, + __func__, pcm->ac->session); + msm_clear_session_id(pcm->ac->session); + q6asm_audio_client_free(pcm->ac); + } + + kfree(pcm); + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + atomic_set(&pcm_opened, 0); + pr_aud_info1("[ATS][stop_recording][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, 
tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + return rc; +} + +static const struct file_operations pcm_in_fops = { + .owner = THIS_MODULE, + .open = pcm_in_open, + .read = pcm_in_read, + .release = pcm_in_release, + .unlocked_ioctl = pcm_in_ioctl, +}; + +struct miscdevice pcm_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_in", + .fops = &pcm_in_fops, +}; + +static int __init pcm_in_init(void) +{ + return misc_register(&pcm_in_misc); +} + +device_initcall(pcm_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/pcm_out.c b/arch/arm/mach-msm/qdsp6v3/pcm_out.c new file mode 100644 index 00000000..4ba5c287 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/pcm_out.c @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_BUF 2 +#define BUFSZ (4800) + +struct pcm { + struct mutex lock; + struct mutex write_lock; + spinlock_t dsp_lock; + wait_queue_head_t write_wait; + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t rec_mode; + uint32_t stream_event; + uint32_t volume; + atomic_t out_count; + atomic_t out_enabled; + atomic_t out_opened; + atomic_t out_stopped; + atomic_t out_prefill; + struct wake_lock wakelock; +}; + +void pcm_out_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct pcm *pcm = (struct pcm *) priv; + unsigned long flags; + + spin_lock_irqsave(&pcm->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&pcm->out_count); + wake_up(&pcm->write_wait); + break; + default: + break; + } + spin_unlock_irqrestore(&pcm->dsp_lock, flags); +} + +static void audio_prevent_sleep(struct pcm *audio) +{ + pr_debug("%s:\n", __func__); + wake_lock(&audio->wakelock); +} + +static void audio_allow_sleep(struct pcm *audio) +{ + pr_debug("%s:\n", __func__); + wake_unlock(&audio->wakelock); +} + +static int pcm_out_enable(struct pcm *pcm) +{ + if (atomic_read(&pcm->out_enabled)) + return 0; + return q6asm_run(pcm->ac, 0, 0, 0); +} + +static int pcm_out_disable(struct pcm *pcm) +{ + int rc = 0; + + if (atomic_read(&pcm->out_opened)) { + atomic_set(&pcm->out_enabled, 0); + atomic_set(&pcm->out_opened, 0); + rc = q6asm_cmd(pcm->ac, CMD_CLOSE); + + atomic_set(&pcm->out_stopped, 1); + wake_up(&pcm->write_wait); + } + return rc; +} + +static int config(struct pcm *pcm) +{ + int rc = 0; + if (!atomic_read(&pcm->out_prefill)) { + pr_debug("%s: pcm prefill\n", __func__); + rc = q6asm_audio_client_buf_alloc(IN, pcm->ac, + pcm->buffer_size, pcm->buffer_count); + if (rc < 0) { + pr_aud_err("Audio Start: Buffer Allocation failed \ + rc = %d\n", rc); + goto fail; + } + + rc = q6asm_media_format_block_pcm(pcm->ac, pcm->sample_rate, + pcm->channel_count); + if (rc < 0) + pr_aud_err("%s: CMD Format block failed\n", 
__func__); + + atomic_set(&pcm->out_prefill, 1); + atomic_set(&pcm->out_count, pcm->buffer_count); + } +fail: + return rc; +} + +static void pcm_event_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct pcm *pcm = (struct pcm *) private_data; + int rc = 0; + + switch (evt_id) { + case AUDDEV_EVT_STREAM_VOL_CHG: + pcm->volume = evt_payload->session_vol; + pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, " + "enabled = %d\n", __func__, pcm->volume, + atomic_read(&pcm->out_enabled)); + if (atomic_read(&pcm->out_enabled)) { + if (pcm->ac) { + rc = q6asm_set_volume(pcm->ac, pcm->volume); + if (rc < 0) + pr_aud_err("%s: Send Volume command" + "failed rc=%d\n", __func__, rc); + } + } + break; + default: + pr_aud_err("%s:ERROR:wrong event\n", __func__); + break; + } +} + +static long pcm_out_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct pcm *pcm = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + + mutex_lock(&pcm->lock); + switch (cmd) { + case AUDIO_SET_VOLUME: { + pr_aud_info("%s: AUDIO_SET_VOLUME, vol %lu\n", __func__, arg); + rc = q6asm_set_volume(pcm->ac, arg); + if (rc < 0) + pr_aud_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + break; + } + case AUDIO_START: { + pr_aud_info("%s: AUDIO_START\n", __func__); + rc = config(pcm); + if (rc) { + pr_aud_err("%s: Out Configuration failed\n", __func__); + rc = -EFAULT; + break; + } + + rc = pcm_out_enable(pcm); + if (rc) { + pr_aud_err("Out enable failed\n"); + rc = -EFAULT; + break; + } + audio_prevent_sleep(pcm); + atomic_set(&pcm->out_enabled, 1); + + rc = q6asm_set_volume(pcm->ac, pcm->volume); + if (rc < 0) + pr_aud_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + rc = q6asm_set_lrgain(pcm->ac, 0x2000, 0x2000); + if (rc < 0) + pr_aud_err("%s: Send channel gain failed rc=%d\n", + __func__, rc); + /* disable mute by default */ + rc = q6asm_set_mute(pcm->ac, 0); + if (rc < 0) + pr_aud_err("%s: Send mute command failed rc=%d\n", + __func__, rc); + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &pcm->ac->session, + sizeof(unsigned short))) + rc = -EFAULT; + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + pr_aud_info("AUDIO_SET_CONFIG\n"); + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (config.channel_count < 1 || config.channel_count > 2) { + rc = -EINVAL; + break; + } + if (config.sample_rate < 8000 || config.sample_rate > 48000) { + rc = -EINVAL; + break; + } + if (config.buffer_size < 128) { + rc = -EINVAL; + break; + } + pcm->sample_rate = config.sample_rate; + pcm->channel_count = config.channel_count; + pcm->buffer_size = config.buffer_size; + pcm->buffer_count = config.buffer_count; + pr_debug("%s:buffer_size:%d buffer_count:%d sample_rate:%d \ + channel_count:%d\n", __func__, pcm->buffer_size, + pcm->buffer_count, pcm->sample_rate, + pcm->channel_count); + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + pr_aud_info("AUDIO_GET_CONFIG\n"); + config.buffer_size = pcm->buffer_size; + config.buffer_count = pcm->buffer_count; + config.sample_rate = pcm->sample_rate; + config.channel_count = pcm->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; 
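+		/*
+		 * Editor's note (illustrative sketch, not part of the original
+		 * change): a minimal userspace sequence for driving this device,
+		 * using only the commands handled in this ioctl.  Header names,
+		 * the open flags and the /dev node path are assumptions.
+		 *
+		 *	int fd = open("/dev/msm_pcm_out", O_WRONLY);
+		 *	struct msm_audio_config cfg;
+		 *	ioctl(fd, AUDIO_GET_CONFIG, &cfg);	// read the defaults set at open
+		 *	cfg.sample_rate = 48000;		// accepted range: 8000..48000
+		 *	cfg.channel_count = 2;			// accepted: 1 or 2
+		 *	ioctl(fd, AUDIO_SET_CONFIG, &cfg);
+		 *	ioctl(fd, AUDIO_START, 0);		// allocates DSP buffers, sets volume, unmutes
+		 *	write(fd, pcm_data, nbytes);		// copied to the DSP in chunks of at most BUFSZ
+		 *	close(fd);				// stops the session and frees the audio client
+		 */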
+ if (copy_to_user((void *) arg, &config, sizeof(config))) + rc = -EFAULT; + break; + } + case AUDIO_SET_EQ: { + struct msm_audio_eq_stream_config eq_config; + if (copy_from_user(&eq_config, (void *) arg, + sizeof(eq_config))) { + rc = -EFAULT; + break; + } + rc = q6asm_equalizer(pcm->ac, (void *) &eq_config); + if (rc < 0) + pr_aud_err("%s: EQUALIZER FAILED\n", __func__); + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&pcm->lock); + return rc; +} + +static int pcm_out_open(struct inode *inode, struct file *file) +{ + struct pcm *pcm; + int rc = 0; + + struct timespec ts; + struct rtc_time tm; + + pr_aud_info("[%s:%s] open\n", __MM_FILE__, __func__); + pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); + if (!pcm) { + pr_aud_info("%s: Failed to allocated memory\n", __func__); + return -ENOMEM; + } + + pcm->channel_count = 2; + pcm->sample_rate = 44100; + pcm->buffer_size = BUFSZ; + pcm->buffer_count = MAX_BUF; + pcm->stream_event = AUDDEV_EVT_STREAM_VOL_CHG; + pcm->volume = 0x2000; + + pcm->ac = q6asm_audio_client_alloc((app_cb)pcm_out_cb, (void *)pcm); + if (!pcm->ac) { + pr_aud_err("%s: Could not allocate memory\n", __func__); + rc = -ENOMEM; + goto fail; + } + + rc = q6asm_open_write(pcm->ac, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s: pcm out open failed for session %d\n", __func__, + pcm->ac->session); + rc = -EINVAL; + goto fail; + } + + mutex_init(&pcm->lock); + mutex_init(&pcm->write_lock); + init_waitqueue_head(&pcm->write_wait); + spin_lock_init(&pcm->dsp_lock); + atomic_set(&pcm->out_enabled, 0); + atomic_set(&pcm->out_stopped, 0); + atomic_set(&pcm->out_count, pcm->buffer_count); + atomic_set(&pcm->out_prefill, 0); + atomic_set(&pcm->out_opened, 1); + wake_lock_init(&pcm->wakelock, WAKE_LOCK_SUSPEND, "audio_pcm"); + + rc = auddev_register_evt_listner(pcm->stream_event, + AUDDEV_CLNT_DEC, + pcm->ac->session, + pcm_event_listner, + (void *)pcm); + if (rc < 0) { + pr_aud_err("%s: failed to register listner\n", __func__); + goto fail; + } + + file->private_data = pcm; + + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_aud_info1("[ATS][play_music][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + pr_debug("[%s:%s] open session id[%d]\n", __MM_FILE__, + __func__, pcm->ac->session); + return 0; +fail: + if (pcm->ac) + q6asm_audio_client_free(pcm->ac); + kfree(pcm); + return rc; +} + +static ssize_t pcm_out_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct pcm *pcm = file->private_data; + const char __user *start = buf; + int xfer; + char *bufptr; + uint32_t idx; + void *data; + int rc = 0; + uint32_t size; + + if (!pcm->ac) + return -ENODEV; + + if (!atomic_read(&pcm->out_enabled)) { + rc = config(pcm); + if (rc < 0) + return rc; + } + + mutex_lock(&pcm->write_lock); + while (count > 0) { + rc = wait_event_timeout(pcm->write_wait, + (atomic_read(&pcm->out_count) || + atomic_read(&pcm->out_stopped)), 5 * HZ); + if (!rc) { + pr_aud_info("%s: wait_event_timeout failed for session %d\n", + __func__, pcm->ac->session); + goto fail; + } + + if (atomic_read(&pcm->out_stopped) && + !atomic_read(&pcm->out_count)) { + pr_aud_info("%s: pcm stopped out_count 0\n", __func__); + mutex_unlock(&pcm->write_lock); + return 0; + } + + data = q6asm_is_cpu_buf_avail(IN, pcm->ac, &size, &idx); + bufptr = data; + if (bufptr) { + xfer = count; + if (xfer > BUFSZ) + xfer = BUFSZ; + + if 
(copy_from_user(bufptr, buf, xfer)) { + rc = -EFAULT; + goto fail; + } + buf += xfer; + count -= xfer; + rc = q6asm_write(pcm->ac, xfer, 0, 0, NO_TIMESTAMP); + wmb(); + if (rc < 0) { + rc = -EFAULT; + goto fail; + } + } + atomic_dec(&pcm->out_count); + } + + rc = buf - start; +fail: + mutex_unlock(&pcm->write_lock); + return rc; +} + +static int pcm_out_release(struct inode *inode, struct file *file) +{ + struct pcm *pcm = file->private_data; + struct timespec ts; + struct rtc_time tm; + + if (pcm == NULL) { + pr_aud_err("%s: Nothing need to be released.\n", __func__); + return 0; + } + if (pcm->ac) { + pr_aud_info("[%s:%s] release session id[%d]\n", __MM_FILE__, + __func__, pcm->ac->session); + pcm_out_disable(pcm); + msm_clear_session_id(pcm->ac->session); + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, pcm->ac->session); + q6asm_audio_client_free(pcm->ac); + } + audio_allow_sleep(pcm); + wake_lock_destroy(&pcm->wakelock); + mutex_destroy(&pcm->lock); + mutex_destroy(&pcm->write_lock); + + kfree(pcm); + pr_aud_info("[%s:%s] release\n", __MM_FILE__, __func__); + + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_aud_info1("[ATS][stop_music][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + return 0; +} + +static const struct file_operations pcm_out_fops = { + .owner = THIS_MODULE, + .open = pcm_out_open, + .write = pcm_out_write, + .release = pcm_out_release, + .unlocked_ioctl = pcm_out_ioctl, +}; + +struct miscdevice pcm_out_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_out", + .fops = &pcm_out_fops, +}; + +static int __init pcm_out_init(void) +{ + return misc_register(&pcm_out_misc); +} + +device_initcall(pcm_out_init); diff --git a/arch/arm/mach-msm/qdsp6v3/q6adm.c b/arch/arm/mach-msm/qdsp6v3/q6adm.c new file mode 100644 index 00000000..b2e01518 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6adm.c @@ -0,0 +1,651 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" +#include "rtac.h" + +#define TIMEOUT_MS 1000 +#define AUDIO_RX 0x0 +#define AUDIO_TX 0x1 +#define RESET_COPP_ID 99 +#define INVALID_COPP_ID 0xFF + +struct adm_ctl { + void *apr; + atomic_t copp_id[AFE_MAX_PORTS]; + atomic_t copp_cnt[AFE_MAX_PORTS]; + atomic_t copp_stat[AFE_MAX_PORTS]; + wait_queue_head_t wait; +}; + +static struct adm_ctl this_adm; + +static int32_t adm_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *payload; + int i, index; + payload = data->payload; + + if (data->opcode == RESET_EVENTS) { + pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n", + data->reset_event, data->reset_proc, + this_adm.apr); + if (this_adm.apr) { + apr_reset(this_adm.apr); + for (i = 0; i < AFE_MAX_PORTS; i++) { + atomic_set(&this_adm.copp_id[i], RESET_COPP_ID); + atomic_set(&this_adm.copp_cnt[i], 0); + atomic_set(&this_adm.copp_stat[i], 0); + } + this_adm.apr = NULL; + } + return 0; + } + + pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__, + data->opcode, payload[0], payload[1], + data->payload_size); + + if (data->payload_size) { + index = afe_get_port_index(data->token); + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + return -EINVAL; + } + + pr_debug("%s: Port ID %d, index %d\n", __func__, + data->token, index); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + pr_debug("APR_BASIC_RSP_RESULT\n"); + switch (payload[0]) { + case ADM_CMD_SET_PARAMS: +#ifdef CONFIG_MSM8X60_RTAC + if (rtac_make_adm_callback(payload, + data->payload_size)) + break; +#endif + case ADM_CMD_COPP_CLOSE: + case ADM_CMD_MEMORY_MAP: + case ADM_CMD_MEMORY_UNMAP: + case ADM_CMD_MEMORY_MAP_REGIONS: + case ADM_CMD_MEMORY_UNMAP_REGIONS: + case ADM_CMD_MATRIX_MAP_ROUTINGS: + pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n"); + atomic_set(&this_adm.copp_stat[index], 1); + wake_up(&this_adm.wait); + break; + default: + pr_aud_err("%s: Unknown Cmd: 0x%x\n", __func__, + payload[0]); + break; + } + return 0; + } + + switch (data->opcode) { + case ADM_CMDRSP_COPP_OPEN: { + struct adm_copp_open_respond *open = data->payload; + if (open->copp_id == INVALID_COPP_ID) { + pr_aud_err("%s: invalid coppid rxed %d\n", + __func__, open->copp_id); + atomic_set(&this_adm.copp_stat[index], 1); + wake_up(&this_adm.wait); + break; + } + atomic_set(&this_adm.copp_id[index], open->copp_id); + atomic_set(&this_adm.copp_stat[index], 1); + pr_debug("%s: coppid rxed=%d\n", __func__, + open->copp_id); + wake_up(&this_adm.wait); + } + break; +#ifdef CONFIG_MSM8X60_RTAC + case ADM_CMDRSP_GET_PARAMS: + pr_debug("ADM_CMDRSP_GET_PARAMS\n"); + rtac_make_adm_callback(payload, + data->payload_size); + break; +#endif + default: + pr_aud_err("%s: Unknown cmd:0x%x\n", __func__, + data->opcode); + break; + } + } + return 0; +} + +void send_cal(int port_id, struct acdb_cal_block *aud_cal) +{ + s32 result; + struct adm_set_params_command adm_params; + int index = afe_get_port_index(port_id); + + pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index); + + if (!aud_cal || aud_cal->cal_size == 0) { + pr_aud_info("%s: No calibration data to send!\n", __func__); + goto done; + } + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + goto done; + } + + adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(20), APR_PKT_VER); + adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + 
sizeof(adm_params)); + adm_params.hdr.src_svc = APR_SVC_ADM; + adm_params.hdr.src_domain = APR_DOMAIN_APPS; + adm_params.hdr.src_port = port_id; + adm_params.hdr.dest_svc = APR_SVC_ADM; + adm_params.hdr.dest_domain = APR_DOMAIN_ADSP; + adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]); + adm_params.hdr.token = port_id; + adm_params.hdr.opcode = ADM_CMD_SET_PARAMS; + adm_params.payload = aud_cal->cal_paddr; + adm_params.payload_size = aud_cal->cal_size; + + atomic_set(&this_adm.copp_stat[index], 0); + pr_aud_info("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n", + __func__, adm_params.payload, adm_params.payload_size); + result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params); + if (result < 0) { + pr_aud_err("%s: Set params failed port = %d payload = 0x%x\n", + __func__, port_id, aud_cal->cal_paddr); + goto done; + } + /* Wait for the callback */ + result = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!result) + pr_aud_err("%s: Set params timed out port = %d, payload = 0x%x\n", + __func__, port_id, aud_cal->cal_paddr); +done: + return; +} + +void send_adm_cal(int port_id, int path) +{ + s32 acdb_path; + struct acdb_cal_block aud_cal; + + pr_aud_info("%s\n", __func__); + + /* Maps audio_dev_ctrl path definition to ACDB definition */ + acdb_path = path - 1; + if ((acdb_path >= NUM_AUDPROC_BUFFERS) || + (acdb_path < 0)) { + pr_aud_err("%s: Path is not RX or TX, path = %d\n", + __func__, path); + goto done; + } + + pr_aud_info("%s: Sending audproc cal, acdb_path %d\n", + __func__, acdb_path); + get_audproc_cal(acdb_path, &aud_cal); + send_cal(port_id, &aud_cal); + + pr_aud_info("%s: Sending audvol cal, acdb_path %d\n", + __func__, acdb_path); + get_audvol_cal(acdb_path, &aud_cal); + send_cal(port_id, &aud_cal); +done: + return; +} + +int adm_open(int port_id, int path, int rate, int channel_mode, int topology) +{ + struct adm_copp_open_command open; + int ret = 0; + int index; + + pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__, + port_id, path, rate, channel_mode); + pr_aud_info("Topology = %x\n", topology); + + if (afe_validate_port(port_id) < 0) { + pr_aud_err("%s port idi[%d] is invalid\n", __func__, port_id); + return -ENODEV; + } + + index = afe_get_port_index(port_id); + pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); + + if (this_adm.apr == NULL) { + this_adm.apr = apr_register("ADSP", "ADM", adm_callback, + 0xFFFFFFFF, &this_adm); + if (this_adm.apr == NULL) { + pr_aud_err("%s: Unable to register ADM\n", __func__); + ret = -ENODEV; + return ret; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_adm_handle(this_adm.apr); +#endif + } + + + /* Create a COPP if port id are not enabled */ + if (atomic_read(&this_adm.copp_cnt[index]) == 0) { + + open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + open.hdr.pkt_size = sizeof(open); + open.hdr.src_svc = APR_SVC_ADM; + open.hdr.src_domain = APR_DOMAIN_APPS; + open.hdr.src_port = port_id; + open.hdr.dest_svc = APR_SVC_ADM; + open.hdr.dest_domain = APR_DOMAIN_ADSP; + open.hdr.dest_port = port_id; + open.hdr.token = port_id; + open.hdr.opcode = ADM_CMD_COPP_OPEN; + + open.mode = path; + open.endpoint_id1 = port_id; + open.endpoint_id2 = 0xFFFF; + open.topology_id = topology; + + open.channel_config = channel_mode & 0x00FF; + open.rate = rate; + + pr_debug("%s: channel_config=%d port_id=%d rate=%d\ + topology_id=0x%X\n", __func__, open.channel_config,\ + open.endpoint_id1, open.rate,\ 
+ open.topology_id); + + atomic_set(&this_adm.copp_stat[index], 0); + + ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); + if (ret < 0) { + pr_aud_err("%s:ADM enable for port %d failed\n", + __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + /* Wait for the callback with copp id */ + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s ADM open failed for port %d\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + } + atomic_inc(&this_adm.copp_cnt[index]); + return 0; + +fail_cmd: + + return ret; +} + +int adm_matrix_map(int session_id, int path, int num_copps, + unsigned int *port_id, int copp_id) +{ + struct adm_routings_command route; + int ret = 0, i = 0; + /* Assumes port_ids have already been validated during adm_open */ + int index = afe_get_port_index(copp_id); + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + return -EINVAL; + } + + pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n", + __func__, session_id, path, num_copps, port_id[0]); + + route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + route.hdr.pkt_size = sizeof(route); + route.hdr.src_svc = 0; + route.hdr.src_domain = APR_DOMAIN_APPS; + route.hdr.src_port = copp_id; + route.hdr.dest_svc = APR_SVC_ADM; + route.hdr.dest_domain = APR_DOMAIN_ADSP; + route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]); + route.hdr.token = copp_id; + route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS; + route.num_sessions = 1; + route.session[0].id = session_id; + route.session[0].num_copps = num_copps; + + for (i = 0; i < num_copps; i++) { + int tmp; + tmp = afe_get_port_index(port_id[i]); + + pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i, + port_id[i], tmp); + if (tmp < 0 || tmp >= AFE_MAX_PORTS) { + pr_aud_err("afe_get_port_index return invalid %d\n", tmp); + ret = -EINVAL; + goto fail_cmd; + } + route.session[0].copp_id[i] = + atomic_read(&this_adm.copp_id[tmp]); + } + if (num_copps % 2) + route.session[0].copp_id[i] = 0; + + switch (path) { + case 0x1: + route.path = AUDIO_RX; + break; + case 0x2: + case 0x3: + route.path = AUDIO_TX; + break; + default: + pr_aud_err("%s: Wrong path set[%d]\n", __func__, path); + break; + } + atomic_set(&this_adm.copp_stat[index], 0); + + ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route); + if (ret < 0) { + pr_aud_err("%s: ADM routing for port %d failed\n", + __func__, port_id[0]); + ret = -EINVAL; + goto fail_cmd; + } + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: ADM cmd Route failed for port %d\n", + __func__, port_id[0]); + ret = -EINVAL; + goto fail_cmd; + } + + for (i = 0; i < num_copps; i++) + send_adm_cal(port_id[i], path); + +#ifdef CONFIG_MSM8X60_RTAC + for (i = 0; i < num_copps; i++) + rtac_add_adm_device(port_id[i], session_id); +#endif + return 0; + +fail_cmd: + + return ret; +} + +int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id, + uint32_t *bufsz, uint32_t bufcnt) +{ + struct adm_cmd_memory_map_regions *mmap_regions = NULL; + struct adm_memory_map_regions *mregions = NULL; + void *mmap_region_cmd = NULL; + void *payload = NULL; + int ret = 0; + int i = 0; + int cmd_size = 0; + + pr_aud_info("%s\n", __func__); + if (this_adm.apr == NULL) { + this_adm.apr = apr_register("ADSP", "ADM", adm_callback, + 0xFFFFFFFF, 
&this_adm); + if (this_adm.apr == NULL) { + pr_aud_err("%s: Unable to register ADM\n", __func__); + ret = -ENODEV; + return ret; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_adm_handle(this_adm.apr); +#endif + } + + cmd_size = sizeof(struct adm_cmd_memory_map_regions) + + sizeof(struct adm_memory_map_regions) * bufcnt; + + mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (!mmap_region_cmd) { + pr_aud_err("%s: allocate mmap_region_cmd failed\n", __func__); + return -ENOMEM; + } + mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd; + mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + mmap_regions->hdr.pkt_size = cmd_size; + mmap_regions->hdr.src_port = 0; + mmap_regions->hdr.dest_port = 0; + mmap_regions->hdr.token = 0; + mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS; + mmap_regions->mempool_id = mempool_id & 0x00ff; + mmap_regions->nregions = bufcnt & 0x00ff; + pr_debug("%s: map_regions->nregions = %d\n", __func__, + mmap_regions->nregions); + payload = ((u8 *) mmap_region_cmd + + sizeof(struct adm_cmd_memory_map_regions)); + mregions = (struct adm_memory_map_regions *)payload; + + for (i = 0; i < bufcnt; i++) { + mregions->phys = buf_add[i]; + mregions->buf_size = bufsz[i]; + ++mregions; + } + + atomic_set(&this_adm.copp_stat[0], 0); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd); + if (ret < 0) { + pr_aud_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, + mmap_regions->hdr.opcode, ret); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[0]), 5 * HZ); + if (!ret) { + pr_aud_err("%s: timeout. waited for memory_map\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } +fail_cmd: + kfree(mmap_region_cmd); + return ret; +} + +int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz, + uint32_t bufcnt) +{ + struct adm_cmd_memory_unmap_regions *unmap_regions = NULL; + struct adm_memory_unmap_regions *mregions = NULL; + void *unmap_region_cmd = NULL; + void *payload = NULL; + int ret = 0; + int i = 0; + int cmd_size = 0; + + pr_aud_info("%s\n", __func__); + + if (this_adm.apr == NULL) { + pr_aud_err("%s APR handle NULL\n", __func__); + return -EINVAL; + } + + cmd_size = sizeof(struct adm_cmd_memory_unmap_regions) + + sizeof(struct adm_memory_unmap_regions) * bufcnt; + + unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (!unmap_region_cmd) { + pr_aud_err("%s: allocate unmap_region_cmd failed\n", __func__); + return -ENOMEM; + } + unmap_regions = (struct adm_cmd_memory_unmap_regions *) + unmap_region_cmd; + unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + unmap_regions->hdr.pkt_size = cmd_size; + unmap_regions->hdr.src_port = 0; + unmap_regions->hdr.dest_port = 0; + unmap_regions->hdr.token = 0; + unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS; + unmap_regions->nregions = bufcnt & 0x00ff; + unmap_regions->reserved = 0; + pr_debug("%s: unmap_regions->nregions = %d\n", __func__, + unmap_regions->nregions); + payload = ((u8 *) unmap_region_cmd + + sizeof(struct adm_cmd_memory_unmap_regions)); + mregions = (struct adm_memory_unmap_regions *)payload; + + for (i = 0; i < bufcnt; i++) { + mregions->phys = buf_add[i]; + ++mregions; + } + atomic_set(&this_adm.copp_stat[0], 0); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd); + if (ret < 0) { + pr_aud_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, + unmap_regions->hdr.opcode, 
ret); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[0]), 5 * HZ); + if (!ret) { + pr_aud_err("%s: timeout. waited for memory_unmap\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } +fail_cmd: + kfree(unmap_region_cmd); + return ret; +} + +#ifdef CONFIG_MSM8X60_RTAC +int adm_get_copp_id(int port_id) +{ + pr_debug("%s\n", __func__); + + if (port_id < 0) { + pr_aud_err("%s: invalid port_id = %d\n", __func__, port_id); + return -EINVAL; + } + + return atomic_read(&this_adm.copp_id[port_id]); +} +#endif + +int adm_close(int port_id) +{ + struct apr_hdr close; + + int ret = 0; + int index = afe_get_port_index(port_id); + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + return -EINVAL; + } + + pr_aud_info("%s port_id=%d index %d\n", __func__, port_id, index); + + if (!(atomic_read(&this_adm.copp_cnt[index]))) { + pr_aud_err("%s: copp count for port[%d]is 0\n", __func__, port_id); + + goto fail_cmd; + } + atomic_dec(&this_adm.copp_cnt[index]); + if (!(atomic_read(&this_adm.copp_cnt[index]))) { + + close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + close.pkt_size = sizeof(close); + close.src_svc = APR_SVC_ADM; + close.src_domain = APR_DOMAIN_APPS; + close.src_port = port_id; + close.dest_svc = APR_SVC_ADM; + close.dest_domain = APR_DOMAIN_ADSP; + close.dest_port = atomic_read(&this_adm.copp_id[index]); + close.token = port_id; + close.opcode = ADM_CMD_COPP_CLOSE; + + atomic_set(&this_adm.copp_id[index], RESET_COPP_ID); + atomic_set(&this_adm.copp_stat[index], 0); + + + pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n", + __func__, + atomic_read(&this_adm.copp_id[index]), + port_id, index, + atomic_read(&this_adm.copp_cnt[index])); + + ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close); + if (ret < 0) { + pr_aud_err("%s ADM close failed\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_info("%s: ADM cmd Route failed for port %d\n", + __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + +#ifdef CONFIG_MSM8X60_RTAC + rtac_remove_adm_device(port_id); +#endif + } + +fail_cmd: + return ret; +} + +static int __init adm_init(void) +{ + int i = 0; + pr_aud_info("%s\n", __func__); + init_waitqueue_head(&this_adm.wait); + this_adm.apr = NULL; + + for (i = 0; i < AFE_MAX_PORTS; i++) { + atomic_set(&this_adm.copp_id[i], RESET_COPP_ID); + atomic_set(&this_adm.copp_cnt[i], 0); + atomic_set(&this_adm.copp_stat[i], 0); + } + return 0; +} + +device_initcall(adm_init); diff --git a/arch/arm/mach-msm/qdsp6v3/q6adm.h b/arch/arm/mach-msm/qdsp6v3/q6adm.h new file mode 100644 index 00000000..6d01738b --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6adm.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __Q6_ADM_H__ +#define __Q6_ADM_H__ +#include + +/* multiple copp per stream. */ +struct route_payload { + unsigned int copp_ids[AFE_MAX_PORTS]; + unsigned short num_copps; + unsigned int session_id; +}; + +int adm_open(int port, int path, int rate, int mode, int topology); + +int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id, + uint32_t *bufsz, uint32_t bufcnt); + +int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz, + uint32_t bufcnt); + +int adm_close(int port); + +int adm_matrix_map(int session_id, int path, int num_copps, + unsigned int *port_id, int copp_id); + +#ifdef CONFIG_MSM8X60_RTAC +int adm_get_copp_id(int port_id); +#endif + +#endif /* __Q6_ADM_H__ */ diff --git a/arch/arm/mach-msm/qdsp6v3/q6afe.c b/arch/arm/mach-msm/qdsp6v3/q6afe.c new file mode 100644 index 00000000..a51f6ab8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6afe.c @@ -0,0 +1,687 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct afe_ctl { + void *apr; + atomic_t state; + wait_queue_head_t wait; + struct task_struct *task; +}; + +static struct afe_ctl this_afe; + +#define TIMEOUT_MS 1000 +#define Q6AFE_MAX_VOLUME 0x3FFF + + +static int32_t afe_callback(struct apr_client_data *data, void *priv) +{ + if (data->opcode == RESET_EVENTS) { + pr_debug("q6afe: reset event = %d %d apr[%p]\n", + data->reset_event, data->reset_proc, this_afe.apr); + if (this_afe.apr) { + apr_reset(this_afe.apr); + atomic_set(&this_afe.state, 0); + this_afe.apr = NULL; + } + /* send info to user */ + pr_debug("task_name = %s pid = %d\n", + this_afe.task->comm, this_afe.task->pid); + send_sig(SIGUSR1, this_afe.task, 0); + } + if (data->payload_size) { + uint32_t *payload; + payload = data->payload; + pr_debug("%s: cmd = 0x%x status = 0x%x\n", __func__, + payload[0], payload[1]); + if (data->opcode == APR_BASIC_RSP_RESULT) { + switch (payload[0]) { + case AFE_PORT_AUDIO_IF_CONFIG: + case AFE_PORT_CMD_STOP: + case AFE_PORT_CMD_START: + case AFE_PORT_CMD_LOOPBACK: + case AFE_PORT_CMD_SIDETONE_CTL: + case AFE_PORT_CMD_SET_PARAM: + case AFE_PSEUDOPORT_CMD_START: + case AFE_PSEUDOPORT_CMD_STOP: + atomic_set(&this_afe.state, 0); + wake_up(&this_afe.wait); + break; + default: + pr_aud_err("Unknown cmd 0x%x\n", + payload[0]); + break; + } + } + } + return 0; +} + +int afe_validate_port(u16 port_id) +{ + int ret; + + switch (port_id) { + case PRIMARY_I2S_RX: + case PRIMARY_I2S_TX: + case PCM_RX: + case PCM_TX: + case SECONDARY_I2S_RX: + case SECONDARY_I2S_TX: + case MI2S_RX: + case MI2S_TX: + case HDMI_RX: + case RSVD_2: + case RSVD_3: + case DIGI_MIC_TX: + case VOICE_RECORD_RX: + case VOICE_RECORD_TX: + case VOICE_PLAYBACK_TX: + { + ret = 0; + break; + } + + default: + ret = -EINVAL; + } + + return ret; +} + +int afe_get_port_index(u16 port_id) +{ + switch (port_id) { + case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX; + case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX; + case PCM_RX: return IDX_PCM_RX; + case PCM_TX: return IDX_PCM_TX; + case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX; + case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX; + case MI2S_RX: return IDX_MI2S_RX; + case MI2S_TX: return IDX_MI2S_TX; + case HDMI_RX: return IDX_HDMI_RX; + case RSVD_2: return IDX_RSVD_2; + case RSVD_3: return IDX_RSVD_3; + case DIGI_MIC_TX: return IDX_DIGI_MIC_TX; + case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX; + case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX; + case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX; + default: return -EINVAL; + } +} + +int afe_open(u16 port_id, union afe_port_config *afe_config, int rate) +{ + struct afe_port_start_command start; + struct afe_audioif_config_command config; + int ret = 0; + + if (!afe_config) { + pr_aud_err("%s: Error, no configuration data\n", __func__); + ret = -EINVAL; + return ret; + } + + pr_aud_info("%s: %d %d\n", __func__, port_id, rate); + + if (this_afe.apr == NULL) { + this_afe.apr = apr_register("ADSP", "AFE", afe_callback, + 0xFFFFFFFF, &this_afe); + pr_aud_info("%s: Register AFE\n", __func__); + if (this_afe.apr == NULL) { + pr_aud_err("%s: Unable to register AFE\n", __func__); + ret = -ENODEV; + return ret; + } + } + + config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + config.hdr.pkt_size = sizeof(config); + config.hdr.src_port = 0; + config.hdr.dest_port = 0; + config.hdr.token = 0; + config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG; + + if 
(afe_validate_port(port_id) < 0) { + + pr_aud_err("%s: Failed : Invalid Port id = %d\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + + config.port_id = port_id; + config.port = *afe_config; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config); + if (ret < 0) { + pr_aud_err("%s: AFE enable for port %d failed\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + + start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + start.hdr.pkt_size = sizeof(start); + start.hdr.src_port = 0; + start.hdr.dest_port = 0; + start.hdr.token = 0; + start.hdr.opcode = AFE_PORT_CMD_START; + start.port_id = port_id; + start.gain = 0x2000; + start.sample_rate = rate; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start); + if (ret < 0) { + pr_aud_err("%s: AFE enable for port %d failed\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + + if (this_afe.task != current) + this_afe.task = current; + + pr_debug("task_name = %s pid = %d\n", + this_afe.task->comm, this_afe.task->pid); + return 0; +fail_cmd: + return ret; +} + +int afe_loopback(u16 enable, u16 rx_port, u16 tx_port) +{ + struct afe_loopback_command lb_cmd; + int ret = 0; + if (this_afe.apr == NULL) { + pr_aud_err("%s:AFE is not opened\n", __func__); + ret = -1; + goto done; + } + lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(20), APR_PKT_VER); + lb_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(lb_cmd) - APR_HDR_SIZE); + lb_cmd.hdr.src_port = 0; + lb_cmd.hdr.dest_port = 0; + lb_cmd.hdr.token = 0; + lb_cmd.hdr.opcode = AFE_PORT_CMD_LOOPBACK; + lb_cmd.tx_port_id = tx_port; + lb_cmd.rx_port_id = rx_port; + lb_cmd.mode = 0xFFFF; + lb_cmd.enable = (enable ? 1 : 0); + atomic_set(&this_afe.state, 1); + + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &lb_cmd); + if (ret < 0) { + pr_aud_err("AFE loopback failed\n"); + ret = -EINVAL; + goto done; + } + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + } +done: + return ret; +} + + +int afe_loopback_gain(u16 port_id, u16 volume) +{ + struct afe_port_cmd_set_param set_param; + int ret = 0; + + if (this_afe.apr == NULL) { + pr_aud_err("%s: AFE is not opened\n", __func__); + ret = -EPERM; + goto fail_cmd; + } + + if (afe_validate_port(port_id) < 0) { + + pr_aud_err("%s: Failed : Invalid Port id = %d\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + + /* RX ports numbers are even .TX ports numbers are odd. */ + if (port_id % 2 == 0) { + pr_aud_err("%s: Failed : afe loopback gain only for TX ports." 
+ " port_id %d\n", __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + + pr_debug("%s: %d %hX\n", __func__, port_id, volume); + + set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + set_param.hdr.pkt_size = sizeof(set_param); + set_param.hdr.src_port = 0; + set_param.hdr.dest_port = 0; + set_param.hdr.token = 0; + set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM; + + set_param.port_id = port_id; + set_param.payload_size = sizeof(struct afe_param_payload); + set_param.payload_address = 0; + + set_param.payload.module_id = AFE_MODULE_ID_PORT_INFO; + set_param.payload.param_id = AFE_PARAM_ID_LOOPBACK_GAIN; + set_param.payload.param_size = sizeof(struct afe_param_loopback_gain); + set_param.payload.reserved = 0; + + set_param.payload.param.loopback_gain.gain = volume; + set_param.payload.param.loopback_gain.reserved = 0; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param); + if (ret < 0) { + pr_aud_err("%s: AFE param set failed for port %d\n", + __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (ret < 0) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + return 0; +fail_cmd: + return ret; +} + +int afe_start_pseudo_port(u16 port_id) +{ + int ret = 0; + struct afe_pseudoport_start_command start; + + pr_aud_info("%s: port_id=%d\n", __func__, port_id); + + if (this_afe.apr == NULL) { + this_afe.apr = apr_register("ADSP", "AFE", afe_callback, + 0xFFFFFFFF, &this_afe); + pr_aud_info("%s: Register AFE\n", __func__); + if (this_afe.apr == NULL) { + pr_aud_err("%s: Unable to register AFE\n", __func__); + ret = -ENODEV; + return ret; + } + } + + start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + start.hdr.pkt_size = sizeof(start); + start.hdr.src_port = 0; + start.hdr.dest_port = 0; + start.hdr.token = 0; + start.hdr.opcode = AFE_PSEUDOPORT_CMD_START; + start.port_id = port_id; + start.timing = 1; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start); + if (ret < 0) { + pr_aud_err("%s: AFE enable for port %d failed %d\n", + __func__, port_id, ret); + ret = -EINVAL; + return ret; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + return ret; + } + + return 0; +} + +int afe_stop_pseudo_port(u16 port_id) +{ + int ret = 0; + struct afe_pseudoport_stop_command stop; + + pr_aud_info("%s: port_id=%d\n", __func__, port_id); + + if (this_afe.apr == NULL) { + pr_aud_err("%s: AFE is already closed\n", __func__); + ret = -EINVAL; + return ret; + } + + stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + stop.hdr.pkt_size = sizeof(stop); + stop.hdr.src_port = 0; + stop.hdr.dest_port = 0; + stop.hdr.token = 0; + stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP; + stop.port_id = port_id; + stop.reserved = 0; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop); + if (ret < 0) { + pr_aud_err("%s: AFE close failed %d\n", __func__, ret); + ret = -EINVAL; + return ret; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event 
timeout\n", __func__); + ret = -EINVAL; + return ret; + } + + return 0; +} + + +#ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_afelb; +static struct dentry *debugfs_afelb_gain; + +static int afe_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + pr_aud_info("debug intf %s\n", (char *) file->private_data); + return 0; +} + +static int afe_get_parameters(char *buf, long int *param1, int num_of_par) +{ + char *token; + int base, cnt; + + token = strsep(&buf, " "); + + for (cnt = 0; cnt < num_of_par; cnt++) { + if (token != NULL) { + if ((token[1] == 'x') || (token[1] == 'X')) + base = 16; + else + base = 10; + + if (strict_strtoul(token, base, ¶m1[cnt]) != 0) + return -EINVAL; + + token = strsep(&buf, " "); + } else + return -EINVAL; + } + return 0; +} +#define AFE_LOOPBACK_ON (1) +#define AFE_LOOPBACK_OFF (0) +static ssize_t afe_debug_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char *lb_str = filp->private_data; + char lbuf[32]; + int rc; + unsigned long param[5]; + + if (cnt > sizeof(lbuf) - 1) + return -EINVAL; + + rc = copy_from_user(lbuf, ubuf, cnt); + if (rc) + return -EFAULT; + + lbuf[cnt] = '\0'; + + if (!strcmp(lb_str, "afe_loopback")) { + rc = afe_get_parameters(lbuf, param, 3); + if (!rc) { + pr_aud_info("%s %lu %lu %lu\n", lb_str, param[0], param[1], + param[2]); + + if ((param[0] != AFE_LOOPBACK_ON) && (param[0] != + AFE_LOOPBACK_OFF)) { + pr_aud_err("%s: Error, parameter 0 incorrect\n", + __func__); + rc = -EINVAL; + goto afe_error; + } + if ((afe_validate_port(param[1]) < 0) || + (afe_validate_port(param[2])) < 0) { + pr_aud_err("%s: Error, invalid afe port\n", + __func__); + } + if (this_afe.apr == NULL) { + pr_aud_err("%s: Error, AFE not opened\n", __func__); + rc = -EINVAL; + } else { + rc = afe_loopback(param[0], param[1], param[2]); + } + } else { + pr_aud_err("%s: Error, invalid parameters\n", __func__); + rc = -EINVAL; + } + + } else if (!strcmp(lb_str, "afe_loopback_gain")) { + rc = afe_get_parameters(lbuf, param, 2); + if (!rc) { + pr_aud_info("%s %lu %lu\n", lb_str, param[0], param[1]); + + if (afe_validate_port(param[0]) < 0) { + pr_aud_err("%s: Error, invalid afe port\n", + __func__); + rc = -EINVAL; + goto afe_error; + } + + if (param[1] < 0 || param[1] > 100) { + pr_aud_err("%s: Error, volume shoud be 0 to 100" + " percentage param = %lu\n", + __func__, param[1]); + rc = -EINVAL; + goto afe_error; + } + + param[1] = (Q6AFE_MAX_VOLUME * param[1]) / 100; + + if (this_afe.apr == NULL) { + pr_aud_err("%s: Error, AFE not opened\n", __func__); + rc = -EINVAL; + } else { + rc = afe_loopback_gain(param[0], param[1]); + } + } else { + pr_aud_err("%s: Error, invalid parameters\n", __func__); + rc = -EINVAL; + } + } + +afe_error: + if (rc == 0) + rc = cnt; + else + pr_aud_err("%s: rc = %d\n", __func__, rc); + + return rc; +} + +static const struct file_operations afe_debug_fops = { + .open = afe_debug_open, + .write = afe_debug_write +}; +#endif +int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain) +{ + struct afe_port_sidetone_command cmd_sidetone; + int ret = 0; + + pr_aud_info("%s: tx_port_id:%d rx_port_id:%d enable:%d gain:%d\n", __func__, + tx_port_id, rx_port_id, enable, gain); + cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone); + cmd_sidetone.hdr.src_port = 0; + cmd_sidetone.hdr.dest_port = 0; + cmd_sidetone.hdr.token = 0; + 
cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SIDETONE_CTL; + cmd_sidetone.tx_port_id = tx_port_id; + cmd_sidetone.rx_port_id = rx_port_id; + cmd_sidetone.gain = gain; + cmd_sidetone.enable = enable; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &cmd_sidetone); + if (ret < 0) { + pr_aud_err("%s: AFE sidetone failed for tx_port:%d rx_port:%d\n", + __func__, tx_port_id, rx_port_id); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (ret < 0) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + return 0; +fail_cmd: + return ret; +} + +int afe_close(int port_id) +{ + struct afe_port_stop_command stop; + int ret = 0; + + if (this_afe.apr == NULL) { + pr_aud_err("AFE is already closed\n"); + ret = -EINVAL; + goto fail_cmd; + } + pr_debug("%s: port_id=%d\n", __func__, port_id); + stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + stop.hdr.pkt_size = sizeof(stop); + stop.hdr.src_port = 0; + stop.hdr.dest_port = 0; + stop.hdr.token = 0; + stop.hdr.opcode = AFE_PORT_CMD_STOP; + stop.port_id = port_id; + stop.reserved = 0; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop); + + if (ret < 0) { + pr_aud_err("AFE close failed\n"); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } +fail_cmd: + return ret; +} + +static int __init afe_init(void) +{ + pr_aud_info("%s:\n", __func__); + init_waitqueue_head(&this_afe.wait); + atomic_set(&this_afe.state, 0); + this_afe.apr = NULL; +#ifdef CONFIG_DEBUG_FS + debugfs_afelb = debugfs_create_file("afe_loopback", + 0644, NULL, (void *) "afe_loopback", + &afe_debug_fops); + + debugfs_afelb_gain = debugfs_create_file("afe_loopback_gain", + 0644, NULL, (void *) "afe_loopback_gain", + &afe_debug_fops); + + +#endif + return 0; +} + +static void __exit afe_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + if (debugfs_afelb) + debugfs_remove(debugfs_afelb); + if (debugfs_afelb_gain) + debugfs_remove(debugfs_afelb_gain); +#endif +} + +device_initcall(afe_init); +__exitcall(afe_exit); diff --git a/arch/arm/mach-msm/qdsp6v3/q6asm.c b/arch/arm/mach-msm/qdsp6v3/q6asm.c new file mode 100644 index 00000000..33004bfc --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6asm.c @@ -0,0 +1,2548 @@ + +/* + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rtac.h" + +#define TRUE 0x01 +#define FALSE 0x00 +#define READDONE_IDX_STATUS 0 +#define READDONE_IDX_BUFFER 1 +#define READDONE_IDX_SIZE 2 +#define READDONE_IDX_OFFSET 3 +#define READDONE_IDX_MSW_TS 4 +#define READDONE_IDX_LSW_TS 5 +#define READDONE_IDX_FLAGS 6 +#define READDONE_IDX_NUMFRAMES 7 +#define READDONE_IDX_ID 8 + +static DEFINE_MUTEX(session_lock); + +/* session id: 0 reserved */ +static struct audio_client *session[SESSION_MAX+1]; +static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv); +static int32_t q6asm_callback(struct apr_client_data *data, void *priv); +static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg); +static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg); +static int q6asm_memory_map_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt); +static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt); + +static void q6asm_reset_buf_state(struct audio_client *ac); + +struct asm_mmap { + atomic_t ref_cnt; + atomic_t cmd_state; + wait_queue_head_t cmd_wait; + void *apr; +}; + +static struct asm_mmap this_mmap; + +static int q6asm_session_alloc(struct audio_client *ac) +{ + int n; + mutex_lock(&session_lock); + for (n = 1; n <= SESSION_MAX; n++) { + if (!session[n]) { + session[n] = ac; + mutex_unlock(&session_lock); + return n; + } + } + mutex_unlock(&session_lock); + return -ENOMEM; +} + +static void q6asm_session_free(struct audio_client *ac) +{ + pr_debug("%s: sessionid[%d]\n", __func__, ac->session); + mutex_lock(&session_lock); + session[ac->session] = 0; + mutex_unlock(&session_lock); + ac->session = 0; + return; +} + +int q6asm_audio_client_buf_free(unsigned int dir, + struct audio_client *ac) +{ + struct audio_port_data *port; + int cnt = 0; + int rc = 0; + pr_debug("%s: Session id %d\n", __func__, ac->session); + mutex_lock(&ac->cmd_lock); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + if (!port->buf) { + mutex_unlock(&ac->cmd_lock); + return 0; + } + cnt = port->max_buf_cnt - 1; + + if (cnt >= 0) { + rc = q6asm_memory_unmap_regions(ac, dir, + port->buf[0].size, + port->max_buf_cnt); + if (rc < 0) + pr_aud_err("%s CMD Memory_unmap_regions failed\n", + __func__); + } + + while (cnt >= 0) { + if (port->buf[cnt].data) { + pr_debug("data[%p]phys[%p][%p] cnt[%d]\n", + (void *)port->buf[cnt].data, + (void *)port->buf[cnt].phys, + (void *)&port->buf[cnt].phys, cnt); + dma_free_coherent(NULL, port->buf[cnt].size, + port->buf[cnt].data, + port->buf[cnt].phys); + port->buf[cnt].data = NULL; + port->buf[cnt].phys = 0; + --(port->max_buf_cnt); + } + --cnt; + } + kfree(port->buf); + port->buf = NULL; + } + mutex_unlock(&ac->cmd_lock); + return 0; +} + +int q6asm_audio_client_buf_free_contiguous(unsigned int dir, + struct audio_client *ac) +{ + struct audio_port_data *port; + int cnt = 0; + int rc = 0; + pr_debug("%s: Session id %d\n", __func__, ac->session); + mutex_lock(&ac->cmd_lock); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + if (!port->buf) { + mutex_unlock(&ac->cmd_lock); + return 0; + } + cnt = port->max_buf_cnt - 1; + + if (cnt >= 0) { + rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir); + if (rc < 0) + pr_aud_err("%s CMD 
Memory_unmap_regions failed\n", + __func__); + } + + if (port->buf[0].data) { + pr_debug("%s:data[%p]phys[%p][%p] cnt[%d]\n", + __func__, + (void *)port->buf[0].data, + (void *)port->buf[0].phys, + (void *)&port->buf[0].phys, cnt); + dma_free_coherent(NULL, + port->buf[0].size * port->max_buf_cnt, + port->buf[0].data, + port->buf[0].phys); + } + while (cnt >= 0) { + port->buf[cnt].data = NULL; + port->buf[cnt].phys = 0; + cnt--; + } + port->max_buf_cnt = 0; + kfree(port->buf); + port->buf = NULL; + } + mutex_unlock(&ac->cmd_lock); + return 0; +} + +void q6asm_audio_client_free(struct audio_client *ac) +{ + int loopcnt; + struct audio_port_data *port; + if (!ac || !ac->session) + return; + pr_debug("%s: Session id %d\n", __func__, ac->session); + if (ac->io_mode == SYNC_IO_MODE) { + for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { + port = &ac->port[loopcnt]; + if (!port->buf) + continue; + pr_debug("%s:loopcnt = %d\n", __func__, loopcnt); + q6asm_audio_client_buf_free(loopcnt, ac); + } + } + + apr_deregister(ac->apr); + q6asm_session_free(ac); + + pr_debug("%s: APR De-Register\n", __func__); + if (atomic_read(&this_mmap.ref_cnt) <= 0) { + pr_aud_err("%s: APR Common Port Already Closed\n", __func__); + goto done; + } + + atomic_dec(&this_mmap.ref_cnt); + if (atomic_read(&this_mmap.ref_cnt) == 0) { + apr_deregister(this_mmap.apr); + pr_debug("%s:APR De-Register common port\n", __func__); + } +done: + kfree(ac); + return; +} + +int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode) +{ + if (ac == NULL) { + pr_aud_err("%s APR handle NULL\n", __func__); + return -EINVAL; + } + if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) { + ac->io_mode = mode; + pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode); + return 0; + } else { + pr_aud_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode); + return -EINVAL; + } +} + +struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv) +{ + struct audio_client *ac; + int n; + int lcnt = 0; + + ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL); + if (!ac) + return NULL; + n = q6asm_session_alloc(ac); + if (n <= 0) + goto fail_session; + ac->session = n; + ac->cb = cb; + ac->priv = priv; + ac->io_mode = SYNC_IO_MODE; + ac->apr = apr_register("ADSP", "ASM", \ + (apr_fn)q6asm_callback,\ + ((ac->session) << 8 | 0x0001),\ + ac); + + if (ac->apr == NULL) { + pr_aud_err("%s Registration with APR failed\n", __func__); + goto fail; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_asm_handle(n, ac->apr); +#endif + pr_debug("%s Registering the common port with APR\n", __func__); + if (atomic_read(&this_mmap.ref_cnt) == 0) { + this_mmap.apr = apr_register("ADSP", "ASM", \ + (apr_fn)q6asm_mmapcallback,\ + 0x0FFFFFFFF, &this_mmap); + if (this_mmap.apr == NULL) { + pr_debug("%s Unable to register \ + APR ASM common port \n", __func__); + goto fail; + } + } + + atomic_inc(&this_mmap.ref_cnt); + init_waitqueue_head(&ac->cmd_wait); + init_waitqueue_head(&ac->time_wait); + atomic_set(&ac->time_flag, 1); + mutex_init(&ac->cmd_lock); + for (lcnt = 0; lcnt <= OUT; lcnt++) { + mutex_init(&ac->port[lcnt].lock); + spin_lock_init(&ac->port[lcnt].dsp_lock); + } + atomic_set(&ac->cmd_state, 0); + + pr_debug("%s: session[%d]\n", __func__, ac->session); + + return ac; +fail: + q6asm_audio_client_free(ac); + return NULL; +fail_session: + kfree(ac); + return NULL; +} + +int q6asm_audio_client_buf_alloc(unsigned int dir, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt) +{ + int cnt = 0; + int rc = 0; + struct audio_buffer *buf; + + if (!(ac) 
|| ((dir != IN) && (dir != OUT))) + return -EINVAL; + + pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, + bufsz, bufcnt); + + if (ac->session <= 0 || ac->session > 8) + goto fail; + + if (ac->io_mode == SYNC_IO_MODE) { + if (ac->port[dir].buf) { + pr_debug("%s: buffer already allocated\n", __func__); + return 0; + } + mutex_lock(&ac->cmd_lock); + buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), + GFP_KERNEL); + + if (!buf) { + mutex_unlock(&ac->cmd_lock); + goto fail; + } + + ac->port[dir].buf = buf; + + while (cnt < bufcnt) { + if (bufsz > 0) { + buf[cnt].data = dma_alloc_coherent(NULL, bufsz, + &buf[cnt].phys, + GFP_KERNEL); + if (!buf[cnt].data) { + pr_aud_err("%s Buf alloc failed for" + " size=%d\n", __func__, + bufsz); + mutex_unlock(&ac->cmd_lock); + goto fail; + } + buf[cnt].used = 1; + buf[cnt].size = bufsz; + buf[cnt].actual_size = bufsz; + pr_debug("%s data[%p]phys[%p][%p]\n", __func__, + (void *)buf[cnt].data, + (void *)buf[cnt].phys, + (void *)&buf[cnt].phys); + } + cnt++; + } + ac->port[dir].max_buf_cnt = cnt; + + mutex_unlock(&ac->cmd_lock); + rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt); + if (rc < 0) { + pr_aud_err("%s:CMD Memory_map_regions failed\n", __func__); + goto fail; + } + } + return 0; +fail: + q6asm_audio_client_buf_free(dir, ac); + return -EINVAL; +} + +int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt) +{ + int cnt = 0; + int rc = 0; + struct audio_buffer *buf; + + if (!(ac) || ((dir != IN) && (dir != OUT))) + return -EINVAL; + + pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", + __func__, ac->session, + bufsz, bufcnt); + + if (ac->session <= 0 || ac->session > 8) + goto fail; + + if (ac->io_mode == SYNC_IO_MODE) { + if (ac->port[dir].buf) { + pr_debug("%s: buffer already allocated\n", __func__); + return 0; + } + mutex_lock(&ac->cmd_lock); + buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), + GFP_KERNEL); + + if (!buf) { + mutex_unlock(&ac->cmd_lock); + goto fail; + } + + ac->port[dir].buf = buf; + + buf[0].data = dma_alloc_coherent(NULL, bufsz * bufcnt, + &buf[0].phys, GFP_KERNEL); + buf[0].used = dir ^ 1; + buf[0].size = bufsz; + buf[0].actual_size = bufsz; + cnt = 1; + while (cnt < bufcnt) { + if (bufsz > 0) { + buf[cnt].data = buf[0].data + (cnt * bufsz); + buf[cnt].phys = buf[0].phys + (cnt * bufsz); + + if (!buf[cnt].data) { + pr_aud_err("%s Buf alloc failed\n", + __func__); + mutex_unlock(&ac->cmd_lock); + goto fail; + } + buf[cnt].used = dir ^ 1; + buf[cnt].size = bufsz; + buf[cnt].actual_size = bufsz; + pr_debug("%s data[%p]phys[%p][%p]\n", __func__, + (void *)buf[cnt].data, + (void *)buf[cnt].phys, + (void *)&buf[cnt].phys); + } + cnt++; + } + ac->port[dir].max_buf_cnt = cnt; + + mutex_unlock(&ac->cmd_lock); + rc = q6asm_memory_map(ac, buf[0].phys, dir, bufsz, cnt); + if (rc < 0) { + pr_aud_err("%s:CMD Memory_map_regions failed\n", __func__); + goto fail; + } + } + return 0; +fail: + q6asm_audio_client_buf_free_contiguous(dir, ac); + return -EINVAL; +} + +static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv) +{ + uint32_t token; + uint32_t *payload = data->payload; + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s: Reset event is received: %d %d apr[%p]\n", + __func__, + data->reset_event, + data->reset_proc, + this_mmap.apr); + apr_reset(this_mmap.apr); + this_mmap.apr = NULL; + atomic_set(&this_mmap.cmd_state, 0); + return 0; + } + + pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]" + 
"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, + payload[0], payload[1], data->opcode, data->token, + data->payload_size, data->src_port, data->dest_port); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + token = data->token; + switch (payload[0]) { + case ASM_SESSION_CMD_MEMORY_MAP: + case ASM_SESSION_CMD_MEMORY_UNMAP: + case ASM_SESSION_CMD_MEMORY_MAP_REGIONS: + case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS: + pr_debug("%s:command[0x%x]success [0x%x]\n", + __func__, payload[0], payload[1]); + if (atomic_read(&this_mmap.cmd_state)) { + atomic_set(&this_mmap.cmd_state, 0); + wake_up(&this_mmap.cmd_wait); + } + break; + default: + pr_debug("%s:command[0x%x] not expecting rsp\n", + __func__, payload[0]); + break; + } + } + return 0; +} + + +static int32_t q6asm_callback(struct apr_client_data *data, void *priv) +{ + int i = 0; + struct audio_client *ac = (struct audio_client *)priv; + uint32_t token; + unsigned long dsp_flags; + uint32_t *payload; + + + if ((ac == NULL) || (data == NULL)) { + pr_aud_err("ac or priv NULL\n"); + return -EINVAL; + } + if (ac->session <= 0 || ac->session > 8) { + pr_aud_err("%s:Session ID is invalid, session = %d\n", __func__, + ac->session); + return -EINVAL; + } + + payload = data->payload; + + if (data->opcode == RESET_EVENTS) { + pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n", + data->reset_event, data->reset_proc, ac->apr); + apr_reset(ac->apr); + return 0; + } + + pr_debug("%s: session[%d]opcode[0x%x] \ + token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, + ac->session, data->opcode, + data->token, data->payload_size, data->src_port, + data->dest_port); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + token = data->token; + switch (payload[0]) { + case ASM_STREAM_CMD_SET_PP_PARAMS: +#ifdef CONFIG_MSM8X60_RTAC + if (rtac_make_asm_callback(ac->session, payload, + data->payload_size)) + break; +#endif + case ASM_SESSION_CMD_PAUSE: + case ASM_DATA_CMD_EOS: + case ASM_STREAM_CMD_CLOSE: + case ASM_STREAM_CMD_FLUSH: + case ASM_SESSION_CMD_RUN: + case ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS: + pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]); + if (token != ac->session) { + pr_aud_err("%s:Invalid session[%d] rxed expected[%d]", + __func__, token, ac->session); + return -EINVAL; + } + case ASM_STREAM_CMD_OPEN_READ: + case ASM_STREAM_CMD_OPEN_WRITE: + case ASM_STREAM_CMD_OPEN_READWRITE: + case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE: + case ASM_STREAM_CMD_SET_ENCDEC_PARAM: + if (atomic_read(&ac->cmd_state)) { + atomic_set(&ac->cmd_state, 0); + wake_up(&ac->cmd_wait); + } + if (ac->cb) + ac->cb(data->opcode, data->token, + (uint32_t *)data->payload, ac->priv); + break; + default: + pr_debug("%s:command[0x%x] not expecting rsp\n", + __func__, payload[0]); + break; + } + return 0; + } + + switch (data->opcode) { + case ASM_DATA_EVENT_WRITE_DONE:{ + struct audio_port_data *port = &ac->port[IN]; + pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]", + __func__, payload[0], payload[1], + data->token); + if (ac->io_mode == SYNC_IO_MODE) { + if (port->buf == NULL) { + pr_aud_err("%s: Unexpected Write Done\n", + __func__); + return -EINVAL; + } + spin_lock_irqsave(&port->dsp_lock, dsp_flags); + if (port->buf[data->token].phys != + payload[0]) { + pr_aud_err("Buf expected[%p]rxed[%p]\n",\ + (void *)port->buf[data->token].phys,\ + (void *)payload[0]); + spin_unlock_irqrestore(&port->dsp_lock, + dsp_flags); + return -EINVAL; + } + token = data->token; + port->buf[token].used = 1; + spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); 
+ for (i = 0; i < port->max_buf_cnt; i++) + pr_debug("%d ", port->buf[i].used); + + } + break; + } +#ifdef CONFIG_MSM8X60_RTAC + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + rtac_make_asm_callback(ac->session, payload, + data->payload_size); + break; +#endif + case ASM_DATA_EVENT_READ_DONE:{ + + struct audio_port_data *port = &ac->port[OUT]; + + pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n", + __func__, payload[READDONE_IDX_STATUS], + payload[READDONE_IDX_BUFFER], + payload[READDONE_IDX_SIZE], + payload[READDONE_IDX_OFFSET]); + pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d flags=%d id=%d num=%d\n", + __func__, payload[READDONE_IDX_MSW_TS], + payload[READDONE_IDX_LSW_TS], + payload[READDONE_IDX_FLAGS], + payload[READDONE_IDX_ID], + payload[READDONE_IDX_NUMFRAMES]); + + if (ac->io_mode == SYNC_IO_MODE) { + if (port->buf == NULL) { + pr_aud_err("%s: Unexpected Write Done\n", __func__); + return -EINVAL; + } + spin_lock_irqsave(&port->dsp_lock, dsp_flags); + token = data->token; + port->buf[token].used = 0; + if (port->buf[token].phys != + payload[READDONE_IDX_BUFFER]) { + pr_aud_err("Buf expected[%p]rxed[%p]\n",\ + (void *)port->buf[token].phys,\ + (void *)payload[READDONE_IDX_BUFFER]); + spin_unlock_irqrestore(&port->dsp_lock, + dsp_flags); + break; + } + port->buf[token].actual_size = + payload[READDONE_IDX_SIZE]; + spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); + } + break; + } + case ASM_DATA_EVENT_EOS: + case ASM_DATA_CMDRSP_EOS: + pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n", + __func__, data->opcode); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("ASM_SESSION_EVENT_TX_OVERFLOW\n"); + break; + case ASM_SESSION_CMDRSP_GET_SESSION_TIME: + pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, " + "payload[0] = %d, payload[1] = %d, " + "payload[2] = %d\n", __func__, + payload[0], payload[1], payload[2]); + ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) | + payload[2]); + if (atomic_read(&ac->time_flag)) { + atomic_set(&ac->time_flag, 0); + wake_up(&ac->time_wait); + } + break; + + } + if (ac->cb) + ac->cb(data->opcode, data->token, + data->payload, ac->priv); + + return 0; +} + +void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size, + uint32_t *index) +{ + void *data; + unsigned char idx; + struct audio_port_data *port; + + if (!ac || ((dir != IN) && (dir != OUT))) + return NULL; + + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + + mutex_lock(&port->lock); + idx = port->cpu_buf; + if (port->buf == NULL) { + pr_debug("%s:Buffer pointer null\n", __func__); + return NULL; + } + /* dir 0: used = 0 means buf in use + dir 1: used = 1 means buf in use */ + if (port->buf[idx].used == dir) { + /* To make it more robust, we could loop and get the + next avail buf, its risky though */ + pr_debug("%s:Next buf idx[0x%x] not available,\ + dir[%d]\n", __func__, idx, dir); + mutex_unlock(&port->lock); + return NULL; + } + *size = port->buf[idx].actual_size; + *index = port->cpu_buf; + data = port->buf[idx].data; + pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n", + __func__, + ac->session, + port->cpu_buf, + data, *size); + /* By default increase the cpu_buf cnt + user accesses this function,increase cpu + buf(to avoid another api)*/ + port->buf[idx].used = dir; + port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1)); + mutex_unlock(&port->lock); + return data; + } + return NULL; +} + +int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac) +{ + int ret = -1; 
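+	/* -1 means no DSP buffer is ready; set to 0 below once dsp_buf and cpu_buf differ */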
+ struct audio_port_data *port; + uint32_t idx; + + if (!ac || (dir != OUT)) + return ret; + + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + + mutex_lock(&port->lock); + idx = port->dsp_buf; + + if (port->buf[idx].used == (dir ^ 1)) { + /* To make it more robust, we could loop and get the + next avail buf, its risky though */ + pr_aud_err("Next buf idx[0x%x] not available, dir[%d]\n", + idx, dir); + mutex_unlock(&port->lock); + return ret; + } + pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__, + ac->session, port->dsp_buf, port->cpu_buf); + ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1); + mutex_unlock(&port->lock); + } + return ret; +} + +static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg) +{ + pr_debug("%s:session=%d pkt size=%d cmd_flg=%d\n", __func__, pkt_size, + cmd_flg, ac->session); + mutex_lock(&ac->cmd_lock); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(sizeof(struct apr_hdr)),\ + APR_PKT_VER); + hdr->src_svc = ((struct apr_svc *)ac->apr)->id; + hdr->src_domain = APR_DOMAIN_APPS; + hdr->dest_svc = APR_SVC_ASM; + hdr->dest_domain = APR_DOMAIN_ADSP; + hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01; + hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01; + if (cmd_flg) { + hdr->token = ac->session; + atomic_set(&ac->cmd_state, 1); + } + hdr->pkt_size = pkt_size; + mutex_unlock(&ac->cmd_lock); + return; +} + +static void q6asm_add_mmaphdr(struct apr_hdr *hdr, uint32_t pkt_size, + uint32_t cmd_flg) +{ + pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + hdr->src_port = 0; + hdr->dest_port = 0; + if (cmd_flg) { + hdr->token = 0; + atomic_set(&this_mmap.cmd_state, 1); + } + hdr->pkt_size = pkt_size; + return; +} + +int q6asm_open_read(struct audio_client *ac, + uint32_t format) +{ + int rc = 0x00; + struct asm_stream_cmd_open_read open; + + if ((ac == NULL) || (ac->apr == NULL)) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + pr_debug("%s:session[%d]", __func__, ac->session); + + q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); + open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ; + /* Stream prio : High, provide meta info with encoded frames */ + open.src_endpoint = ASM_END_POINT_DEVICE_MATRIX; + open.pre_proc_top = DEFAULT_POPP_TOPOLOGY; + + switch (format) { + case FORMAT_LINEAR_PCM: + open.uMode = STREAM_PRIORITY_HIGH; + open.format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = MPEG4_AAC; + break; + case FORMAT_V13K: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = V13K_FS; + break; + case FORMAT_EVRC: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = EVRC_FS; + break; + case FORMAT_AMRNB: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = AMRNB_FS; + break; + default: + pr_aud_err("Invalid format[%d]\n", format); + goto fail_cmd; + } + rc = apr_send_pkt(ac->apr, (uint32_t *) &open); + if (rc < 0) { + pr_aud_err("open failed op[0x%x]rc[%d]\n", \ + open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout. 
waited for OPEN_WRITE rc[%d]\n", __func__, + rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_open_write(struct audio_client *ac, uint32_t format) +{ + int rc = 0x00; + struct asm_stream_cmd_open_write open; + + if ((ac == NULL) || (ac->apr == NULL)) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session, + format); + + q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); + + open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE; + open.uMode = STREAM_PRIORITY_HIGH; + /* source endpoint : matrix */ + open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX; + open.stream_handle = 0x00; + open.post_proc_top = DEFAULT_POPP_TOPOLOGY; + + switch (format) { + case FORMAT_LINEAR_PCM: + open.format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.format = MPEG4_AAC; + break; + case FORMAT_WMA_V9: + open.format = WMA_V9; + break; + case FORMAT_WMA_V10PRO: + open.format = WMA_V10PRO; + break; + default: + pr_aud_err("%s: Invalid format[%d]\n", __func__, format); + goto fail_cmd; + } + rc = apr_send_pkt(ac->apr, (uint32_t *) &open); + if (rc < 0) { + pr_aud_err("%s: open failed op[0x%x]rc[%d]\n", \ + __func__, open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout. waited for OPEN_WRITR rc[%d]\n", __func__, + rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_open_read_write(struct audio_client *ac, + uint32_t rd_format, + uint32_t wr_format) +{ + int rc = 0x00; + struct asm_stream_cmd_open_read_write open; + + if ((ac == NULL) || (ac->apr == NULL)) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: session[%d]", __func__, ac->session); + pr_debug("wr_format[0x%x]rd_format[0x%x]", + wr_format, rd_format); + + q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); + open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE; + + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_NORMAL; + /* source endpoint : matrix */ + open.post_proc_top = DEFAULT_POPP_TOPOLOGY; + switch (wr_format) { + case FORMAT_LINEAR_PCM: + open.write_format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.write_format = MPEG4_AAC; + break; + case FORMAT_WMA_V9: + open.write_format = WMA_V9; + break; + case FORMAT_WMA_V10PRO: + open.write_format = WMA_V10PRO; + break; + default: + pr_aud_err("Invalid format[%d]\n", wr_format); + goto fail_cmd; + } + + switch (rd_format) { + case FORMAT_LINEAR_PCM: + open.read_format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.read_format = MPEG4_AAC; + break; + case FORMAT_V13K: + open.read_format = V13K_FS; + break; + case FORMAT_EVRC: + open.read_format = EVRC_FS; + break; + case FORMAT_AMRNB: + open.read_format = AMRNB_FS; + break; + default: + pr_aud_err("Invalid format[%d]\n", rd_format); + goto fail_cmd; + } + pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__, + open.read_format, open.write_format); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &open); + if (rc < 0) { + pr_aud_err("open failed op[0x%x]rc[%d]\n", \ + open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for OPEN_WRITR rc[%d]\n", rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_run(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts) +{ + struct asm_stream_cmd_run run; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s session[%d]", __func__, ac->session); + q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE); + + run.hdr.opcode = ASM_SESSION_CMD_RUN; + run.flags = flags; + run.msw_ts = msw_ts; + run.lsw_ts = lsw_ts; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &run); + if (rc < 0) { + pr_aud_err("Commmand run failed[%d]", rc); + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for run success rc[%d]", rc); + goto fail_cmd; + } + + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts) +{ + struct asm_stream_cmd_run run; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("%s:APR handle NULL\n", __func__); + return -EINVAL; + } + pr_debug("session[%d]", ac->session); + q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE); + + run.hdr.opcode = ASM_SESSION_CMD_RUN; + run.flags = flags; + run.msw_ts = msw_ts; + run.lsw_ts = lsw_ts; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &run); + if (rc < 0) { + pr_aud_err("%s:Commmand run failed[%d]", __func__, rc); + return -EINVAL; + } + return 0; +} + + +int q6asm_enc_cfg_blk_aac(struct audio_client *ac, + uint32_t frames_per_buf, + uint32_t sample_rate, uint32_t channels, + uint32_t bit_rate, uint32_t mode, uint32_t format) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]" + "format[%d]", __func__, ac->session, frames_per_buf, + sample_rate, channels, bit_rate, mode, format); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = MPEG4_AAC; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_aac_read_cfg); + enc_cfg.enc_blk.cfg.aac.bitrate = bit_rate; + enc_cfg.enc_blk.cfg.aac.enc_mode = mode; + enc_cfg.enc_blk.cfg.aac.format = format; + enc_cfg.enc_blk.cfg.aac.ch_cfg = channels; + enc_cfg.enc_blk.cfg.aac.sample_rate = sample_rate; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + + int rc = 0; + + pr_debug("%s: Session %d\n", __func__, ac->session); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + enc_cfg.enc_blk.frames_per_buf = 1; + enc_cfg.enc_blk.format_id = LINEAR_PCM; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg); + enc_cfg.enc_blk.cfg.pcm.ch_cfg = channels; + enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16; + enc_cfg.enc_blk.cfg.pcm.sample_rate = rate; + enc_cfg.enc_blk.cfg.pcm.is_signed = 1; + enc_cfg.enc_blk.cfg.pcm.interleaved = 1; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd open failed\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enable_sbrps(struct audio_client *ac, + uint32_t sbr_ps_enable) +{ + struct asm_stream_cmd_encdec_sbr sbrps; + + int rc = 0; + + pr_debug("%s: Session %d\n", __func__, ac->session); + + q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE); + + sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + sbrps.param_id = ASM_ENABLE_SBR_PS; + sbrps.param_size = sizeof(struct asm_sbr_ps); + sbrps.sbr_ps.enable = sbr_ps_enable; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps); + if (rc < 0) { + pr_aud_err("Command opcode[0x%x]paramid[0x%x] failed\n", + ASM_STREAM_CMD_SET_ENCDEC_PARAM, + ASM_ENABLE_SBR_PS); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout opcode[0x%x] ", sbrps.hdr.opcode); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t reduced_rate_level, uint16_t rate_modulation_cmd) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \ + reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__, + ac->session, frames_per_buf, min_rate, max_rate, + reduced_rate_level, rate_modulation_cmd); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = V13K_FS; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_qcelp13_read_cfg); + enc_cfg.enc_blk.cfg.qcelp13.min_rate = min_rate; + enc_cfg.enc_blk.cfg.qcelp13.max_rate = max_rate; + enc_cfg.enc_blk.cfg.qcelp13.reduced_rate_level = reduced_rate_level; + enc_cfg.enc_blk.cfg.qcelp13.rate_modulation_cmd = rate_modulation_cmd; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t rate_modulation_cmd) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \ + rate_modulation_cmd[0x%4x]", __func__, ac->session, + frames_per_buf, min_rate, max_rate, rate_modulation_cmd); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = EVRC_FS; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_evrc_read_cfg); + enc_cfg.enc_blk.cfg.evrc.min_rate = min_rate; + enc_cfg.enc_blk.cfg.evrc.max_rate = max_rate; + enc_cfg.enc_blk.cfg.evrc.rate_modulation_cmd = rate_modulation_cmd; + enc_cfg.enc_blk.cfg.evrc.reserved = 0; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t band_mode, uint16_t dtx_enable) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]", + __func__, ac->session, frames_per_buf, band_mode, dtx_enable); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = AMRNB_FS; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_amrnb_read_cfg); + enc_cfg.enc_blk.cfg.amrnb.mode = band_mode; + enc_cfg.enc_blk.cfg.amrnb.dtx_mode = dtx_enable; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels) +{ + struct asm_stream_media_format_update fmt; + int rc = 0; + + pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate, + channels); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = LINEAR_PCM; + fmt.cfg_size = sizeof(struct asm_pcm_cfg); + fmt.write_cfg.pcm_cfg.ch_cfg = channels; + fmt.write_cfg.pcm_cfg.bits_per_sample = 16; + fmt.write_cfg.pcm_cfg.sample_rate = rate; + fmt.write_cfg.pcm_cfg.is_signed = 1; + fmt.write_cfg.pcm_cfg.interleaved = 1; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_aac(struct audio_client *ac, + struct asm_aac_cfg *cfg) +{ + struct asm_stream_media_format_update fmt; + int rc = 0; + + pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, + cfg->sample_rate, cfg->ch_cfg); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = MPEG4_AAC; + fmt.cfg_size = sizeof(struct asm_aac_cfg); + fmt.write_cfg.aac_cfg.format = cfg->format; + fmt.write_cfg.aac_cfg.aot = cfg->aot; + fmt.write_cfg.aac_cfg.ep_config = cfg->ep_config; + fmt.write_cfg.aac_cfg.section_data_resilience = + cfg->section_data_resilience; + fmt.write_cfg.aac_cfg.scalefactor_data_resilience = + cfg->scalefactor_data_resilience; + fmt.write_cfg.aac_cfg.spectral_data_resilience = + cfg->spectral_data_resilience; + fmt.write_cfg.aac_cfg.ch_cfg = cfg->ch_cfg; + fmt.write_cfg.aac_cfg.sample_rate = cfg->sample_rate; + pr_aud_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n", + __func__, fmt.format, fmt.cfg_size, + fmt.write_cfg.aac_cfg.format, + fmt.write_cfg.aac_cfg.aot, + fmt.write_cfg.aac_cfg.ch_cfg, + fmt.write_cfg.aac_cfg.sample_rate); + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_wma(struct audio_client *ac, + void *cfg) +{ + struct asm_stream_media_format_update fmt; + struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg; + int rc = 0; + + pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],\ + balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n", + ac->session, wma_cfg->format_tag, wma_cfg->sample_rate, + wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec, + wma_cfg->block_align, wma_cfg->valid_bits_per_sample, + wma_cfg->ch_mask, wma_cfg->encode_opt); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = WMA_V9; + fmt.cfg_size = sizeof(struct asm_wma_cfg); + fmt.write_cfg.wma_cfg.format_tag = wma_cfg->format_tag; + fmt.write_cfg.wma_cfg.ch_cfg = wma_cfg->ch_cfg; + fmt.write_cfg.wma_cfg.sample_rate = wma_cfg->sample_rate; + fmt.write_cfg.wma_cfg.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec; + fmt.write_cfg.wma_cfg.block_align = wma_cfg->block_align; + fmt.write_cfg.wma_cfg.valid_bits_per_sample = + wma_cfg->valid_bits_per_sample; + fmt.write_cfg.wma_cfg.ch_mask = wma_cfg->ch_mask; + fmt.write_cfg.wma_cfg.encode_opt = wma_cfg->encode_opt; + fmt.write_cfg.wma_cfg.adv_encode_opt = 0; + fmt.write_cfg.wma_cfg.adv_encode_opt2 = 0; + fmt.write_cfg.wma_cfg.drc_peak_ref = 0; + fmt.write_cfg.wma_cfg.drc_peak_target = 0; + fmt.write_cfg.wma_cfg.drc_ave_ref = 0; + fmt.write_cfg.wma_cfg.drc_ave_target = 0; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_wmapro(struct audio_client *ac, + void *cfg) +{ + struct asm_stream_media_format_update fmt; + struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg; + int rc = 0; + + pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d]," + "balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],\ + adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n", + ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate, + wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec, + wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample, + wmapro_cfg->ch_mask, wmapro_cfg->encode_opt, + wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = WMA_V10PRO; + fmt.cfg_size = sizeof(struct asm_wmapro_cfg); + fmt.write_cfg.wmapro_cfg.format_tag = wmapro_cfg->format_tag; + fmt.write_cfg.wmapro_cfg.ch_cfg = wmapro_cfg->ch_cfg; + fmt.write_cfg.wmapro_cfg.sample_rate = wmapro_cfg->sample_rate; + fmt.write_cfg.wmapro_cfg.avg_bytes_per_sec = + wmapro_cfg->avg_bytes_per_sec; + fmt.write_cfg.wmapro_cfg.block_align = wmapro_cfg->block_align; + fmt.write_cfg.wmapro_cfg.valid_bits_per_sample = + wmapro_cfg->valid_bits_per_sample; + fmt.write_cfg.wmapro_cfg.ch_mask = wmapro_cfg->ch_mask; + fmt.write_cfg.wmapro_cfg.encode_opt = wmapro_cfg->encode_opt; + fmt.write_cfg.wmapro_cfg.adv_encode_opt = wmapro_cfg->adv_encode_opt; + fmt.write_cfg.wmapro_cfg.adv_encode_opt2 = wmapro_cfg->adv_encode_opt2; + fmt.write_cfg.wmapro_cfg.drc_peak_ref = 0; + fmt.write_cfg.wmapro_cfg.drc_peak_target = 0; + fmt.write_cfg.wmapro_cfg.drc_ave_ref = 0; + fmt.write_cfg.wmapro_cfg.drc_ave_target = 0; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir, + uint32_t bufsz, uint32_t bufcnt) +{ + struct asm_stream_cmd_memory_map mem_map; + int rc = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + mem_map.hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP; + + mem_map.buf_add = buf_add; + mem_map.buf_size = bufsz * bufcnt; + mem_map.mempool_id = 0; /* EBI */ + mem_map.reserved = 0; + + q6asm_add_mmaphdr(&mem_map.hdr, + sizeof(struct asm_stream_cmd_memory_map), TRUE); + + pr_debug("buf add[%x] buf_add_parameter[%x]\n", + mem_map.buf_add, buf_add); + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_map); + if (rc < 0) { + pr_aud_err("mem_map op[0x%x]rc[%d]\n", + mem_map.hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for memory_map\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + return rc; +} + +int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir) +{ + struct asm_stream_cmd_memory_unmap mem_unmap; + int rc = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + q6asm_add_mmaphdr(&mem_unmap.hdr, + sizeof(struct asm_stream_cmd_memory_unmap), TRUE); + mem_unmap.hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP; + mem_unmap.buf_add = buf_add; + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap); + if (rc < 0) { + pr_aud_err("mem_unmap op[0x%x]rc[%d]\n", + mem_unmap.hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ); + if (!rc) { + pr_aud_err("timeout. waited for memory_map\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + return rc; +} + +int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_lrchannel_gain_params *lrgain = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_lrchannel_gain_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_lrchannel_gain_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = L_R_CHANNEL_GAIN_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_lrchannel_gain_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + lrgain = (struct asm_lrchannel_gain_params *)payload; + + lrgain->left_gain = left_gain; + lrgain->right_gain = right_gain; + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Volume Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending volume command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +static int q6asm_memory_map_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt) +{ + struct asm_stream_cmd_memory_map_regions *mmap_regions = NULL; + struct asm_memory_map_regions *mregions = NULL; + struct audio_port_data *port = NULL; + struct audio_buffer *ab = NULL; + void *mmap_region_cmd = NULL; + void *payload = NULL; + int rc = 0; + int i = 0; + int cmd_size = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + cmd_size = sizeof(struct asm_stream_cmd_memory_map_regions) + + sizeof(struct asm_memory_map_regions) * bufcnt; + + mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (mmap_region_cmd == NULL) { + pr_aud_info("mmap_region_cmd == NULL\n"); + return -EINVAL; + } + mmap_regions = (struct asm_stream_cmd_memory_map_regions *) + mmap_region_cmd; + 
q6asm_add_mmaphdr(&mmap_regions->hdr, cmd_size, TRUE); + mmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS; + mmap_regions->mempool_id = 0; + mmap_regions->nregions = bufcnt & 0x00ff; + pr_debug("map_regions->nregions = %d\n", mmap_regions->nregions); + payload = ((u8 *) mmap_region_cmd + + sizeof(struct asm_stream_cmd_memory_map_regions)); + mregions = (struct asm_memory_map_regions *)payload; + + port = &ac->port[dir]; + for (i = 0; i < bufcnt; i++) { + ab = &port->buf[i]; + mregions->phys = ab->phys; + mregions->buf_size = ab->size; + ++mregions; + } + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) mmap_region_cmd); + if (rc < 0) { + pr_aud_err("mmap_regions op[0x%x]rc[%d]\n", + mmap_regions->hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for memory_map\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(mmap_region_cmd); + return rc; +} + +static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt) +{ + struct asm_stream_cmd_memory_unmap_regions *unmap_regions = NULL; + struct asm_memory_unmap_regions *mregions = NULL; + struct audio_port_data *port = NULL; + struct audio_buffer *ab = NULL; + void *unmap_region_cmd = NULL; + void *payload = NULL; + int rc = 0; + int i = 0; + int cmd_size = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + cmd_size = sizeof(struct asm_stream_cmd_memory_unmap_regions) + + sizeof(struct asm_memory_unmap_regions) * bufcnt; + + unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (unmap_region_cmd == NULL) { + pr_aud_info("unmap_region_cmd == NULL\n"); + return -EINVAL; + } + unmap_regions = (struct asm_stream_cmd_memory_unmap_regions *) + unmap_region_cmd; + q6asm_add_mmaphdr(&unmap_regions->hdr, cmd_size, TRUE); + unmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS; + unmap_regions->nregions = bufcnt & 0x00ff; + pr_debug("unmap_regions->nregions = %d\n", unmap_regions->nregions); + payload = ((u8 *) unmap_region_cmd + + sizeof(struct asm_stream_cmd_memory_unmap_regions)); + mregions = (struct asm_memory_unmap_regions *)payload; + port = &ac->port[dir]; + for (i = 0; i < bufcnt; i++) { + ab = &port->buf[i]; + mregions->phys = ab->phys; + ++mregions; + } + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) unmap_region_cmd); + if (rc < 0) { + pr_aud_err("mmap_regions op[0x%x]rc[%d]\n", + unmap_regions->hdr.opcode, rc); + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for memory_unmap\n"); + goto fail_cmd; + } + rc = 0; + +fail_cmd: + kfree(unmap_region_cmd); + return rc; +} + +int q6asm_set_mute(struct audio_client *ac, int muteflag) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_mute_params *mute = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_mute_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_mute_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = MUTE_CONFIG_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_mute_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + mute = (struct asm_mute_params *)payload; + + mute->muteflag = muteflag; + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Mute Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending mute command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +int q6asm_set_volume(struct audio_client *ac, int volume) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_master_gain_params *mgain = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_master_gain_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_master_gain_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = MASTER_GAIN_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_master_gain_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + mgain = (struct asm_master_gain_params *)payload; + + mgain->master_gain = volume; + mgain->padding = 0x00; + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Volume Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending volume command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +int q6asm_set_softpause(struct audio_client *ac, + struct asm_softpause_params *pause_param) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_softpause_params *params = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_softpause_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + 
if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_softpause_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = SOFT_PAUSE_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_softpause_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + params = (struct asm_softpause_params *)payload; + + params->enable = pause_param->enable; + params->period = pause_param->period; + params->step = pause_param->step; + params->rampingcurve = pause_param->rampingcurve; + pr_debug("%s: soft Pause Command: enable = %d, period = %d," + "step = %d, curve = %d\n", __func__, params->enable, + params->period, params->step, params->rampingcurve); + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Volume Command(soft_pause) failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending volume command(soft_pause)" + "to apr\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +int q6asm_equalizer(struct audio_client *ac, void *eq) +{ + void *eq_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_equalizer_params *equalizer = NULL; + struct msm_audio_eq_stream_config *eq_params = NULL; + int i = 0; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_equalizer_params); + eq_cmd = kzalloc(sz, GFP_KERNEL); + if (eq_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + goto fail_cmd; + } + eq_params = (struct msm_audio_eq_stream_config *) eq; + cmd = (struct asm_pp_params_command *)eq_cmd; + q6asm_add_hdr(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_equalizer_params); + cmd->params.module_id = EQUALIZER_MODULE_ID; + cmd->params.param_id = EQUALIZER_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_equalizer_params); + cmd->params.reserved = 0; + payload = (u8 *)(eq_cmd + sizeof(struct asm_pp_params_command)); + equalizer = (struct asm_equalizer_params *)payload; + + equalizer->enable = eq_params->enable; + equalizer->num_bands = eq_params->num_bands; + pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable, + eq_params->num_bands); + for (i = 0; i < eq_params->num_bands; i++) { + equalizer->eq_bands[i].band_idx = + eq_params->eq_bands[i].band_idx; + equalizer->eq_bands[i].filter_type = + eq_params->eq_bands[i].filter_type; + equalizer->eq_bands[i].center_freq_hz = + eq_params->eq_bands[i].center_freq_hz; + equalizer->eq_bands[i].filter_gain = + eq_params->eq_bands[i].filter_gain; + equalizer->eq_bands[i].q_factor = + eq_params->eq_bands[i].q_factor; + pr_debug("%s: filter_type:%u bandnum:%d\n", __func__, + eq_params->eq_bands[i].filter_type, i); + pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__, + eq_params->eq_bands[i].center_freq_hz, i); + pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__, + 
eq_params->eq_bands[i].filter_gain, i); + pr_debug("%s: q_factor:%d bandnum:%d\n", __func__, + eq_params->eq_bands[i].q_factor, i); + } + rc = apr_send_pkt(ac->apr, (uint32_t *) eq_cmd); + if (rc < 0) { + pr_aud_err("%s: Equalizer Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending equalizer command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(eq_cmd); + return rc; +} + +int q6asm_read(struct audio_client *ac) +{ + struct asm_stream_cmd_read read; + struct audio_buffer *ab; + int dsp_buf; + struct audio_port_data *port; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[OUT]; + + q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE); + + mutex_lock(&port->lock); + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", + __func__, + ac->session, + dsp_buf, + (void *)port->buf[dsp_buf].data, + port->cpu_buf, + (void *)port->buf[port->cpu_buf].phys); + + read.hdr.opcode = ASM_DATA_CMD_READ; + read.buf_add = ab->phys; + read.buf_size = ab->size; + read.uid = port->dsp_buf; + read.hdr.token = port->dsp_buf; + + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + mutex_unlock(&port->lock); + pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, + read.buf_add, + read.hdr.token, + read.uid); + rc = apr_send_pkt(ac->apr, (uint32_t *) &read); + if (rc < 0) { + pr_aud_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); + goto fail_cmd; + } + return 0; + } +fail_cmd: + return -EINVAL; +} + +int q6asm_read_nolock(struct audio_client *ac) +{ + struct asm_stream_cmd_read read; + struct audio_buffer *ab; + int dsp_buf; + struct audio_port_data *port; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[OUT]; + + q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); + + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + + pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", + __func__, + ac->session, + dsp_buf, + (void *)port->buf[dsp_buf].data, + port->cpu_buf, + (void *)port->buf[port->cpu_buf].phys); + + read.hdr.opcode = ASM_DATA_CMD_READ; + read.buf_add = ab->phys; + read.buf_size = ab->size; + read.uid = port->dsp_buf; + read.hdr.token = port->dsp_buf; + + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, + read.buf_add, + read.hdr.token, + read.uid); + rc = apr_send_pkt(ac->apr, (uint32_t *) &read); + if (rc < 0) { + pr_aud_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); + goto fail_cmd; + } + return 0; + } +fail_cmd: + return -EINVAL; +} + +static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg) +{ + pr_debug("session=%d pkt size=%d cmd_flg=%d\n", pkt_size, cmd_flg, + ac->session); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(sizeof(struct apr_hdr)),\ + APR_PKT_VER); + hdr->src_svc = ((struct apr_svc *)ac->apr)->id; + hdr->src_domain = APR_DOMAIN_APPS; + hdr->dest_svc = APR_SVC_ASM; + hdr->dest_domain = APR_DOMAIN_ADSP; + hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01; + hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01; + if (cmd_flg) { + 
hdr->token = ac->session; + atomic_set(&ac->cmd_state, 1); + } + hdr->pkt_size = pkt_size; + return; +} + + +int q6asm_async_write(struct audio_client *ac, + struct audio_aio_write_param *param) +{ + int rc = 0; + struct asm_stream_cmd_write write; + + if (!ac || ac->apr == NULL) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE); + + /* Pass physical address as token for AIO scheme */ + write.hdr.token = param->uid; + write.hdr.opcode = ASM_DATA_CMD_WRITE; + write.buf_add = param->paddr; + write.avail_bytes = param->len; + write.uid = param->uid; + write.msw_ts = param->msw_ts; + write.lsw_ts = param->lsw_ts; + /* Use 0xFF00 for disabling timestamps */ + if (param->flags == 0xFF00) + write.uflags = (0x00000000 | (param->flags & 0x800000FF)); + else + write.uflags = (0x80000000 | param->flags); + + pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, + write.buf_add, write.avail_bytes); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &write); + if (rc < 0) { + pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__, + write.hdr.opcode, rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_async_read(struct audio_client *ac, + struct audio_aio_read_param *param) +{ + int rc = 0; + struct asm_stream_cmd_read read; + + if (!ac || ac->apr == NULL) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); + + /* Pass physical address as token for AIO scheme */ + read.hdr.token = param->paddr; + read.hdr.opcode = ASM_DATA_CMD_READ; + read.buf_add = param->paddr; + read.buf_size = param->len; + read.uid = param->uid; + + pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, + read.buf_add, read.buf_size); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &read); + if (rc < 0) { + pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__, + read.hdr.opcode, rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags) +{ + int rc = 0; + struct asm_stream_cmd_write write; + struct audio_port_data *port; + struct audio_buffer *ab; + int dsp_buf = 0; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[IN]; + + q6asm_add_hdr(ac, &write.hdr, sizeof(write), + FALSE); + mutex_lock(&port->lock); + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + + write.hdr.token = port->dsp_buf; + write.hdr.opcode = ASM_DATA_CMD_WRITE; + write.buf_add = ab->phys; + write.avail_bytes = len; + write.uid = port->dsp_buf; + write.msw_ts = msw_ts; + write.lsw_ts = lsw_ts; + /* Use 0xFF00 for disabling timestamps */ + if (flags == 0xFF00) + write.uflags = (0x00000000 | (flags & 0x800000FF)); + else + write.uflags = (0x80000000 | flags); + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + + pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]" + , __func__, + ab->phys, + write.buf_add, + write.hdr.token, + write.uid); + mutex_unlock(&port->lock); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &write); + if (rc < 0) { + pr_aud_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); + goto fail_cmd; + } + pr_debug("%s: WRITE SUCCESS\n", __func__); + return 0; + } +fail_cmd: + return -EINVAL; +} + +int q6asm_write_nolock(struct 
audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags) +{ + int rc = 0; + struct asm_stream_cmd_write write; + struct audio_port_data *port; + struct audio_buffer *ab; + int dsp_buf = 0; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[IN]; + + q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), + FALSE); + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + + write.hdr.token = port->dsp_buf; + write.hdr.opcode = ASM_DATA_CMD_WRITE; + write.buf_add = ab->phys; + write.avail_bytes = len; + write.uid = port->dsp_buf; + write.msw_ts = msw_ts; + write.lsw_ts = lsw_ts; + /* Use 0xFF00 for disabling timestamps */ + if (flags == 0xFF00) + write.uflags = (0x00000000 | (flags & 0x800000FF)); + else + write.uflags = (0x80000000 | flags); + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + + pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]" + , __func__, + ab->phys, + write.buf_add, + write.hdr.token, + write.uid); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &write); + if (rc < 0) { + pr_aud_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); + goto fail_cmd; + } + pr_debug("%s: WRITE SUCCESS\n", __func__); + return 0; + } +fail_cmd: + return -EINVAL; +} + +uint64_t q6asm_get_session_time(struct audio_client *ac) +{ + struct apr_hdr hdr; + int rc; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); + hdr.opcode = ASM_SESSION_CMD_GET_SESSION_TIME; + atomic_set(&ac->time_flag, 1); + + pr_debug("%s: session[%d]opcode[0x%x]\n", __func__, + ac->session, + hdr.opcode); + rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_aud_err("Commmand 0x%x failed\n", hdr.opcode); + goto fail_cmd; + } + rc = wait_event_timeout(ac->time_wait, + (atomic_read(&ac->time_flag) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in getting session time from DSP\n", + __func__); + goto fail_cmd; + } + return ac->time_stamp; + +fail_cmd: + return -EINVAL; +} + +int q6asm_cmd(struct audio_client *ac, int cmd) +{ + struct apr_hdr hdr; + int rc; + atomic_t *state; + int cnt = 0; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); + switch (cmd) { + case CMD_PAUSE: + pr_debug("%s:CMD_PAUSE\n", __func__); + hdr.opcode = ASM_SESSION_CMD_PAUSE; + state = &ac->cmd_state; + break; + case CMD_FLUSH: + pr_debug("%s:CMD_FLUSH\n", __func__); + hdr.opcode = ASM_STREAM_CMD_FLUSH; + state = &ac->cmd_state; + break; + case CMD_EOS: + pr_debug("%s:CMD_EOS\n", __func__); + hdr.opcode = ASM_DATA_CMD_EOS; + atomic_set(&ac->cmd_state, 0); + state = &ac->cmd_state; + break; + case CMD_CLOSE: + pr_debug("%s:CMD_CLOSE\n", __func__); + hdr.opcode = ASM_STREAM_CMD_CLOSE; + state = &ac->cmd_state; + break; + default: + pr_aud_err("Invalid format[%d]\n", cmd); + goto fail_cmd; + } + pr_debug("%s:session[%d]opcode[0x%x] ", __func__, + ac->session, + hdr.opcode); + rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_aud_err("Commmand 0x%x failed\n", hdr.opcode); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for response opcode[0x%x]\n", + hdr.opcode); + goto fail_cmd; + } + if (cmd == CMD_FLUSH) + q6asm_reset_buf_state(ac); + if (cmd == CMD_CLOSE) { + /* check if DSP return all buffers */ + if (ac->port[IN].buf) { + for (cnt = 0; cnt < ac->port[IN].max_buf_cnt; + cnt++) { + if (ac->port[IN].buf[cnt].used == IN) { + pr_aud_info("Write Buf[%d] not returned\n", + cnt); + } + } + } + if (ac->port[OUT].buf) { + for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) { + if (ac->port[OUT].buf[cnt].used == OUT) { + pr_aud_info("Read Buf[%d] not returned\n", + cnt); + } + } + } + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_cmd_nowait(struct audio_client *ac, int cmd) +{ + struct apr_hdr hdr; + int rc; + + if (!ac || ac->apr == NULL) { + pr_aud_err("%s:APR handle NULL\n", __func__); + return -EINVAL; + } + q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE); + switch (cmd) { + case CMD_PAUSE: + pr_debug("%s:CMD_PAUSE\n", __func__); + hdr.opcode = ASM_SESSION_CMD_PAUSE; + break; + case CMD_EOS: + pr_debug("%s:CMD_EOS\n", __func__); + hdr.opcode = ASM_DATA_CMD_EOS; + break; + default: + pr_aud_err("%s:Invalid format[%d]\n", __func__, cmd); + goto fail_cmd; + } + pr_debug("%s:session[%d]opcode[0x%x] ", __func__, + ac->session, + hdr.opcode); + rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_aud_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +static void q6asm_reset_buf_state(struct audio_client *ac) +{ + int cnt = 0; + int loopcnt = 0; + struct audio_port_data *port = NULL; + + if (ac->io_mode == SYNC_IO_MODE) { + mutex_lock(&ac->cmd_lock); + for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { + port = &ac->port[loopcnt]; + cnt = port->max_buf_cnt - 1; + port->dsp_buf = 0; + port->cpu_buf = 0; + while (cnt >= 0) { + if (!port->buf) + continue; + port->buf[cnt].used = 1; + cnt--; + } + } + mutex_unlock(&ac->cmd_lock); + } +} + +int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable) +{ + struct asm_stream_cmd_reg_tx_overflow_event tx_overflow; + int rc; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s:session[%d]enable[%d]\n", __func__, + ac->session, enable); + q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE); + + tx_overflow.hdr.opcode = \ + ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS; + /* tx overflow event: enable */ + tx_overflow.enable = enable; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow); + if (rc < 0) { + pr_aud_err("tx overflow op[0x%x]rc[%d]\n", \ + tx_overflow.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for tx overflow\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +#ifdef CONFIG_MSM8X60_RTAC +int q6asm_get_apr_service_id(int session_id) +{ + pr_debug("%s\n", __func__); + + if (session_id < 0) { + pr_aud_err("%s: invalid session_id = %d\n", __func__, session_id); + return -EINVAL; + } + + return ((struct apr_svc *)session[session_id]->apr)->id; +} +#endif + + +static int __init q6asm_init(void) +{ + pr_debug("%s\n", __func__); + init_waitqueue_head(&this_mmap.cmd_wait); + memset(session, 0, sizeof(session)); + return 0; +} + +device_initcall(q6asm_init); diff --git a/arch/arm/mach-msm/qdsp6v3/q6core.c b/arch/arm/mach-msm/qdsp6v3/q6core.c new file mode 100644 index 00000000..8d739347 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6core.c @@ -0,0 +1,348 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct dentry *dentry; +struct apr_svc_ch_dev *handle; +struct apr_svc *apr_handle_q; +struct apr_svc *apr_handle_m; +struct apr_svc *core_handle_q; +struct apr_client_data clnt_data; +char l_buf[4096]; + +#define TIMEOUT_MS 1000 +int32_t query_adsp_ver; +wait_queue_head_t adsp_version_wait; +uint32_t adsp_version; + +static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv) +{ + struct adsp_get_version *payload; + uint32_t *payload1; + struct adsp_service_info *svc_info; + int i; + + pr_aud_info("core msg: payload len = %d\n", data->payload_size); + switch (data->opcode) { + case APR_BASIC_RSP_RESULT:{ + payload1 = data->payload; + if (payload1[0] == ADSP_CMD_SET_POWER_COLLAPSE_STATE) { + pr_aud_info("Cmd[0x%x] status[0x%x]\n", payload1[0], + payload1[1]); + break; + } else + pr_aud_err("Invalid cmd rsp[0x%x][0x%x]\n", payload1[0], + payload1[1]); + break; + } + case ADSP_GET_VERSION_RSP:{ + if (data->payload_size) { + payload = data->payload; + if (query_adsp_ver == 1) { + query_adsp_ver = 0; + adsp_version = payload->build_id; + wake_up(&adsp_version_wait); + } + svc_info = (struct adsp_service_info *) + ((char *)payload + sizeof(struct adsp_get_version)); + pr_aud_info("----------------------------------------\n"); + pr_aud_info("Build id = %x\n", payload->build_id); + pr_aud_info("Number of services= %x\n", payload->svc_cnt); + pr_aud_info("----------------------------------------\n"); + for (i = 0; i < payload->svc_cnt; i++) { + pr_aud_info("svc-id[%d]\tver[%x.%x]\n", + svc_info[i].svc_id, + (svc_info[i].svc_ver & 0xFFFF0000) >> 16, + (svc_info[i].svc_ver & 0xFFFF)); + } + pr_aud_info("-----------------------------------------\n"); + } else + pr_aud_info("zero payload for ADSP_GET_VERSION_RSP\n"); + break; + } + case RESET_EVENTS:{ + pr_debug("Reset event received in Core 
service"); + apr_reset(core_handle_q); + core_handle_q = NULL; + break; + } + + default: + pr_aud_err("Message id from adsp core svc: %d\n", data->opcode); + break; + } + + return 0; +} + +static int32_t aprv2_debug_fn_q(struct apr_client_data *data, void *priv) +{ + pr_debug("Q6_Payload Length = %d\n", data->payload_size); + if (memcmp(data->payload, l_buf + 20, data->payload_size)) + pr_aud_info("FAIL: %d\n", data->payload_size); + else + pr_aud_info("SUCCESS: %d\n", data->payload_size); + return 0; +} + +static int32_t aprv2_debug_fn_m(struct apr_client_data *data, void *priv) +{ + pr_aud_info("M_Payload Length = %d\n", data->payload_size); + return 0; +} + +static ssize_t apr_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + pr_debug("apr debugfs opened\n"); + return 0; +} + +void *core_open(void) +{ + if (core_handle_q == NULL) { + core_handle_q = apr_register("ADSP", "CORE", + aprv2_core_fn_q, 0xFFFFFFFF, NULL); + } + pr_aud_info("Open_q %p\n", core_handle_q); + if (core_handle_q == NULL) { + pr_aud_err("%s: Unable to register CORE\n", __func__); + return NULL; + } + return core_handle_q; +} +EXPORT_SYMBOL(core_open); + +int32_t core_close(void) +{ + int ret = 0; + if (core_handle_q == NULL) { + pr_aud_err("CORE is already closed\n"); + ret = -EINVAL; + return ret; + } + apr_deregister(core_handle_q); + return ret; +} +EXPORT_SYMBOL(core_close); + +uint32_t core_get_adsp_version(void) +{ + struct apr_hdr *hdr; + int32_t rc = 0, ret = 0; + core_open(); + if (core_handle_q) { + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = ADSP_GET_VERSION; + + apr_send_pkt(core_handle_q, (uint32_t *)l_buf); + query_adsp_ver = 1; + pr_aud_info("Write_q\n"); + ret = wait_event_timeout(adsp_version_wait, + (query_adsp_ver == 0), + msecs_to_jiffies(TIMEOUT_MS)); + rc = adsp_version; + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + rc = -ENODEV; + } + } else + pr_aud_info("apr registration failed\n"); + return rc; +} +EXPORT_SYMBOL(core_get_adsp_version); + +static ssize_t apr_debug_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int len; + static int t_len; + + if (count < 0) + return 0; + len = count > 63 ? 
63 : count; + if (copy_from_user(l_buf + 20 , buf, len)) { + pr_aud_info("Unable to copy data from user space\n"); + return -EFAULT; + } + l_buf[len + 20] = 0; + if (l_buf[len + 20 - 1] == '\n') { + l_buf[len + 20 - 1] = 0; + len--; + } + if (!strncmp(l_buf + 20, "open_q", 64)) { + apr_handle_q = apr_register("ADSP", "TEST", aprv2_debug_fn_q, + 0xFFFFFFFF, NULL); + pr_aud_info("Open_q %p\n", apr_handle_q); + } else if (!strncmp(l_buf + 20, "open_m", 64)) { + apr_handle_m = apr_register("MODEM", "TEST", aprv2_debug_fn_m, + 0xFFFFFFFF, NULL); + pr_aud_info("Open_m %p\n", apr_handle_m); + } else if (!strncmp(l_buf + 20, "write_q", 64)) { + struct apr_hdr *hdr; + + t_len++; + t_len = t_len % 450; + if (!t_len % 99) + msleep(2000); + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, t_len); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 20, 9, 4060); + + apr_send_pkt(apr_handle_q, (uint32_t *)l_buf); + pr_debug("Write_q\n"); + } else if (!strncmp(l_buf + 20, "write_m", 64)) { + struct apr_hdr *hdr; + + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, 8); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 30, 9, 4060); + + apr_send_pkt(apr_handle_m, (uint32_t *)l_buf); + pr_aud_info("Write_m\n"); + } else if (!strncmp(l_buf + 20, "write_q4", 64)) { + struct apr_hdr *hdr; + + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, 4076); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 30, 9, 4060); + + apr_send_pkt(apr_handle_q, (uint32_t *)l_buf); + pr_aud_info("Write_q\n"); + } else if (!strncmp(l_buf + 20, "write_m4", 64)) { + struct apr_hdr *hdr; + + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, 4076); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 30, 9, 4060); + + apr_send_pkt(apr_handle_m, (uint32_t *)l_buf); + pr_aud_info("Write_m\n"); + } else if (!strncmp(l_buf + 20, "close", 64)) { + if (apr_handle_q) + apr_deregister(apr_handle_q); + } else if (!strncmp(l_buf + 20, "loaded", 64)) { + change_q6_state(APR_Q6_LOADED); + } else if (!strncmp(l_buf + 20, "boom", 64)) { + q6audio_dsp_not_responding(); + } else if (!strncmp(l_buf + 20, "dsp_ver", 64)) { + core_get_adsp_version(); + } else if (!strncmp(l_buf + 20, "en_pwr_col", 64)) { + struct adsp_power_collapse pc; + + core_handle_q = core_open(); + if (core_handle_q) { + pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(uint32_t));; + pc.hdr.src_port = 0; + pc.hdr.dest_port = 0; + pc.hdr.token = 0; + pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE; + pc.power_collapse = 0x00000000; + apr_send_pkt(core_handle_q, (uint32_t *)&pc); + pr_aud_info("Write_q :enable power collapse\n"); + } + } else if (!strncmp(l_buf + 20, "dis_pwr_col", 64)) { + struct adsp_power_collapse pc; + + core_handle_q = core_open(); + if (core_handle_q) { + pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(APR_HDR_SIZE), 
APR_PKT_VER); + pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(uint32_t)); + pc.hdr.src_port = 0; + pc.hdr.dest_port = 0; + pc.hdr.token = 0; + pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE; + pc.power_collapse = 0x00000001; + apr_send_pkt(core_handle_q, (uint32_t *)&pc); + pr_aud_info("Write_q:disable power collapse\n"); + } + } else + pr_aud_info("Unknown Command\n"); + + return count; +} + +static const struct file_operations apr_debug_fops = { + .write = apr_debug_write, + .open = apr_debug_open, +}; + +static int __init core_init(void) +{ +#ifdef CONFIG_DEBUG_FS + dentry = debugfs_create_file("apr", 0644, + NULL, (void *) NULL, &apr_debug_fops); +#endif /* CONFIG_DEBUG_FS */ + query_adsp_ver = 0; + init_waitqueue_head(&adsp_version_wait); + adsp_version = 0; + core_handle_q = NULL; + return 0; +} +device_initcall(core_init); + diff --git a/arch/arm/mach-msm/qdsp6v3/q6voice.c b/arch/arm/mach-msm/qdsp6v3/q6voice.c new file mode 100644 index 00000000..e320184d --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6voice.c @@ -0,0 +1,2812 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" +#include "rtac.h" +#include + +#define TIMEOUT_MS 3000 +#define SNDDEV_CAP_TTY 0x20 + +#define CMD_STATUS_SUCCESS 0 +#define CMD_STATUS_FAIL 1 + +#define VOC_PATH_PASSIVE 0 +#define VOC_PATH_FULL 1 +#define ADSP_VERSION_CVD 0x60300000 + +#define BUFFER_PAYLOAD_SIZE 4000 + +#define VOC_REC_NONE 0xFF + +#define AUD_LOG(x...) 
do { \ +struct timespec ts; \ +struct rtc_time tm; \ +getnstimeofday(&ts); \ +rtc_time_to_tm(ts.tv_sec, &tm); \ +printk(KERN_INFO "[AUD] " x); \ +printk("[AUD] at %lld (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", \ +ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \ +tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \ +} while (0) + + +struct voice_data voice; + +static bool is_adsp_support_cvd(void) +{ + return (voice.adsp_version >= ADSP_VERSION_CVD); +} +static int voice_send_enable_vocproc_cmd(struct voice_data *v); +static int voice_send_netid_timing_cmd(struct voice_data *v); + +static void *voice_get_apr_mvm(struct voice_data *v) +{ + void *apr_mvm = NULL; + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + apr_mvm = v->apr_mvm; + else + apr_mvm = v->apr_q6_mvm; + + pr_debug("%s: apr_mvm 0x%x\n", __func__, (unsigned int)apr_mvm); + + return apr_mvm; +} + +static void voice_set_apr_mvm(struct voice_data *v, void *apr_mvm) +{ + pr_debug("%s: apr_mvm 0x%x\n", __func__, (unsigned int)apr_mvm); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + v->apr_mvm = apr_mvm; + else + v->apr_q6_mvm = apr_mvm; +} + +static void *voice_get_apr_cvs(struct voice_data *v) +{ + void *apr_cvs = NULL; + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + apr_cvs = v->apr_cvs; + else + apr_cvs = v->apr_q6_cvs; + + pr_debug("%s: apr_cvs 0x%x\n", __func__, (unsigned int)apr_cvs); + + return apr_cvs; +} + +static void voice_set_apr_cvs(struct voice_data *v, void *apr_cvs) +{ + pr_debug("%s: apr_cvs 0x%x\n", __func__, (unsigned int)apr_cvs); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + v->apr_cvs = apr_cvs; + else + v->apr_q6_cvs = apr_cvs; +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_voice_handle(RTAC_CVS, apr_cvs); +#endif +} + +static void *voice_get_apr_cvp(struct voice_data *v) +{ + void *apr_cvp = NULL; + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + apr_cvp = v->apr_cvp; + else + apr_cvp = v->apr_q6_cvp; + + pr_debug("%s: apr_cvp 0x%x\n", __func__, (unsigned int)apr_cvp); + + return apr_cvp; +} + +static void voice_set_apr_cvp(struct voice_data *v, void *apr_cvp) +{ + pr_debug("%s: apr_cvp 0x%x\n", __func__, (unsigned int)apr_cvp); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + v->apr_cvp = apr_cvp; + else + v->apr_q6_cvp = apr_cvp; +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_voice_handle(RTAC_CVP, apr_cvp); +#endif +} + +static u16 voice_get_mvm_handle(struct voice_data *v) +{ + u16 mvm_handle = 0; + + if (v->voc_path == VOC_PATH_PASSIVE) + mvm_handle = v->mvm_handle; + else + mvm_handle = v->mvm_q6_handle; + + pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle); + + return mvm_handle; +} + +static void voice_set_mvm_handle(struct voice_data *v, u16 mvm_handle) +{ + pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle); + + if (v->voc_path == VOC_PATH_PASSIVE) + v->mvm_handle = mvm_handle; + else + v->mvm_q6_handle = mvm_handle; +} + +static u16 voice_get_cvs_handle(struct voice_data *v) +{ + u16 cvs_handle = 0; + + if (v->voc_path == VOC_PATH_PASSIVE) + cvs_handle = v->cvs_handle; + else + cvs_handle = v->cvs_q6_handle; + + pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle); + + return cvs_handle; +} + +static void voice_set_cvs_handle(struct voice_data *v, u16 cvs_handle) +{ + pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle); + + if (v->voc_path == VOC_PATH_PASSIVE) + v->cvs_handle = cvs_handle; + else + v->cvs_q6_handle = cvs_handle; +} + 
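The accessor pairs above all encode one routing rule: a passive voice call on an ADSP without CVD support keeps talking to the modem-hosted APR session, while a full-control (VoIP) call, or any call on a CVD-capable ADSP, uses the Q6-hosted session. A minimal stand-alone sketch of that selection rule follows; struct voice_stub and get_cvs_session() are hypothetical stand-ins for the driver's struct voice_data and voice_get_apr_cvs(), kept only to make the rule concrete.

#include <stdbool.h>
#include <stdio.h>

#define VOC_PATH_PASSIVE 0
#define ADSP_VERSION_CVD 0x60300000

/* Trimmed, hypothetical stand-in for struct voice_data. */
struct voice_stub {
	int voc_path;
	unsigned int adsp_version;
	void *apr_cvs;		/* modem-hosted CVS session */
	void *apr_q6_cvs;	/* Q6/ADSP-hosted CVS session */
};

static bool adsp_supports_cvd(const struct voice_stub *v)
{
	return v->adsp_version >= ADSP_VERSION_CVD;
}

/* Same selection rule as voice_get_apr_cvs() in the patch above. */
static void *get_cvs_session(const struct voice_stub *v)
{
	if (v->voc_path == VOC_PATH_PASSIVE && !adsp_supports_cvd(v))
		return v->apr_cvs;	/* legacy path: modem owns the stream */
	return v->apr_q6_cvs;		/* CVD path: Q6/ADSP owns the stream */
}

int main(void)
{
	struct voice_stub v = {
		.voc_path = VOC_PATH_PASSIVE,
		.adsp_version = 0x50000000,	/* pre-CVD firmware */
		.apr_cvs = (void *)0x1,
		.apr_q6_cvs = (void *)0x2,
	};

	printf("selected CVS session: %p\n", get_cvs_session(&v));
	return 0;
}

The MVM and CVP getters/setters and the *_handle accessors in the patch follow the same split between the modem-hosted and Q6-hosted fields.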
+static u16 voice_get_cvp_handle(struct voice_data *v) +{ + u16 cvp_handle = 0; + + if (v->voc_path == VOC_PATH_PASSIVE) + cvp_handle = v->cvp_handle; + else + cvp_handle = v->cvp_q6_handle; + + pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle); + + return cvp_handle; +} + +static void voice_set_cvp_handle(struct voice_data *v, u16 cvp_handle) +{ + pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle); + + if (v->voc_path == VOC_PATH_PASSIVE) + v->cvp_handle = cvp_handle; + else + v->cvp_q6_handle = cvp_handle; +} + +static void voice_auddev_cb_function(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data); + +static int32_t modem_mvm_callback(struct apr_client_data *data, void *priv); +static int32_t modem_cvs_callback(struct apr_client_data *data, void *priv); +static int32_t modem_cvp_callback(struct apr_client_data *data, void *priv); + +static int voice_apr_register(struct voice_data *v) +{ + int rc = 0; + void *apr_mvm; + void *apr_cvs; + void *apr_cvp; + + if (v->adsp_version == 0) { + core_open(); + v->adsp_version = core_get_adsp_version(); + pr_aud_info("adsp_ver fetched:%x\n", v->adsp_version); + } + apr_mvm = voice_get_apr_mvm(v); + apr_cvs = voice_get_apr_cvs(v); + apr_cvp = voice_get_apr_cvp(v); + + + pr_debug("into voice_apr_register_callback\n"); + /* register callback to APR */ + if (apr_mvm == NULL) { + pr_debug("start to register MVM callback\n"); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) { + apr_mvm = apr_register("MODEM", "MVM", + modem_mvm_callback, 0xFFFFFFFF, + v); + } else { + apr_mvm = apr_register("ADSP", "MVM", + modem_mvm_callback, 0xFFFFFFFF, + v); + } + + if (apr_mvm == NULL) { + pr_aud_err("Unable to register MVM %d\n", + is_adsp_support_cvd()); + rc = -ENODEV; + goto done; + } + + voice_set_apr_mvm(v, apr_mvm); + } + + if (apr_cvs == NULL) { + pr_debug("start to register CVS callback\n"); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) { + apr_cvs = apr_register("MODEM", "CVS", + modem_cvs_callback, 0xFFFFFFFF, + v); + } else { + apr_cvs = apr_register("ADSP", "CVS", + modem_cvs_callback, 0xFFFFFFFF, + v); + } + + if (apr_cvs == NULL) { + pr_aud_err("Unable to register CVS %d\n", + is_adsp_support_cvd()); + rc = -ENODEV; + goto err; + } + + voice_set_apr_cvs(v, apr_cvs); + } + + if (apr_cvp == NULL) { + pr_debug("start to register CVP callback\n"); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) { + apr_cvp = apr_register("MODEM", "CVP", + modem_cvp_callback, 0xFFFFFFFF, + v); + } else { + apr_cvp = apr_register("ADSP", "CVP", + modem_cvp_callback, 0xFFFFFFFF, + v); + } + + if (apr_cvp == NULL) { + pr_aud_err("Unable to register CVP %d\n", + is_adsp_support_cvd()); + rc = -ENODEV; + goto err1; + } + + voice_set_apr_cvp(v, apr_cvp); + } + return 0; + +err1: + apr_deregister(apr_cvs); + apr_cvs = NULL; + voice_set_apr_cvs(v, apr_cvs); +err: + apr_deregister(apr_mvm); + apr_mvm = NULL; + voice_set_apr_mvm(v, apr_mvm); + +done: + return rc; +} + +static int voice_create_mvm_cvs_session(struct voice_data *v) +{ + int ret = 0; + struct mvm_create_ctl_session_cmd mvm_session_cmd; + struct cvs_create_passive_ctl_session_cmd cvs_session_cmd; + struct cvs_create_full_ctl_session_cmd cvs_full_ctl_cmd; + struct mvm_attach_stream_cmd attach_stream_cmd; + void *apr_mvm = voice_get_apr_mvm(v); + void *apr_cvs = voice_get_apr_cvs(v); + void *apr_cvp = voice_get_apr_cvp(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvs_handle = voice_get_cvs_handle(v); + u16 cvp_handle = 
voice_get_cvp_handle(v); + + pr_aud_info("%s:\n", __func__); + + /* start to ping if modem service is up */ + pr_debug("in voice_create_mvm_cvs_session, mvm_hdl=%d, cvs_hdl=%d\n", + mvm_handle, cvs_handle); + /* send cmd to create mvm session and wait for response */ + + if (!mvm_handle) { + if (v->voc_path == VOC_PATH_PASSIVE) { + mvm_session_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_session_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_session_cmd) - APR_HDR_SIZE); + pr_debug("Send mvm create session pkt size = %d\n", + mvm_session_cmd.hdr.pkt_size); + mvm_session_cmd.hdr.src_port = 0; + mvm_session_cmd.hdr.dest_port = 0; + mvm_session_cmd.hdr.token = 0; + pr_debug("%s: Creating MVM passive ctrl\n", __func__); + mvm_session_cmd.hdr.opcode = + VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION; + strncpy(mvm_session_cmd.mvm_session.name, + "default modem voice", SESSION_NAME_LEN); + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, + (uint32_t *) &mvm_session_cmd); + if (ret < 0) { + pr_aud_err("Error sending MVM_CONTROL_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + } else { + mvm_session_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_session_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_session_cmd) - APR_HDR_SIZE); + pr_debug("Send mvm create session pkt size = %d\n", + mvm_session_cmd.hdr.pkt_size); + mvm_session_cmd.hdr.src_port = 0; + mvm_session_cmd.hdr.dest_port = 0; + mvm_session_cmd.hdr.token = 0; + pr_debug("%s: Creating MVM full ctrl\n", __func__); + mvm_session_cmd.hdr.opcode = + VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION; + strcpy(mvm_session_cmd.mvm_session.name, + "default voip"); + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, + (uint32_t *) &mvm_session_cmd); + if (ret < 0) { + pr_aud_err("Error sending MVM_FULL_CTL_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + } + + /* Get the created MVM handle. 
*/ + mvm_handle = voice_get_mvm_handle(v); + } + + /* send cmd to create cvs session */ + if (!cvs_handle) { + if (v->voc_path == VOC_PATH_PASSIVE) { + pr_aud_info("%s:creating CVS passive session\n", __func__); + + cvs_session_cmd.hdr.hdr_field = APR_HDR_FIELD( + APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_session_cmd) - APR_HDR_SIZE); + pr_aud_info("send stream create session pkt size = %d\n", + cvs_session_cmd.hdr.pkt_size); + cvs_session_cmd.hdr.src_port = 0; + cvs_session_cmd.hdr.dest_port = 0; + cvs_session_cmd.hdr.token = 0; + cvs_session_cmd.hdr.opcode = + VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION; + strncpy(cvs_session_cmd.cvs_session.name, + "default modem voice", SESSION_NAME_LEN); + v->cvs_state = CMD_STATUS_FAIL; + + pr_aud_info("%s: CVS create\n", __func__); + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_session_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending STREAM_CONTROL_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* Get the created CVS handle. */ + cvs_handle = voice_get_cvs_handle(v); + } else { + pr_aud_info("%s:creating CVS full session\n", __func__); + + cvs_full_ctl_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + + cvs_full_ctl_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_full_ctl_cmd) - APR_HDR_SIZE); + + cvs_full_ctl_cmd.hdr.src_port = 0; + cvs_full_ctl_cmd.hdr.dest_port = 0; + cvs_full_ctl_cmd.hdr.token = 0; + cvs_full_ctl_cmd.hdr.opcode = + VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION; + cvs_full_ctl_cmd.cvs_session.direction = 2; + + cvs_full_ctl_cmd.cvs_session.enc_media_type = + v->mvs_info.media_type; + cvs_full_ctl_cmd.cvs_session.dec_media_type = + v->mvs_info.media_type; + cvs_full_ctl_cmd.cvs_session.network_id = + v->mvs_info.network_type; + strncpy(cvs_full_ctl_cmd.cvs_session.name, + "default voip", SESSION_NAME_LEN); + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, + (uint32_t *) &cvs_full_ctl_cmd); + + if (ret < 0) { + pr_aud_err("%s: Err %d sending CREATE_FULL_CTRL\n", + __func__, ret); + goto fail; + } + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* Get the created CVS handle. */ + cvs_handle = voice_get_cvs_handle(v); + + /* Attach MVM to CVS. 
*/ + pr_aud_info("%s: Attach MVM to stream\n", __func__); + + attach_stream_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + + attach_stream_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(attach_stream_cmd) - APR_HDR_SIZE); + attach_stream_cmd.hdr.src_port = 0; + attach_stream_cmd.hdr.dest_port = mvm_handle; + attach_stream_cmd.hdr.token = 0; + attach_stream_cmd.hdr.opcode = + VSS_IMVM_CMD_ATTACH_STREAM; + attach_stream_cmd.attach_stream.handle = cvs_handle; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, + (uint32_t *) &attach_stream_cmd); + if (ret < 0) { + pr_aud_err("%s: Error %d sending ATTACH_STREAM\n", + __func__, ret); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + } + } + + return 0; + +fail: + apr_deregister(apr_mvm); + apr_mvm = NULL; + voice_set_apr_mvm(v, apr_mvm); + + apr_deregister(apr_cvs); + apr_cvs = NULL; + voice_set_apr_cvs(v, apr_cvs); + + apr_deregister(apr_cvp); + apr_cvp = NULL; + voice_set_apr_cvp(v, apr_cvp); + + cvp_handle = 0; + voice_set_cvp_handle(v, cvp_handle); + + cvs_handle = 0; + voice_set_cvs_handle(v, cvs_handle); + + return -EINVAL; +} + +static int voice_destroy_mvm_cvs_session(struct voice_data *v) +{ + int ret = 0; + struct mvm_detach_stream_cmd detach_stream; + struct apr_hdr mvm_destroy; + struct apr_hdr cvs_destroy; + void *apr_mvm = voice_get_apr_mvm(v); + void *apr_cvs = voice_get_apr_cvs(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* MVM, CVS sessions are destroyed only for Full control sessions. */ + if (v->voc_path == VOC_PATH_FULL) { + pr_aud_info("%s: MVM detach stream\n", __func__); + + /* Detach voice stream. */ + detach_stream.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + detach_stream.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(detach_stream) - APR_HDR_SIZE); + detach_stream.hdr.src_port = 0; + detach_stream.hdr.dest_port = mvm_handle; + detach_stream.hdr.token = 0; + detach_stream.hdr.opcode = VSS_IMVM_CMD_DETACH_STREAM; + detach_stream.detach_stream.handle = cvs_handle; + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, (uint32_t *) &detach_stream); + if (ret < 0) { + pr_aud_err("%s: Error %d sending DETACH_STREAM\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait event timeout\n", __func__); + goto fail; + } + + /* Destroy CVS. 
*/ + pr_aud_info("%s: CVS destroy session\n", __func__); + + cvs_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_destroy) - APR_HDR_SIZE); + cvs_destroy.src_port = 0; + cvs_destroy.dest_port = cvs_handle; + cvs_destroy.token = 0; + cvs_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_destroy); + if (ret < 0) { + pr_aud_err("%s: Error %d sending CVS DESTROY\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait event timeout\n", __func__); + + goto fail; + } + cvs_handle = 0; + voice_set_cvs_handle(v, cvs_handle); + + /* Destroy MVM. */ + pr_aud_info("%s: MVM destroy session\n", __func__); + + mvm_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + mvm_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_destroy) - APR_HDR_SIZE); + mvm_destroy.src_port = 0; + mvm_destroy.dest_port = mvm_handle; + mvm_destroy.token = 0; + mvm_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_destroy); + if (ret < 0) { + pr_aud_err("%s: Error %d sending MVM DESTROY\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait event timeout\n", __func__); + + goto fail; + } + mvm_handle = 0; + voice_set_mvm_handle(v, mvm_handle); + } + +fail: + return 0; +} + +static int voice_send_tty_mode_to_modem(struct voice_data *v) +{ + struct msm_snddev_info *dev_tx_info; + struct msm_snddev_info *dev_rx_info; + int tty_mode = 0; + int ret = 0; + struct mvm_set_tty_mode_cmd mvm_tty_mode_cmd; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + + dev_rx_info = audio_dev_ctrl_find_dev(v->dev_rx.dev_id); + if (IS_ERR(dev_rx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_rx.dev_id); + goto done; + } + + dev_tx_info = audio_dev_ctrl_find_dev(v->dev_tx.dev_id); + if (IS_ERR(dev_tx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_tx.dev_id); + goto done; + } + + if ((dev_rx_info->capability & SNDDEV_CAP_TTY) && + (dev_tx_info->capability & SNDDEV_CAP_TTY)) + tty_mode = 3; /* FULL */ + else if (!(dev_tx_info->capability & SNDDEV_CAP_TTY) && + (dev_rx_info->capability & SNDDEV_CAP_TTY)) + tty_mode = 2; /* VCO */ + else if ((dev_tx_info->capability & SNDDEV_CAP_TTY) && + !(dev_rx_info->capability & SNDDEV_CAP_TTY)) + tty_mode = 1; /* HCO */ + + if (tty_mode) { + /* send tty mode cmd to mvm */ + mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD( + APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_tty_mode_cmd) - APR_HDR_SIZE); + pr_debug("pkt size = %d\n", mvm_tty_mode_cmd.hdr.pkt_size); + mvm_tty_mode_cmd.hdr.src_port = 0; + mvm_tty_mode_cmd.hdr.dest_port = mvm_handle; + mvm_tty_mode_cmd.hdr.token = 0; + mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE; + mvm_tty_mode_cmd.tty_mode.mode = tty_mode; + pr_aud_info("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode); + + v->mvm_state = CMD_STATUS_FAIL; + pr_aud_info("%s: MVM set tty\n", __func__); + ret = apr_send_pkt(apr_mvm, (uint32_t *) 
&mvm_tty_mode_cmd); + if (ret < 0) { + pr_aud_err("Fail: sending VSS_ISTREAM_CMD_SET_TTY_MODE\n"); + goto done; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto done; + } + } + return 0; +done: + return -EINVAL; +} + +static int voice_send_cvs_cal_to_modem(struct voice_data *v) +{ + struct apr_hdr cvs_cal_cmd_hdr = {0}; + uint32_t *cmd_buf; + struct acdb_cal_data cal_data; + struct acdb_cal_block *cal_blk; + int32_t cal_size_per_network; + uint32_t *cal_data_per_network; + int index = 0; + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* fill the header */ + cvs_cal_cmd_hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_cal_cmd_hdr) - APR_HDR_SIZE); + cvs_cal_cmd_hdr.src_port = 0; + cvs_cal_cmd_hdr.dest_port = cvs_handle; + cvs_cal_cmd_hdr.token = 0; + cvs_cal_cmd_hdr.opcode = + VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA; + + pr_debug("voice_send_cvs_cal_to_modem\n"); + /* get the cvs cal data */ + get_vocstrm_cal(&cal_data); + if (cal_data.num_cal_blocks == 0) { + pr_aud_err("%s: No calibration data to send!\n", __func__); + goto done; + } + + /* send cvs cal to modem */ + cmd_buf = kzalloc((sizeof(struct apr_hdr) + BUFFER_PAYLOAD_SIZE), + GFP_KERNEL); + if (!cmd_buf) { + pr_aud_err("No memory is allocated.\n"); + return -ENOMEM; + } + pr_debug("----- num_cal_blocks=%d\n", (s32)cal_data.num_cal_blocks); + cal_blk = cal_data.cal_blocks; + pr_debug("cal_blk =%x\n", (uint32_t)cal_data.cal_blocks); + + for (; index < cal_data.num_cal_blocks; index++) { + cal_size_per_network = cal_blk[index].cal_size; + pr_debug(" cal size =%d\n", cal_size_per_network); + if (cal_size_per_network >= BUFFER_PAYLOAD_SIZE) + pr_aud_err("Cal size is too big\n"); + cal_data_per_network = (u32 *)cal_blk[index].cal_kvaddr; + pr_debug(" cal data=%x\n", (uint32_t)cal_data_per_network); + cvs_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + cal_size_per_network); + pr_debug("header size =%d, pkt_size =%d\n", + APR_HDR_SIZE, cvs_cal_cmd_hdr.pkt_size); + memcpy(cmd_buf, &cvs_cal_cmd_hdr, APR_HDR_SIZE); + memcpy(cmd_buf + (APR_HDR_SIZE / sizeof(uint32_t)), + cal_data_per_network, cal_size_per_network); + pr_debug("send cvs cal: index =%d\n", index); + v->cvs_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvs, cmd_buf); + if (ret < 0) { + pr_aud_err("Fail: sending cvs cal, idx=%d\n", index); + continue; + } + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + } + kfree(cmd_buf); +done: + return 0; +} + +static int voice_send_cvp_cal_to_modem(struct voice_data *v) +{ + struct apr_hdr cvp_cal_cmd_hdr = {0}; + uint32_t *cmd_buf; + struct acdb_cal_data cal_data; + struct acdb_cal_block *cal_blk; + int32_t cal_size_per_network; + uint32_t *cal_data_per_network; + int index = 0; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + + /* fill the header */ + cvp_cal_cmd_hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_cal_cmd_hdr) - APR_HDR_SIZE); + cvp_cal_cmd_hdr.src_port = 0; + cvp_cal_cmd_hdr.dest_port = 
cvp_handle; + cvp_cal_cmd_hdr.token = 0; + cvp_cal_cmd_hdr.opcode = + VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA; + + /* get cal data */ + get_vocproc_cal(&cal_data); + if (cal_data.num_cal_blocks == 0) { + pr_aud_err("%s: No calibration data to send!\n", __func__); + goto done; + } + + /* send cal to modem */ + cmd_buf = kzalloc((sizeof(struct apr_hdr) + BUFFER_PAYLOAD_SIZE), + GFP_KERNEL); + if (!cmd_buf) { + pr_aud_err("No memory is allocated.\n"); + return -ENOMEM; + } + pr_debug("----- num_cal_blocks=%d\n", (s32)cal_data.num_cal_blocks); + cal_blk = cal_data.cal_blocks; + pr_debug(" cal_blk =%x\n", (uint32_t)cal_data.cal_blocks); + + for (; index < cal_data.num_cal_blocks; index++) { + cal_size_per_network = cal_blk[index].cal_size; + if (cal_size_per_network >= BUFFER_PAYLOAD_SIZE) + pr_aud_err("Cal size is too big\n"); + pr_debug(" cal size =%d\n", cal_size_per_network); + cal_data_per_network = (u32 *)cal_blk[index].cal_kvaddr; + pr_debug(" cal data=%x\n", (uint32_t)cal_data_per_network); + + cvp_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + cal_size_per_network); + memcpy(cmd_buf, &cvp_cal_cmd_hdr, APR_HDR_SIZE); + memcpy(cmd_buf + (APR_HDR_SIZE / sizeof(*cmd_buf)), + cal_data_per_network, cal_size_per_network); + pr_debug("Send cvp cal\n"); + v->cvp_state = CMD_STATUS_FAIL; + pr_aud_info("%s: CVP calib\n", __func__); + ret = apr_send_pkt(apr_cvp, cmd_buf); + if (ret < 0) { + pr_aud_err("Fail: sending cvp cal, idx=%d\n", index); + continue; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + } + kfree(cmd_buf); +done: + return 0; +} + +static int voice_send_cvp_vol_tbl_to_modem(struct voice_data *v) +{ + struct apr_hdr cvp_vol_cal_cmd_hdr = {0}; + uint32_t *cmd_buf; + struct acdb_cal_data cal_data; + struct acdb_cal_block *cal_blk; + int32_t cal_size_per_network; + uint32_t *cal_data_per_network; + int index = 0; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + + /* fill the header */ + cvp_vol_cal_cmd_hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_vol_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_vol_cal_cmd_hdr) - APR_HDR_SIZE); + cvp_vol_cal_cmd_hdr.src_port = 0; + cvp_vol_cal_cmd_hdr.dest_port = cvp_handle; + cvp_vol_cal_cmd_hdr.token = 0; + cvp_vol_cal_cmd_hdr.opcode = + VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE; + + /* get cal data */ + get_vocvol_cal(&cal_data); + if (cal_data.num_cal_blocks == 0) { + pr_aud_err("%s: No calibration data to send!\n", __func__); + goto done; + } + + /* send cal to modem */ + cmd_buf = kzalloc((sizeof(struct apr_hdr) + BUFFER_PAYLOAD_SIZE), + GFP_KERNEL); + if (!cmd_buf) { + pr_aud_err("No memory is allocated.\n"); + return -ENOMEM; + } + pr_debug("----- num_cal_blocks=%d\n", (s32)cal_data.num_cal_blocks); + cal_blk = cal_data.cal_blocks; + pr_debug("Cal_blk =%x\n", (uint32_t)cal_data.cal_blocks); + + for (; index < cal_data.num_cal_blocks; index++) { + cal_size_per_network = cal_blk[index].cal_size; + cal_data_per_network = (u32 *)cal_blk[index].cal_kvaddr; + pr_debug("Cal size =%d, index=%d\n", cal_size_per_network, + index); + pr_debug("Cal data=%x\n", (uint32_t)cal_data_per_network); + cvp_vol_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + cal_size_per_network); + memcpy(cmd_buf, &cvp_vol_cal_cmd_hdr, APR_HDR_SIZE); + memcpy(cmd_buf + (APR_HDR_SIZE / 
sizeof(uint32_t)), + cal_data_per_network, cal_size_per_network); + pr_debug("Send vol table\n"); + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, cmd_buf); + if (ret < 0) { + pr_aud_err("Fail: sending cvp vol cal, idx=%d\n", index); + continue; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + } + kfree(cmd_buf); +done: + return 0; +} + +static int voice_set_dtx(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* Set DTX */ + struct cvs_set_enc_dtx_mode_cmd cvs_set_dtx = { + .hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER), + .hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_dtx) - APR_HDR_SIZE), + .hdr.src_port = 0, + .hdr.dest_port = cvs_handle, + .hdr.token = 0, + .hdr.opcode = VSS_ISTREAM_CMD_SET_ENC_DTX_MODE, + .dtx_mode.enable = v->mvs_info.dtx_mode, + }; + + pr_debug("%s: Setting DTX %d\n", __func__, v->mvs_info.dtx_mode); + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_dtx); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_DTX\n", __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + } + +done: + return ret; +} + +static int voice_config_cvs_vocoder(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* Set media type. */ + struct cvs_set_media_type_cmd cvs_set_media_cmd; + + pr_aud_info("%s: Setting media type\n", __func__); + + cvs_set_media_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_media_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_media_cmd) - APR_HDR_SIZE); + cvs_set_media_cmd.hdr.src_port = 0; + cvs_set_media_cmd.hdr.dest_port = cvs_handle; + cvs_set_media_cmd.hdr.token = 0; + cvs_set_media_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MEDIA_TYPE; + cvs_set_media_cmd.media_type.tx_media_id = v->mvs_info.media_type; + cvs_set_media_cmd.media_type.rx_media_id = v->mvs_info.media_type; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_media_cmd); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_MEDIA_TYPE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + /* Set encoder properties. 
*/ + switch (v->mvs_info.media_type) { + case VSS_MEDIA_ID_EVRC_MODEM: { + struct cvs_set_cdma_enc_minmax_rate_cmd cvs_set_cdma_rate; + + pr_aud_info("%s: Setting EVRC min-max rate\n", __func__); + + cvs_set_cdma_rate.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_cdma_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_cdma_rate) - APR_HDR_SIZE); + cvs_set_cdma_rate.hdr.src_port = 0; + cvs_set_cdma_rate.hdr.dest_port = cvs_handle; + cvs_set_cdma_rate.hdr.token = 0; + cvs_set_cdma_rate.hdr.opcode = + VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE; + cvs_set_cdma_rate.cdma_rate.min_rate = v->mvs_info.rate; + cvs_set_cdma_rate.cdma_rate.max_rate = v->mvs_info.rate; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_cdma_rate); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_EVRC_MINMAX_RATE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + break; + } + + case VSS_MEDIA_ID_AMR_NB_MODEM: { + struct cvs_set_amr_enc_rate_cmd cvs_set_amr_rate; + + pr_aud_info("%s: Setting AMR rate\n", __func__); + + cvs_set_amr_rate.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_amr_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_amr_rate) - APR_HDR_SIZE); + cvs_set_amr_rate.hdr.src_port = 0; + cvs_set_amr_rate.hdr.dest_port = cvs_handle; + cvs_set_amr_rate.hdr.token = 0; + cvs_set_amr_rate.hdr.opcode = + VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE; + cvs_set_amr_rate.amr_rate.mode = v->mvs_info.rate; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amr_rate); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_AMR_RATE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + ret = voice_set_dtx(v); + + break; + } + + case VSS_MEDIA_ID_AMR_WB_MODEM: { + struct cvs_set_amrwb_enc_rate_cmd cvs_set_amrwb_rate; + + pr_aud_info("%s: Setting AMR WB rate\n", __func__); + + cvs_set_amrwb_rate.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_amrwb_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_amrwb_rate) - APR_HDR_SIZE); + cvs_set_amrwb_rate.hdr.src_port = 0; + cvs_set_amrwb_rate.hdr.dest_port = cvs_handle; + cvs_set_amrwb_rate.hdr.token = 0; + cvs_set_amrwb_rate.hdr.opcode = + VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE; + cvs_set_amrwb_rate.amrwb_rate.mode = v->mvs_info.rate; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amrwb_rate); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_AMRWB_RATE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + ret = voice_set_dtx(v); + + break; + } + + case VSS_MEDIA_ID_G729: + case VSS_MEDIA_ID_G711_ALAW: + case VSS_MEDIA_ID_G711_MULAW: { + ret = voice_set_dtx(v); + + break; + } + + default: { + /* Do nothing. 
*/ + } + } + +done: + return ret; +} + +static int voice_send_start_voice_cmd(struct voice_data *v) +{ + struct apr_hdr mvm_start_voice_cmd; + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + + mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE); + AUD_LOG("send mvm_start_voice_cmd pkt size = %d\n", + mvm_start_voice_cmd.pkt_size); + mvm_start_voice_cmd.src_port = 0; + mvm_start_voice_cmd.dest_port = mvm_handle; + mvm_start_voice_cmd.token = 0; + mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_disable_vocproc(struct voice_data *v) +{ + struct apr_hdr cvp_disable_cmd; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* disable vocproc and wait for respose */ + cvp_disable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_disable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_disable_cmd) - APR_HDR_SIZE); + pr_debug("cvp_disable_cmd pkt size = %d, cvp_handle=%d\n", + cvp_disable_cmd.pkt_size, cvp_handle); + cvp_disable_cmd.src_port = 0; + cvp_disable_cmd.dest_port = cvp_handle; + cvp_disable_cmd.token = 0; + cvp_disable_cmd.opcode = VSS_IVOCPROC_CMD_DISABLE; + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_disable_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IVOCPROC_CMD_DISABLE\n"); + goto fail; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_remove_voice(v); +#endif + + return 0; +fail: + return -EINVAL; +} + +static int voice_set_device(struct voice_data *v) +{ + struct cvp_set_device_cmd cvp_setdev_cmd; + struct msm_snddev_info *dev_tx_info; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + + /* set device and wait for response */ + cvp_setdev_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_setdev_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_setdev_cmd) - APR_HDR_SIZE); + pr_debug(" send create cvp setdev, pkt size = %d\n", + cvp_setdev_cmd.hdr.pkt_size); + cvp_setdev_cmd.hdr.src_port = 0; + cvp_setdev_cmd.hdr.dest_port = cvp_handle; + cvp_setdev_cmd.hdr.token = 0; + cvp_setdev_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_DEVICE; + + dev_tx_info = audio_dev_ctrl_find_dev(v->dev_tx.dev_id); + if (IS_ERR(dev_tx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_tx.dev_id); + goto fail; + } + + if (dev_tx_info->channel_mode > 1) + cvp_setdev_cmd.cvp_set_device.tx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE; + else + cvp_setdev_cmd.cvp_set_device.tx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS; + cvp_setdev_cmd.cvp_set_device.rx_topology_id = + 
VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT; + cvp_setdev_cmd.cvp_set_device.tx_port_id = v->dev_tx.dev_port_id; + cvp_setdev_cmd.cvp_set_device.rx_port_id = v->dev_rx.dev_port_id; + pr_aud_info("topology=%d , tx_port_id=%d, rx_port_id=%d\n", + cvp_setdev_cmd.cvp_set_device.tx_topology_id, + cvp_setdev_cmd.cvp_set_device.tx_port_id, + cvp_setdev_cmd.cvp_set_device.rx_port_id); + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n"); + goto fail; + } + pr_debug("wait for cvp create session event\n"); + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* send cvs cal */ + voice_send_cvs_cal_to_modem(v); + + /* send cvp cal */ + voice_send_cvp_cal_to_modem(v); + + /* send cvp vol table cal */ + voice_send_cvp_vol_tbl_to_modem(v); + + /* enable vocproc and wait for respose */ + voice_send_enable_vocproc_cmd(v); + + /* send tty mode if tty device is used */ + voice_send_tty_mode_to_modem(v); + + if (v->voc_path == VOC_PATH_FULL) + voice_send_netid_timing_cmd(v); + +#ifdef CONFIG_MSM8X60_RTAC + rtac_add_voice(v); +#endif + + return 0; +fail: + return -EINVAL; +} + +static int voice_send_stop_voice_cmd(struct voice_data *v) +{ + struct apr_hdr mvm_stop_voice_cmd; + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + + mvm_stop_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_stop_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_stop_voice_cmd) - APR_HDR_SIZE); + AUD_LOG("send mvm_stop_voice_cmd pkt size = %d\n", + mvm_stop_voice_cmd.pkt_size); + mvm_stop_voice_cmd.src_port = 0; + mvm_stop_voice_cmd.dest_port = mvm_handle; + mvm_stop_voice_cmd.token = 0; + mvm_stop_voice_cmd.opcode = VSS_IMVM_CMD_STOP_VOICE; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_stop_voice_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IMVM_CMD_STOP_VOICE\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_setup_modem_voice(struct voice_data *v) +{ + struct cvp_create_full_ctl_session_cmd cvp_session_cmd; + int ret = 0; + struct msm_snddev_info *dev_tx_info; + void *apr_cvp = voice_get_apr_cvp(v); + + /* create cvp session and wait for response */ + cvp_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_session_cmd) - APR_HDR_SIZE); + pr_aud_info(" send create cvp session, pkt size = %d\n", + cvp_session_cmd.hdr.pkt_size); + cvp_session_cmd.hdr.src_port = 0; + cvp_session_cmd.hdr.dest_port = 0; + cvp_session_cmd.hdr.token = 0; + cvp_session_cmd.hdr.opcode = + VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION; + + dev_tx_info = audio_dev_ctrl_find_dev(v->dev_tx.dev_id); + if (IS_ERR(dev_tx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_tx.dev_id); + goto fail; + } + + if (dev_tx_info->channel_mode > 1) + cvp_session_cmd.cvp_session.tx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE; + else + cvp_session_cmd.cvp_session.tx_topology_id = + 
VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS; + cvp_session_cmd.cvp_session.rx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT; + cvp_session_cmd.cvp_session.direction = 2; /*tx and rx*/ + cvp_session_cmd.cvp_session.network_id = VSS_NETWORK_ID_DEFAULT; + cvp_session_cmd.cvp_session.tx_port_id = v->dev_tx.dev_port_id; + cvp_session_cmd.cvp_session.rx_port_id = v->dev_rx.dev_port_id; + pr_aud_info("topology=%d net_id=%d, dir=%d tx_port_id=%d, rx_port_id=%d\n", + cvp_session_cmd.cvp_session.tx_topology_id, + cvp_session_cmd.cvp_session.network_id, + cvp_session_cmd.cvp_session.direction, + cvp_session_cmd.cvp_session.tx_port_id, + cvp_session_cmd.cvp_session.rx_port_id); + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_session_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n"); + goto fail; + } + pr_debug("wait for cvp create session event\n"); + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* send cvs cal */ + voice_send_cvs_cal_to_modem(v); + + /* send cvp cal */ + voice_send_cvp_cal_to_modem(v); + + /* send cvp vol table cal */ + voice_send_cvp_vol_tbl_to_modem(v); + + return 0; + +fail: + return -EINVAL; +} + +static int voice_send_enable_vocproc_cmd(struct voice_data *v) +{ + int ret = 0; + struct apr_hdr cvp_enable_cmd; + + u16 cvp_handle = voice_get_cvp_handle(v); + void *apr_cvp = voice_get_apr_cvp(v); + + /* enable vocproc and wait for respose */ + cvp_enable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_enable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_enable_cmd) - APR_HDR_SIZE); + pr_debug("cvp_enable_cmd pkt size = %d, cvp_handle=%d\n", + cvp_enable_cmd.pkt_size, cvp_handle); + cvp_enable_cmd.src_port = 0; + cvp_enable_cmd.dest_port = cvp_handle; + cvp_enable_cmd.token = 0; + cvp_enable_cmd.opcode = VSS_IVOCPROC_CMD_ENABLE; + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_enable_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IVOCPROC_CMD_ENABLE\n"); + goto fail; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_send_netid_timing_cmd(struct voice_data *v) +{ + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + struct mvm_set_network_cmd mvm_set_network; + struct mvm_set_voice_timing_cmd mvm_set_voice_timing; + u16 mvm_handle = voice_get_mvm_handle(v); + + ret = voice_config_cvs_vocoder(v); + if (ret < 0) { + pr_aud_err("%s: Error %d configuring CVS voc", + __func__, ret); + goto fail; + } + /* Set network ID. 
*/ + pr_debug("%s: Setting network ID\n", __func__); + + mvm_set_network.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_set_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_set_network) - APR_HDR_SIZE); + mvm_set_network.hdr.src_port = 0; + mvm_set_network.hdr.dest_port = mvm_handle; + mvm_set_network.hdr.token = 0; + mvm_set_network.hdr.opcode = VSS_ICOMMON_CMD_SET_NETWORK; + mvm_set_network.network.network_id = v->mvs_info.network_type; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_network); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_NETWORK\n", __func__, ret); + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* Set voice timing. */ + pr_debug("%s: Setting voice timing\n", __func__); + + mvm_set_voice_timing.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_set_voice_timing.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_set_voice_timing) - APR_HDR_SIZE); + mvm_set_voice_timing.hdr.src_port = 0; + mvm_set_voice_timing.hdr.dest_port = mvm_handle; + mvm_set_voice_timing.hdr.token = 0; + mvm_set_voice_timing.hdr.opcode = + VSS_ICOMMON_CMD_SET_VOICE_TIMING; + mvm_set_voice_timing.timing.mode = 0; + mvm_set_voice_timing.timing.enc_offset = 8000; + mvm_set_voice_timing.timing.dec_req_offset = 3300; + mvm_set_voice_timing.timing.dec_offset = 8300; + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_voice_timing); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_TIMING\n", __func__, ret); + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_attach_vocproc(struct voice_data *v) +{ + int ret = 0; + struct mvm_attach_vocproc_cmd mvm_a_vocproc_cmd; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* send enable vocproc */ + voice_send_enable_vocproc_cmd(v); + + /* attach vocproc and wait for response */ + mvm_a_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_a_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_a_vocproc_cmd) - APR_HDR_SIZE); + pr_aud_info("send mvm_a_vocproc_cmd pkt size = %d\n", + mvm_a_vocproc_cmd.hdr.pkt_size); + mvm_a_vocproc_cmd.hdr.src_port = 0; + mvm_a_vocproc_cmd.hdr.dest_port = mvm_handle; + mvm_a_vocproc_cmd.hdr.token = 0; + mvm_a_vocproc_cmd.hdr.opcode = VSS_ISTREAM_CMD_ATTACH_VOCPROC; + mvm_a_vocproc_cmd.mvm_attach_cvp_handle.handle = cvp_handle; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_a_vocproc_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_ISTREAM_CMD_ATTACH_VOCPROC\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* send tty mode if tty device is used */ + voice_send_tty_mode_to_modem(v); + + if (v->voc_path == VOC_PATH_FULL) + voice_send_netid_timing_cmd(v); + +#ifdef 
CONFIG_MSM8X60_RTAC + rtac_add_voice(v); +#endif + return 0; +fail: + return -EINVAL; +} + +static int voice_destroy_modem_voice(struct voice_data *v) +{ + struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd; + struct apr_hdr cvp_destroy_session_cmd; + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + void *apr_cvp = voice_get_apr_cvp(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* detach VOCPROC and wait for response from mvm */ + mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_d_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_d_vocproc_cmd) - APR_HDR_SIZE); + pr_aud_info("mvm_d_vocproc_cmd pkt size = %d\n", + mvm_d_vocproc_cmd.hdr.pkt_size); + mvm_d_vocproc_cmd.hdr.src_port = 0; + mvm_d_vocproc_cmd.hdr.dest_port = mvm_handle; + mvm_d_vocproc_cmd.hdr.token = 0; + mvm_d_vocproc_cmd.hdr.opcode = VSS_ISTREAM_CMD_DETACH_VOCPROC; + mvm_d_vocproc_cmd.mvm_detach_cvp_handle.handle = cvp_handle; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_d_vocproc_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_ISTREAM_CMD_DETACH_VOCPROC\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* destrop cvp session */ + cvp_destroy_session_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_destroy_session_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_destroy_session_cmd) - APR_HDR_SIZE); + pr_aud_info("cvp_destroy_session_cmd pkt size = %d\n", + cvp_destroy_session_cmd.pkt_size); + cvp_destroy_session_cmd.src_port = 0; + cvp_destroy_session_cmd.dest_port = cvp_handle; + cvp_destroy_session_cmd.token = 0; + cvp_destroy_session_cmd.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_destroy_session_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending APRV2_IBASIC_CMD_DESTROY_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + +#ifdef CONFIG_MSM8X60_RTAC + rtac_remove_voice(v); +#endif + cvp_handle = 0; + voice_set_cvp_handle(v, cvp_handle); + + return 0; + +fail: + return -EINVAL; +} + +static int voice_send_mute_cmd_to_modem(struct voice_data *v) +{ + struct cvs_set_mute_cmd cvs_mute_cmd; + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* send mute/unmute to cvs */ + cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_mute_cmd) - APR_HDR_SIZE); + cvs_mute_cmd.hdr.src_port = 0; + cvs_mute_cmd.hdr.dest_port = cvs_handle; + cvs_mute_cmd.hdr.token = 0; + cvs_mute_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MUTE; + cvs_mute_cmd.cvs_set_mute.direction = 0; /*tx*/ + cvs_mute_cmd.cvs_set_mute.mute_flag = v->dev_tx.mute; + + pr_aud_info(" mute value =%d\n", cvs_mute_cmd.cvs_set_mute.mute_flag); + v->cvs_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_mute_cmd); + if (ret < 0) { + pr_aud_err("Fail: send STREAM SET MUTE\n"); + goto fail; + } + ret = 
wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) + pr_aud_err("%s: wait_event timeout\n", __func__); + +fail: + return 0; +} + +static int voice_send_vol_index_to_modem(struct voice_data *v) +{ + struct cvp_set_rx_volume_index_cmd cvp_vol_cmd; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* send volume index to cvp */ + cvp_vol_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_vol_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_vol_cmd) - APR_HDR_SIZE); + cvp_vol_cmd.hdr.src_port = 0; + cvp_vol_cmd.hdr.dest_port = cvp_handle; + cvp_vol_cmd.hdr.token = 0; + cvp_vol_cmd.hdr.opcode = + VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX; + cvp_vol_cmd.cvp_set_vol_idx.vol_index = v->dev_rx.volume; + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_vol_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending RX VOL INDEX\n"); + return -EINVAL; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + return 0; +} + +static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct cvs_start_record_cmd cvs_start_record; + + pr_debug("%s: Start record %d\n", __func__, rec_mode); + + cvs_start_record.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_start_record.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_start_record) - APR_HDR_SIZE); + cvs_start_record.hdr.src_port = 0; + cvs_start_record.hdr.dest_port = cvs_handle; + cvs_start_record.hdr.token = 0; + cvs_start_record.hdr.opcode = VSS_ISTREAM_CMD_START_RECORD; + + if (rec_mode == VOC_REC_UPLINK) { + cvs_start_record.rec_mode.rx_tap_point = VSS_TAP_POINT_NONE; + cvs_start_record.rec_mode.tx_tap_point = + VSS_TAP_POINT_STREAM_END; + } else if (rec_mode == VOC_REC_DOWNLINK) { + cvs_start_record.rec_mode.rx_tap_point = + VSS_TAP_POINT_STREAM_END; + cvs_start_record.rec_mode.tx_tap_point = VSS_TAP_POINT_NONE; + } else if (rec_mode == VOC_REC_BOTH) { + cvs_start_record.rec_mode.rx_tap_point = + VSS_TAP_POINT_STREAM_END; + cvs_start_record.rec_mode.tx_tap_point = + VSS_TAP_POINT_STREAM_END; + } else { + pr_aud_err("%s: Invalid in-call rec_mode %d\n", __func__, rec_mode); + + ret = -EINVAL; + goto fail; + } + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_record); + if (ret < 0) { + pr_aud_err("%s: Error %d sending START_RECORD\n", __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + return 0; + +fail: + return ret; +} + +static int voice_cvs_stop_record(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct apr_hdr cvs_stop_record; + + pr_debug("%s: Stop record\n", __func__); + + cvs_stop_record.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_stop_record.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_stop_record) - APR_HDR_SIZE); + cvs_stop_record.src_port = 0; + 
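+ /* The stop-record command is addressed to the CVS session handle set just below; the matching VSS_ISTREAM_CMD_STOP_RECORD ack is handled in modem_cvs_callback(), which flips cvs_state to CMD_STATUS_SUCCESS and wakes cvs_wait. */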
cvs_stop_record.dest_port = cvs_handle; + cvs_stop_record.token = 0; + cvs_stop_record.opcode = VSS_ISTREAM_CMD_STOP_RECORD; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_record); + if (ret < 0) { + pr_aud_err("%s: Error %d sending STOP_RECORD\n", __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + return 0; + +fail: + return ret; +} + +int voice_start_record(uint32_t rec_mode, uint32_t set) +{ + int ret = 0; + u16 cvs_handle; + + pr_debug("%s: rec_mode %d, set %d\n", __func__, rec_mode, set); + + mutex_lock(&voice.lock); + + cvs_handle = voice_get_cvs_handle(&voice); + + if (cvs_handle != 0) { + if (set) + ret = voice_cvs_start_record(&voice, rec_mode); + else + ret = voice_cvs_stop_record(&voice); + } else { + /* Cache the value for later. */ + voice.rec_info.pending = set; + voice.rec_info.rec_mode = rec_mode; + } + + mutex_unlock(&voice.lock); + + return ret; +} + +static int voice_cvs_start_playback(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct apr_hdr cvs_start_playback; + + pr_debug("%s: Start playback\n", __func__); + + cvs_start_playback.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_start_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_start_playback) - APR_HDR_SIZE); + cvs_start_playback.src_port = 0; + cvs_start_playback.dest_port = cvs_handle; + cvs_start_playback.token = 0; + cvs_start_playback.opcode = VSS_ISTREAM_CMD_START_PLAYBACK; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_playback); + if (ret < 0) { + pr_aud_err("%s: Error %d sending START_PLAYBACK\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + v->music_info.playing = 1; + + return 0; + +fail: + return ret; +} + +static int voice_cvs_stop_playback(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct apr_hdr cvs_stop_playback; + + pr_debug("%s: Stop playback\n", __func__); + + if (v->music_info.playing) { + cvs_stop_playback.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_stop_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_stop_playback) - APR_HDR_SIZE); + cvs_stop_playback.src_port = 0; + cvs_stop_playback.dest_port = cvs_handle; + cvs_stop_playback.token = 0; + + cvs_stop_playback.opcode = VSS_ISTREAM_CMD_STOP_PLAYBACK; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_playback); + if (ret < 0) { + pr_aud_err("%s: Error %d sending STOP_PLAYBACK\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + v->music_info.playing = 0; + } else { + pr_aud_err("%s: Stop playback already sent\n", __func__); + } + + return 0; + +fail: + return ret; +} + +int voice_start_playback(uint32_t set) +{ + int ret = 0; + u16 cvs_handle; + + pr_debug("%s: 
Start playback %d\n", __func__, set); + + mutex_lock(&voice.lock); + + cvs_handle = voice_get_cvs_handle(&voice); + + if (cvs_handle != 0) { + if (set) + ret = voice_cvs_start_playback(&voice); + else + ret = voice_cvs_stop_playback(&voice); + } else { + /* Cache the value for later. */ + pr_debug("%s: Caching ICP value", __func__); + + voice.music_info.pending = set; + } + + mutex_unlock(&voice.lock); + + return ret; +} + +static void voice_auddev_cb_function(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data) +{ + struct voice_data *v = &voice; + struct sidetone_cal sidetone_cal_data; + int rc = 0; + pr_aud_info("auddev_cb_function, evt_id=%d,\n", evt_id); + if ((evt_id != AUDDEV_EVT_START_VOICE) || + (evt_id != AUDDEV_EVT_END_VOICE)) { + if (evt_payload == NULL) { + pr_aud_err(" evt_payload is NULL pointer\n"); + return; + } + } + + switch (evt_id) { + case AUDDEV_EVT_START_VOICE: + mutex_lock(&v->lock); + + if ((v->voc_state == VOC_INIT) || + (v->voc_state == VOC_RELEASE)) { + v->v_call_status = VOICE_CALL_START; + if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) + && (v->dev_tx.enabled == VOICE_DEV_ENABLED)) { + rc = voice_apr_register(v); + if (rc < 0) { + mutex_unlock(&v->lock); + pr_aud_err("%s: voice apr registration" + "failed\n", __func__); + return; + } + voice_create_mvm_cvs_session(v); + voice_setup_modem_voice(v); + voice_attach_vocproc(v); + voice_send_start_voice_cmd(v); + get_sidetone_cal(&sidetone_cal_data); + msm_snddev_enable_sidetone( + v->dev_rx.dev_id, + sidetone_cal_data.enable, + sidetone_cal_data.gain); + v->voc_state = VOC_RUN; + + /* Start in-call recording if command was + * pending. */ + if (v->rec_info.pending) { + voice_cvs_start_record(v, + v->rec_info.rec_mode); + + v->rec_info.pending = 0; + } + + /* Start in-call music delivery if command was + * pending. 
*/ + if (v->music_info.pending) { + voice_cvs_start_playback(v); + + v->music_info.pending = 0; + } + } + } + + mutex_unlock(&v->lock); + break; + case AUDDEV_EVT_DEV_CHG_VOICE: + if (v->dev_rx.enabled == VOICE_DEV_ENABLED) + msm_snddev_enable_sidetone(v->dev_rx.dev_id, 0, 0); + v->dev_rx.enabled = VOICE_DEV_DISABLED; + v->dev_tx.enabled = VOICE_DEV_DISABLED; + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) { + /* send cmd to modem to do voice device change */ + voice_disable_vocproc(v); + v->voc_state = VOC_CHANGE; + } + + mutex_unlock(&v->lock); + break; + case AUDDEV_EVT_DEV_RDY: + mutex_lock(&v->lock); + + if (v->voc_state == VOC_CHANGE) { + /* get port Ids */ + if (evt_payload->voc_devinfo.dev_type == DIR_RX) { + v->dev_rx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_rx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_rx.dev_id = + evt_payload->voc_devinfo.dev_id; + v->dev_rx.enabled = VOICE_DEV_ENABLED; + } else { + v->dev_tx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_tx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_tx.enabled = VOICE_DEV_ENABLED; + v->dev_tx.dev_id = + evt_payload->voc_devinfo.dev_id; + } + if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) && + (v->dev_tx.enabled == VOICE_DEV_ENABLED)) { + voice_set_device(v); + get_sidetone_cal(&sidetone_cal_data); + msm_snddev_enable_sidetone( + v->dev_rx.dev_id, + sidetone_cal_data.enable, + sidetone_cal_data.gain); + v->voc_state = VOC_RUN; + } + } else if ((v->voc_state == VOC_INIT) || + (v->voc_state == VOC_RELEASE)) { + /* get AFE ports */ + if (evt_payload->voc_devinfo.dev_type == DIR_RX) { + /* get rx port id */ + v->dev_rx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_rx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_rx.dev_id = + evt_payload->voc_devinfo.dev_id; + v->dev_rx.enabled = VOICE_DEV_ENABLED; + } else { + /* get tx port id */ + v->dev_tx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_tx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_tx.dev_id = + evt_payload->voc_devinfo.dev_id; + v->dev_tx.enabled = VOICE_DEV_ENABLED; + } + if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) && + (v->dev_tx.enabled == VOICE_DEV_ENABLED) && + (v->v_call_status == VOICE_CALL_START)) { + rc = voice_apr_register(v); + if (rc < 0) { + mutex_unlock(&v->lock); + pr_aud_err("%s: voice apr registration" + "failed\n", __func__); + return; + } + voice_create_mvm_cvs_session(v); + voice_setup_modem_voice(v); + voice_attach_vocproc(v); + voice_send_start_voice_cmd(v); + get_sidetone_cal(&sidetone_cal_data); + msm_snddev_enable_sidetone( + v->dev_rx.dev_id, + sidetone_cal_data.enable, + sidetone_cal_data.gain); + v->voc_state = VOC_RUN; + + /* Start in-call recording if command was + * pending. */ + if (v->rec_info.pending) { + voice_cvs_start_record(v, + v->rec_info.rec_mode); + + v->rec_info.pending = 0; + } + + /* Start in-call music delivery if command was + * pending. 
*/ + if (v->music_info.pending) { + voice_cvs_start_playback(v); + + v->music_info.pending = 0; + } + } + } + + mutex_unlock(&v->lock); + break; + case AUDDEV_EVT_DEVICE_VOL_MUTE_CHG: + /* cache the mute and volume index value */ + if (evt_payload->voc_devinfo.dev_type == DIR_TX) { + v->dev_tx.mute = + evt_payload->voc_vm_info.dev_vm_val.mute; + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) + voice_send_mute_cmd_to_modem(v); + + mutex_unlock(&v->lock); + } else { + v->dev_rx.volume = evt_payload-> + voc_vm_info.dev_vm_val.vol; + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) + voice_send_vol_index_to_modem(v); + + mutex_unlock(&v->lock); + } + break; + case AUDDEV_EVT_REL_PENDING: + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) { + voice_disable_vocproc(v); + v->voc_state = VOC_CHANGE; + } + + mutex_unlock(&v->lock); + + if (evt_payload->voc_devinfo.dev_type == DIR_RX) + v->dev_rx.enabled = VOICE_DEV_DISABLED; + else + v->dev_tx.enabled = VOICE_DEV_DISABLED; + + break; + case AUDDEV_EVT_END_VOICE: + /* recover the tx mute and rx volume to the default values */ + v->dev_tx.mute = v->default_mute_val; + v->dev_rx.volume = v->default_vol_val; + if (v->dev_rx.enabled == VOICE_DEV_ENABLED) + msm_snddev_enable_sidetone(v->dev_rx.dev_id, 0, 0); + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) { + /* call stop modem voice */ + voice_send_stop_voice_cmd(v); + voice_destroy_modem_voice(v); + voice_destroy_mvm_cvs_session(v); + v->voc_state = VOC_RELEASE; + } else if (v->voc_state == VOC_CHANGE) { + voice_send_stop_voice_cmd(v); + voice_destroy_mvm_cvs_session(v); + v->voc_state = VOC_RELEASE; + } + + mutex_unlock(&v->lock); + + v->v_call_status = VOICE_CALL_END; + + break; + default: + pr_aud_err("UNKNOWN EVENT\n"); + } + return; +} +EXPORT_SYMBOL(voice_auddev_cb_function); + +int voice_set_voc_path_full(uint32_t set) +{ + int rc = 0; + + pr_aud_info("%s: %d\n", __func__, set); + + mutex_lock(&voice.lock); + + if (voice.voc_state == VOC_INIT || voice.voc_state == VOC_RELEASE) { + if (set) + voice.voc_path = VOC_PATH_FULL; + else + voice.voc_path = VOC_PATH_PASSIVE; + } else { + pr_aud_err("%s: Invalid voc path set to %d, in state %d\n", + __func__, set, voice.voc_state); + + rc = -EPERM; + } + + mutex_unlock(&voice.lock); + + return rc; +} +EXPORT_SYMBOL(voice_set_voc_path_full); + +void voice_register_mvs_cb(ul_cb_fn ul_cb, + dl_cb_fn dl_cb, + void *private_data) +{ + voice.mvs_info.ul_cb = ul_cb; + voice.mvs_info.dl_cb = dl_cb; + voice.mvs_info.private_data = private_data; +} + +void voice_config_vocoder(uint32_t media_type, + uint32_t rate, + uint32_t network_type, + uint32_t dtx_mode) +{ + voice.mvs_info.media_type = media_type; + voice.mvs_info.rate = rate; + voice.mvs_info.network_type = network_type; + voice.mvs_info.dtx_mode = dtx_mode; +} + +static int32_t modem_mvm_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *ptr; + struct voice_data *v = priv; + + pr_debug("%s\n", __func__); + pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, + data->payload_size, data->opcode); + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s:Reset event received in Voice service MVM\n", + __func__); + apr_reset(v->apr_mvm); + apr_reset(v->apr_q6_mvm); + v->apr_q6_mvm = NULL; + v->apr_mvm = NULL; + v->mvm_handle = 0; + v->mvm_q6_handle = 0; + return 0; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + if (data->payload_size) { + ptr = data->payload; + + pr_debug("%x %x\n", ptr[0], ptr[1]); + /* ping mvm service ACK */ + + if (ptr[0] == + 
VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION || + ptr[0] == + VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION) { + /* Passive session is used for voice call + * through modem. Full session is used for voice + * call through Q6. */ + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + if (!ptr[1]) { + pr_debug("%s: MVM handle is %d\n", + __func__, data->src_port); + + voice_set_mvm_handle(v, data->src_port); + } else + pr_aud_info("got NACK for sending \ + MVM create session \n"); + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_START_VOICE) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_ATTACH_VOCPROC) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_STOP_VOICE) { + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_DETACH_VOCPROC) { + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_SET_TTY_MODE) { + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == APRV2_IBASIC_CMD_DESTROY_SESSION) { + pr_aud_info("%s: DESTROY resp\n", __func__); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_ATTACH_STREAM) { + pr_aud_info("%s: ATTACH_STREAM resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_DETACH_STREAM) { + pr_debug("%s: DETACH_STREAM resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ICOMMON_CMD_SET_NETWORK) { + pr_debug("%s: SET_NETWORK resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ICOMMON_CMD_SET_VOICE_TIMING) { + pr_debug("%s: SET_VOICE_TIMING resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else + pr_debug("%s: not match cmd = 0x%x\n", + __func__, ptr[0]); + } + } + + return 0; +} + +static int32_t modem_cvs_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *ptr; + struct voice_data *v = priv; + + pr_debug("%s\n", __func__); + pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, + data->payload_size, data->opcode); + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s:Reset event received in Voice service CVS\n", + __func__); + apr_reset(v->apr_cvs); + apr_reset(v->apr_q6_cvs); + v->apr_q6_cvs = NULL; + v->apr_cvs = NULL; + v->cvs_handle = 0; + v->cvs_q6_handle = 0; + return 0; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + if (data->payload_size) { + ptr = data->payload; + + pr_aud_info("%x %x\n", ptr[0], ptr[1]); + /*response from modem CVS */ + if (ptr[0] == + VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION || + ptr[0] == + VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION) { + if (!ptr[1]) { + pr_debug("%s: CVS handle is %d\n", + __func__, data->src_port); + voice_set_cvs_handle(v, data->src_port); + } else + pr_aud_info("got NACK for sending \ + CVS create session \n"); + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA) { + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_SET_MUTE) { + v->cvs_state = CMD_STATUS_SUCCESS; + 
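+ /* Waking cvs_wait releases voice_send_mute_cmd_to_modem(), which is blocked in wait_event_timeout() on this state change. */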
wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_SET_MEDIA_TYPE) { + pr_debug("%s: SET_MEDIA resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE) { + pr_debug("%s: SET_AMR_RATE resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE) { + pr_debug("%s: SET_AMR_WB_RATE resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_SET_ENC_DTX_MODE) { + pr_debug("%s: SET_DTX resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE) { + pr_debug("%s: SET_CDMA_RATE resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == APRV2_IBASIC_CMD_DESTROY_SESSION) { + pr_debug("%s: DESTROY resp\n", __func__); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_START_RECORD) { + pr_debug("%s: START_RECORD resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_STOP_RECORD) { + pr_debug("%s: STOP_RECORD resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); +#ifdef CONFIG_MSM8X60_RTAC + } else if (ptr[0] == VOICE_CMD_SET_PARAM) { + rtac_make_voice_callback(RTAC_CVS, ptr, + data->payload_size); +#endif + } else if (ptr[0] == VSS_ISTREAM_CMD_START_PLAYBACK) { + pr_debug("%s: START_PLAYBACK resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_STOP_PLAYBACK) { + pr_debug("%s: STOP_PLAYBACK resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + } + } else if (data->opcode == VSS_ISTREAM_EVT_SEND_ENC_BUFFER) { + uint32_t *voc_pkt = data->payload; + uint32_t pkt_len = data->payload_size; + + if (voc_pkt != NULL && v->mvs_info.ul_cb != NULL) { + pr_debug("%s: Media type is 0x%x\n", + __func__, voc_pkt[0]); + + /* Remove media ID from payload. 
*/ + voc_pkt++; + pkt_len = pkt_len - 4; + + v->mvs_info.ul_cb((uint8_t *)voc_pkt, + pkt_len, + v->mvs_info.private_data); + } else { + pr_aud_err("%s: voc_pkt is 0x%x ul_cb is 0x%x\n", + __func__, (unsigned int)voc_pkt, + (unsigned int) v->mvs_info.ul_cb); + } + } else if (data->opcode == VSS_ISTREAM_EVT_SEND_DEC_BUFFER) { + pr_debug("%s: Send dec buf resp\n", __func__); + } else if (data->opcode == VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER) { + struct cvs_send_dec_buf_cmd send_dec_buf; + int ret = 0; + uint32_t pkt_len = 0; + + if (v->mvs_info.dl_cb != NULL) { + send_dec_buf.dec_buf.media_id = v->mvs_info.media_type; + + v->mvs_info.dl_cb( + (uint8_t *)&send_dec_buf.dec_buf.packet_data, + &pkt_len, + v->mvs_info.private_data); + + send_dec_buf.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + send_dec_buf.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(send_dec_buf.dec_buf.media_id) + pkt_len); + send_dec_buf.hdr.src_port = 0; + send_dec_buf.hdr.dest_port = voice_get_cvs_handle(v); + send_dec_buf.hdr.token = 0; + send_dec_buf.hdr.opcode = + VSS_ISTREAM_EVT_SEND_DEC_BUFFER; + + ret = apr_send_pkt(voice_get_apr_cvs(v), + (uint32_t *) &send_dec_buf); + if (ret < 0) { + pr_aud_err("%s: Error %d sending DEC_BUF\n", + __func__, ret); + goto fail; + } + } else { + pr_aud_err("%s: ul_cb is NULL\n", __func__); + } +#ifdef CONFIG_MSM8X60_RTAC + } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) { + rtac_make_voice_callback(RTAC_CVS, data->payload, + data->payload_size); +#endif + + } else { + pr_debug("%s: Unknown opcode 0x%x\n", __func__, data->opcode); + } + +fail: + return 0; +} + +static int32_t modem_cvp_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *ptr; + struct voice_data *v = priv; + + pr_debug("%s\n", __func__); + pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, + data->payload_size, data->opcode); + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s:Reset event received in Voice service CVP\n", + __func__); + apr_reset(v->apr_cvp); + apr_reset(v->apr_q6_cvp); + v->apr_q6_cvp = NULL; + v->apr_cvp = NULL; + v->cvp_handle = 0; + v->cvp_q6_handle = 0; + return 0; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + if (data->payload_size) { + ptr = data->payload; + + pr_aud_info("%x %x\n", ptr[0], ptr[1]); + /*response from modem CVP */ + if (ptr[0] == + VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + if (!ptr[1]) { + voice_set_cvp_handle(v, data->src_port); + pr_debug("cvphdl=%d\n", data->src_port); + } else + pr_aud_info("got NACK from CVP create \ + session response\n"); + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == + VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == VSS_IVOCPROC_CMD_SET_DEVICE) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == + VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == VSS_IVOCPROC_CMD_ENABLE) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == VSS_IVOCPROC_CMD_DISABLE) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == APRV2_IBASIC_CMD_DESTROY_SESSION) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == + VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE 
+ ) { + + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); +#ifdef CONFIG_MSM8X60_RTAC + } else if (ptr[0] == VOICE_CMD_SET_PARAM) { + rtac_make_voice_callback(RTAC_CVP, ptr, + data->payload_size); +#endif + } else + pr_debug("%s: not match cmd = 0x%x\n", + __func__, ptr[0]); + } +#ifdef CONFIG_MSM8X60_RTAC + } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) { + rtac_make_voice_callback(RTAC_CVP, data->payload, + data->payload_size); +#endif + } + return 0; +} + + +static int __init voice_init(void) +{ + int rc = 0; + struct voice_data *v = &voice; + pr_aud_info("%s\n", __func__); /* Macro prints the file name and function */ + /* set default value */ + v->default_mute_val = 1; /* default is mute */ + v->default_vol_val = 0; + v->default_sample_val = 8000; + + /* initialize dev_rx and dev_tx */ + memset(&v->dev_tx, 0, sizeof(struct device_data)); + memset(&v->dev_rx, 0, sizeof(struct device_data)); + v->dev_rx.volume = v->default_vol_val; + v->dev_tx.mute = v->default_mute_val; + + v->voc_state = VOC_INIT; + v->voc_path = VOC_PATH_PASSIVE; + v->adsp_version = 0; + init_waitqueue_head(&v->mvm_wait); + init_waitqueue_head(&v->cvs_wait); + init_waitqueue_head(&v->cvp_wait); + + mutex_init(&v->lock); + + v->mvm_handle = 0; + v->cvs_handle = 0; + v->cvp_handle = 0; + + v->mvm_q6_handle = 0; + v->cvs_q6_handle = 0; + v->cvp_q6_handle = 0; + + v->apr_mvm = NULL; + v->apr_cvs = NULL; + v->apr_cvp = NULL; + + v->apr_q6_mvm = NULL; + v->apr_q6_cvs = NULL; + v->apr_q6_cvp = NULL; + + /* Initialize MVS info. */ + memset(&v->mvs_info, 0, sizeof(v->mvs_info)); + v->mvs_info.network_type = VSS_NETWORK_ID_DEFAULT; + + v->rec_info.pending = 0; + v->rec_info.rec_mode = VOC_REC_NONE; + + memset(&v->music_info, 0, sizeof(v->music_info)); + + v->device_events = AUDDEV_EVT_DEV_CHG_VOICE | + AUDDEV_EVT_DEV_RDY | + AUDDEV_EVT_REL_PENDING | + AUDDEV_EVT_START_VOICE | + AUDDEV_EVT_END_VOICE | + AUDDEV_EVT_DEVICE_VOL_MUTE_CHG | + AUDDEV_EVT_FREQ_CHG; + + pr_debug("to register call back\n"); + /* register callback to auddev */ + auddev_register_evt_listner(v->device_events, AUDDEV_CLNT_VOC, + 0, voice_auddev_cb_function, v); + + return rc; +} + +device_initcall(voice_init); diff --git a/arch/arm/mach-msm/qdsp6v3/qcelp_in.c b/arch/arm/mach-msm/qdsp6v3/qcelp_in.c new file mode 100644 index 00000000..f1986cef --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/qcelp_in.c @@ -0,0 +1,334 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((35+sizeof(struct meta_out_dsp)) * 10)) + +void q6asm_qcelp_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode - %d\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d:ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} +/* ------------------- device --------------------- */ +static long qcelp_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_qcelp_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + /* reduced_rate_level, rate_modulation_cmd set to zero + currently not configurable from user space */ + rc = q6asm_enc_cfg_blk_qcelp(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate, 0, 0); + + if (rc < 0) { + pr_aud_err("%s:session id %d: cmd qcelp media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__, + audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", 
__func__, audio->ac->session, + rc); + break; + } + break; + } + case AUDIO_GET_QCELP_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_qcelp_enc_config))) + rc = -EFAULT; + break; + } + case AUDIO_SET_QCELP_ENC_CONFIG: { + struct msm_audio_qcelp_enc_config cfg; + struct msm_audio_qcelp_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_qcelp_enc_config))) { + rc = -EFAULT; + break; + } + + if (cfg.min_bit_rate > 4 || + cfg.min_bit_rate < 1) { + pr_aud_err("%s:session id %d: invalid min bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + if (cfg.max_bit_rate > 4 || + cfg.max_bit_rate < 1) { + pr_aud_err("%s:session id %d: invalid max bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->min_bit_rate = cfg.min_bit_rate; + enc_cfg->max_bit_rate = cfg.max_bit_rate; + pr_debug("%s:session id %d: min_bit_rate= 0x%x\ + max_bit_rate=0x%x\n", __func__, + audio->ac->session, enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate); + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int qcelp_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_qcelp_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for qcelp\ + driver\n", __func__, audio->ac->session); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_qcelp_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for aac\ + config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 35; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->min_bit_rate = 4; + enc_cfg->max_bit_rate = 4; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_qcelp_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s:session id %d: Could not allocate memory for audio\ + client\n", __func__, audio->ac->session); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open qcelp encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_V13K, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: NT mode encoder success\n", __func__, + audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = 
q6asm_open_read(audio->ac, FORMAT_V13K); + if (rc < 0) { + pr_aud_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = qcelp_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = qcelp_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_qcelp_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_qcelp_in", + .fops = &audio_in_fops, +}; + +static int __init qcelp_in_init(void) +{ + return misc_register(&audio_qcelp_in_misc); +} + +device_initcall(qcelp_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/rtac.h b/arch/arm/mach-msm/qdsp6v3/rtac.h new file mode 100644 index 00000000..fd7f85f0 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/rtac.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __RTAC_H__ +#define __RTAC_H__ + +#ifdef CONFIG_MSM8X60_RTAC + +#include +#include + +/* Voice Modes */ +#define RTAC_CVP 0 +#define RTAC_CVS 1 +#define RTAC_VOICE_MODES 2 + +void update_rtac(u32 evt_id, u32 dev_id, struct msm_snddev_info *dev_info); +void rtac_add_adm_device(u32 port_id, u32 popp_id); +void rtac_remove_adm_device(u32 port_id); +void rtac_add_voice(struct voice_data *v); +void rtac_remove_voice(struct voice_data *v); +void rtac_set_adm_handle(void *handle); +bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size); +void rtac_copy_adm_payload_to_user(void *payload, u32 payload_size); +void rtac_set_asm_handle(u32 session_id, void *handle); +bool rtac_make_asm_callback(u32 session_id, uint32_t *payload, + u32 payload_size); +void rtac_copy_asm_payload_to_user(void *payload, u32 payload_size); +void rtac_set_voice_handle(u32 mode, void *handle); +bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size); +void rtac_copy_voice_payload_to_user(void *payload, u32 payload_size); + +#endif + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c b/arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c new file mode 100644 index 00000000..fc944ef7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c @@ -0,0 +1,390 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ECODEC_SAMPLE_RATE 8000 +static struct q6v2audio_ecodec_ops default_audio_ops; +static struct q6v2audio_ecodec_ops *audio_ops = &default_audio_ops; + +/* Context for each external codec device */ +struct snddev_ecodec_state { + struct snddev_ecodec_data *data; + u32 sample_rate; +}; + +/* Global state for the driver */ +struct snddev_ecodec_drv_state { + struct mutex dev_lock; + int ref_cnt; /* ensure one rx device at a time */ + struct clk *ecodec_clk; +}; + +static struct snddev_ecodec_drv_state snddev_ecodec_drv; + +struct aux_pcm_state { + unsigned int dout; + unsigned int din; + unsigned int syncout; + unsigned int clkin_a; +}; + +static struct aux_pcm_state the_aux_pcm_state; + +static int aux_pcm_gpios_request(void) +{ + int rc = 0; + uint32_t bt_config_gpio[] = { + GPIO_CFG(the_aux_pcm_state.dout, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.din, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.syncout, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.clkin_a, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + }; + pr_debug("%s\n", __func__); + gpio_tlmm_config(bt_config_gpio[0], GPIO_CFG_ENABLE); + gpio_tlmm_config(bt_config_gpio[1], GPIO_CFG_ENABLE); + gpio_tlmm_config(bt_config_gpio[2], GPIO_CFG_ENABLE); + gpio_tlmm_config(bt_config_gpio[3], GPIO_CFG_ENABLE); + + return rc; +} + +static void aux_pcm_gpios_free(void) +{ + uint32_t bt_config_gpio[] = { + GPIO_CFG(the_aux_pcm_state.dout, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.din, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.syncout, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.clkin_a, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + }; + pr_debug("%s\n", __func__); + gpio_tlmm_config(bt_config_gpio[0], GPIO_CFG_DISABLE); + gpio_tlmm_config(bt_config_gpio[1], GPIO_CFG_DISABLE); + gpio_tlmm_config(bt_config_gpio[2], GPIO_CFG_DISABLE); + gpio_tlmm_config(bt_config_gpio[3], GPIO_CFG_DISABLE); +} + +static int get_aux_pcm_gpios(struct platform_device *pdev) +{ + int rc = 0; + struct resource *res; + + /* Claim all of the GPIOs. 
*/ + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_dout"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM DOUT\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.dout = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_din"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM DIN\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.din = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "aux_pcm_syncout"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM SYNC OUT\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.syncout = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "aux_pcm_clkin_a"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM CLKIN A\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.clkin_a = res->start; + + pr_aud_info("%s: dout = %u, din = %u , syncout = %u, clkin_a =%u\n", + __func__, the_aux_pcm_state.dout, the_aux_pcm_state.din, + the_aux_pcm_state.syncout, the_aux_pcm_state.clkin_a); + + return rc; +} + +static int aux_pcm_probe(struct platform_device *pdev) +{ + int rc = 0; + + pr_aud_info("%s:\n", __func__); + + rc = get_aux_pcm_gpios(pdev); + if (rc < 0) { + pr_aud_err("%s: GPIO configuration failed\n", __func__); + return -ENODEV; + } + return rc; +} + +static struct platform_driver aux_pcm_driver = { + .probe = aux_pcm_probe, + .driver = { .name = "msm_aux_pcm"} +}; + +static int snddev_ecodec_open(struct msm_snddev_info *dev_info) +{ + int rc; + struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; + union afe_port_config afe_config; + + pr_debug("%s\n", __func__); + + mutex_lock(&drv->dev_lock); + + if (dev_info->opened) { + pr_aud_err("%s: ERROR: %s already opened\n", __func__, + dev_info->name); + mutex_unlock(&drv->dev_lock); + return -EBUSY; + } + + if (drv->ref_cnt != 0) { + pr_debug("%s: opened %s\n", __func__, dev_info->name); + drv->ref_cnt++; + mutex_unlock(&drv->dev_lock); + return 0; + } + + pr_aud_info("%s: opening %s\n", __func__, dev_info->name); + + rc = aux_pcm_gpios_request(); + if (rc < 0) { + pr_aud_err("%s: GPIO request failed\n", __func__); + return rc; + } + + clk_reset(drv->ecodec_clk, CLK_RESET_ASSERT); + + afe_config.pcm.mode = AFE_PCM_CFG_MODE_PCM; + afe_config.pcm.sync = AFE_PCM_CFG_SYNC_INT; + afe_config.pcm.frame = AFE_PCM_CFG_FRM_256BPF; + afe_config.pcm.quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD; + afe_config.pcm.slot = 0; + afe_config.pcm.data = AFE_PCM_CFG_CDATAOE_MASTER; + + rc = afe_open(PCM_RX, &afe_config, ECODEC_SAMPLE_RATE); + if (rc < 0) { + pr_aud_err("%s: afe open failed for PCM_RX\n", __func__); + goto err_rx_afe; + } + + rc = afe_open(PCM_TX, &afe_config, ECODEC_SAMPLE_RATE); + if (rc < 0) { + pr_aud_err("%s: afe open failed for PCM_TX\n", __func__); + goto err_tx_afe; + } + + rc = clk_set_rate(drv->ecodec_clk, 2048000); + if (rc < 0) { + pr_aud_err("%s: clk_set_rate failed\n", __func__); + goto err_clk; + } + + clk_enable(drv->ecodec_clk); + + clk_reset(drv->ecodec_clk, CLK_RESET_DEASSERT); + + drv->ref_cnt++; + mutex_unlock(&drv->dev_lock); + + return 0; + +err_clk: + afe_close(PCM_TX); +err_tx_afe: + afe_close(PCM_RX); +err_rx_afe: + aux_pcm_gpios_free(); + mutex_unlock(&drv->dev_lock); + return -ENODEV; +} + +int snddev_ecodec_close(struct msm_snddev_info *dev_info) +{ + struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; + + pr_debug("%s: closing %s\n", __func__, dev_info->name); + + mutex_lock(&drv->dev_lock); + + if (!dev_info->opened) { + 
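+ /* Closing a device that was never opened is a caller error; report it and return without touching the shared ref count. */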
pr_aud_err("%s: ERROR: %s is not opened\n", __func__, + dev_info->name); + mutex_unlock(&drv->dev_lock); + return -EPERM; + } + + drv->ref_cnt--; + + if (drv->ref_cnt == 0) { + + pr_aud_info("%s: closing all devices\n", __func__); + + clk_disable(drv->ecodec_clk); + aux_pcm_gpios_free(); + + afe_close(PCM_RX); + afe_close(PCM_TX); + } + + mutex_unlock(&drv->dev_lock); + + return 0; +} + +int snddev_ecodec_set_freq(struct msm_snddev_info *dev_info, u32 rate) +{ + int rc = 0; + + if (!dev_info) { + rc = -EINVAL; + goto error; + } + return ECODEC_SAMPLE_RATE; + +error: + return rc; +} + +void htc_8x60_register_ecodec_ops(struct q6v2audio_ecodec_ops *ops) +{ + audio_ops = ops; +} + +static int snddev_ecodec_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_ecodec_data *pdata; + struct msm_snddev_info *dev_info; + struct snddev_ecodec_state *ecodec; + + pr_aud_info("%s:\n", __func__); + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + rc = -1; + goto error; + } + pdata = pdev->dev.platform_data; + + ecodec = kzalloc(sizeof(struct snddev_ecodec_state), GFP_KERNEL); + if (!ecodec) { + rc = -ENOMEM; + goto error; + } + + dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + kfree(ecodec); + rc = -ENOMEM; + goto error; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->private_data = (void *)ecodec; + dev_info->dev_ops.open = snddev_ecodec_open; + dev_info->dev_ops.close = snddev_ecodec_close; + dev_info->dev_ops.set_freq = snddev_ecodec_set_freq; + dev_info->dev_ops.enable_sidetone = NULL; + dev_info->capability = pdata->capability; + dev_info->opened = 0; + + msm_snddev_register(dev_info); + + ecodec->data = pdata; + ecodec->sample_rate = ECODEC_SAMPLE_RATE; /* Default to 8KHz */ +error: + return rc; +} + +struct platform_driver snddev_ecodec_driver = { + .probe = snddev_ecodec_probe, + .driver = {.name = "msm_snddev_ecodec"} +}; + +int __init snddev_ecodec_init(void) +{ + int rc = 0; + struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; + + pr_aud_info("%s:\n", __func__); + + mutex_init(&drv->dev_lock); + drv->ref_cnt = 0; + + drv->ecodec_clk = clk_get(NULL, "pcm_clk"); + if (IS_ERR(drv->ecodec_clk)) { + pr_aud_err("%s: could not get pcm_clk\n", __func__); + return PTR_ERR(drv->ecodec_clk); + } + + rc = platform_driver_register(&aux_pcm_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for aux pcm failed\n", + __func__); + goto error_aux_pcm_platform_driver; + } + + rc = platform_driver_register(&snddev_ecodec_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for ecodec failed\n", + __func__); + goto error_ecodec_platform_driver; + } + pr_aud_info("%s: done\n", __func__); + + return 0; + +error_ecodec_platform_driver: + platform_driver_unregister(&aux_pcm_driver); +error_aux_pcm_platform_driver: + clk_put(drv->ecodec_clk); + + pr_aud_err("%s: encounter error\n", __func__); + return -ENODEV; +} + +device_initcall(snddev_ecodec_init); + +MODULE_DESCRIPTION("ECodec Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c b/arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c new file mode 100644 index 00000000..9e661c8b --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c @@ -0,0 +1,182 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_MUTEX(snddev_hdmi_lock); +static int snddev_hdmi_active; + +static int snddev_hdmi_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + union afe_port_config afe_config; + struct snddev_hdmi_data *snddev_hdmi_data; + + if (!dev_info) { + pr_aud_err("msm_snddev_info is null\n"); + return -EINVAL; + } + + snddev_hdmi_data = dev_info->private_data; + + mutex_lock(&snddev_hdmi_lock); + + if (snddev_hdmi_active) { + pr_aud_err("HDMI snddev already active\n"); + mutex_unlock(&snddev_hdmi_lock); + return -EBUSY; + } + afe_config.hdmi.channel_mode = snddev_hdmi_data->channel_mode; + afe_config.hdmi.bitwidth = 16; + afe_config.hdmi.data_type = 0; + rc = afe_open(snddev_hdmi_data->copp_id, &afe_config, + dev_info->sample_rate); + + if (rc < 0) { + pr_aud_err("afe_open failed\n"); + mutex_unlock(&snddev_hdmi_lock); + return -EINVAL; + } + snddev_hdmi_active = 1; + + pr_debug("%s open done\n", dev_info->name); + + mutex_unlock(&snddev_hdmi_lock); + + return 0; +} + +static int snddev_hdmi_close(struct msm_snddev_info *dev_info) +{ + if (!dev_info) { + pr_aud_err("msm_snddev_info is null\n"); + return -EINVAL; + } + + if (!dev_info->opened) { + pr_aud_err("calling close device with out opening the" + " device\n"); + return -EPERM; + } + mutex_lock(&snddev_hdmi_lock); + + if (!snddev_hdmi_active) { + pr_aud_err("HDMI snddev not active\n"); + mutex_unlock(&snddev_hdmi_lock); + return -EPERM; + } + snddev_hdmi_active = 0; + + afe_close(HDMI_RX); + + pr_debug("%s closed\n", dev_info->name); + mutex_unlock(&snddev_hdmi_lock); + + return 0; +} + +static int snddev_hdmi_set_freq(struct msm_snddev_info *dev_info, u32 req_freq) +{ + if (req_freq != 48000) { + pr_debug("Unsupported Frequency:%d\n", req_freq); + return -EINVAL; + } + return 48000; +} + +static int snddev_hdmi_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_hdmi_data *pdata; + struct msm_snddev_info *dev_info; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + return -ENODEV; + } + + pdata = pdev->dev.platform_data; + if (!(pdata->capability & SNDDEV_CAP_RX)) { + pr_aud_err("invalid device data either RX or TX\n"); + return -ENODEV; + } + + dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + pr_aud_err("unable to allocate memeory for msm_snddev_info\n"); + return -ENOMEM; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->acdb_id = pdata->acdb_id; + dev_info->private_data = (void *)pdata; + dev_info->dev_ops.open = snddev_hdmi_open; + dev_info->dev_ops.close = snddev_hdmi_close; + dev_info->dev_ops.set_freq = snddev_hdmi_set_freq; + dev_info->capability = pdata->capability; + 
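+ /* Note: msm_snddev_register() is called just below before sample_rate is copied from pdata->default_sample_rate. */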
dev_info->opened = 0; + msm_snddev_register(dev_info); + dev_info->sample_rate = pdata->default_sample_rate; + + pr_debug("probe done for %s\n", pdata->name); + return rc; +} + +static struct platform_driver snddev_hdmi_driver = { + .probe = snddev_hdmi_probe, + .driver = {.name = "snddev_hdmi"} +}; + +static int __init snddev_hdmi_init(void) +{ + s32 rc; + + rc = platform_driver_register(&snddev_hdmi_driver); + if (IS_ERR_VALUE(rc)) { + + pr_aud_err("platform_driver_register failed.\n"); + goto error_platform_driver; + } + + pr_debug("snddev_hdmi_init : done\n"); + + return 0; + +error_platform_driver: + + pr_aud_err("encounterd error\n"); + return -ENODEV; +} + +module_init(snddev_hdmi_init); + +MODULE_DESCRIPTION("HDMI Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_icodec.c b/arch/arm/mach-msm/qdsp6v3/snddev_icodec.c new file mode 100644 index 00000000..f23797cf --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_icodec.c @@ -0,0 +1,1189 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" + +#define SNDDEV_ICODEC_PCM_SZ 32 /* 16 bit / sample stereo mode */ +#define SNDDEV_ICODEC_MUL_FACTOR 3 /* Multi by 8 Shift by 3 */ +#define SNDDEV_ICODEC_CLK_RATE(freq) \ + (((freq) * (SNDDEV_ICODEC_PCM_SZ)) << (SNDDEV_ICODEC_MUL_FACTOR)) +#define SNDDEV_LOW_POWER_MODE 0 +#define SNDDEV_HIGH_POWER_MODE 1 +/* Voltage required for S4 in microVolts, 2.2V or 2200000microvolts */ +#define SNDDEV_VREG_8058_S4_VOLTAGE (2200000) +/* Load Current required for S4 in microAmps, + 36mA - 56mA */ +#define SNDDEV_VREG_LOW_POWER_LOAD (36000) +#define SNDDEV_VREG_HIGH_POWER_LOAD (56000) + +static struct q6v2audio_icodec_ops default_audio_ops; +static struct q6v2audio_icodec_ops *audio_ops = &default_audio_ops; +static int support_aic3254; +static int support_adie; +static int support_aic3254_use_mclk; +static int aic3254_use_mclk_counter; +int msm_codec_i2s_slave_mode = 1; +static struct q6v2audio_aic3254_ops default_aic3254_ops; +static struct q6v2audio_aic3254_ops *aic3254_ops = &default_aic3254_ops; + +/* Global state for the driver */ +struct snddev_icodec_drv_state { + struct mutex rx_lock; + struct mutex tx_lock; + u32 rx_active; /* ensure one rx device at a time */ + u32 tx_active; /* ensure one tx device at a time */ + struct clk *rx_osrclk; + struct clk *rx_bitclk; + struct clk *tx_osrclk; + struct clk *tx_bitclk; + + struct wake_lock rx_idlelock; + struct wake_lock tx_idlelock; + + /* handle to pmic8058 regulator smps4 */ + struct regulator *snddev_vreg; + + struct mutex rx_mclk_lock; +}; + +static struct snddev_icodec_drv_state snddev_icodec_drv; + +struct regulator *vreg_init(void) +{ + int rc; + struct regulator *vreg_ptr; + + vreg_ptr = regulator_get(NULL, "8058_s4"); + if (IS_ERR(vreg_ptr)) { + pr_aud_err("%s: regulator_get 8058_s4 failed\n", __func__); + return NULL; + } + + rc = regulator_set_voltage(vreg_ptr, SNDDEV_VREG_8058_S4_VOLTAGE, + SNDDEV_VREG_8058_S4_VOLTAGE); + if (rc == 0) + return vreg_ptr; + else + return NULL; +} + +static void vreg_deinit(struct regulator *vreg) +{ + regulator_put(vreg); +} + +static void vreg_mode_vote(struct regulator *vreg, int enable, int mode) +{ + int rc; + if (enable) { + rc = regulator_enable(vreg); + if (rc != 0) + pr_aud_err("%s:Enabling regulator failed\n", __func__); + else { + if (mode) + regulator_set_optimum_mode(vreg, + SNDDEV_VREG_HIGH_POWER_LOAD); + else + regulator_set_optimum_mode(vreg, + SNDDEV_VREG_LOW_POWER_LOAD); + } + } else { + rc = regulator_disable(vreg); + if (rc != 0) + pr_aud_err("%s:Disabling regulator failed\n", __func__); + } +} + +struct msm_cdcclk_ctl_state { + unsigned int rx_mclk; + unsigned int rx_mclk_requested; + unsigned int tx_mclk; + unsigned int tx_mclk_requested; +}; + +static struct msm_cdcclk_ctl_state the_msm_cdcclk_ctl_state; + +static int msm_snddev_rx_mclk_request(void) +{ + int rc = 0; +/* + rc = gpio_request(the_msm_cdcclk_ctl_state.rx_mclk, + "MSM_SNDDEV_RX_MCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MSM SNDDEV RX failed\n", __func__); + return rc; + } + the_msm_cdcclk_ctl_state.rx_mclk_requested = 1; +*/ + return rc; +} +static int msm_snddev_tx_mclk_request(void) +{ + int rc = 0; +/* + rc = gpio_request(the_msm_cdcclk_ctl_state.tx_mclk, + "MSM_SNDDEV_TX_MCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MSM SNDDEV TX failed\n", __func__); + return rc; + } 
+ the_msm_cdcclk_ctl_state.tx_mclk_requested = 1; +*/ + return rc; +} +static void msm_snddev_rx_mclk_free(void) +{ + if (the_msm_cdcclk_ctl_state.rx_mclk_requested) { + gpio_free(the_msm_cdcclk_ctl_state.rx_mclk); + the_msm_cdcclk_ctl_state.rx_mclk_requested = 0; + } +} +static void msm_snddev_tx_mclk_free(void) +{ + if (the_msm_cdcclk_ctl_state.tx_mclk_requested) { + gpio_free(the_msm_cdcclk_ctl_state.tx_mclk); + the_msm_cdcclk_ctl_state.tx_mclk_requested = 0; + } +} + +static int get_msm_cdcclk_ctl_gpios(struct platform_device *pdev) +{ + int rc = 0; + struct resource *res; + + /* Claim all of the GPIOs. */ + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "msm_snddev_rx_mclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MSM SNDDEV RX\n", __func__); + return -ENODEV; + } + the_msm_cdcclk_ctl_state.rx_mclk = res->start; + the_msm_cdcclk_ctl_state.rx_mclk_requested = 0; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "msm_snddev_tx_mclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MSM SNDDEV TX\n", __func__); + return -ENODEV; + } + the_msm_cdcclk_ctl_state.tx_mclk = res->start; + the_msm_cdcclk_ctl_state.tx_mclk_requested = 0; + + return rc; +} +static int msm_cdcclk_ctl_probe(struct platform_device *pdev) +{ + int rc = 0; + + pr_aud_info("%s:\n", __func__); + + rc = get_msm_cdcclk_ctl_gpios(pdev); + if (rc < 0) { + pr_aud_err("%s: GPIO configuration failed\n", __func__); + return -ENODEV; + } + return rc; +} +static struct platform_driver msm_cdcclk_ctl_driver = { + .probe = msm_cdcclk_ctl_probe, + .driver = { .name = "msm_cdcclk_ctl"} +}; + +static int snddev_icodec_rxclk_enable(struct snddev_icodec_state *icodec, + int en) +{ + int trc; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + mutex_lock(&drv->rx_mclk_lock); + if (en) { + if (aic3254_use_mclk_counter == 0) { + drv->rx_osrclk = clk_get(0, "i2s_spkr_osr_clk"); + if (IS_ERR(drv->rx_osrclk)) { + pr_aud_err("%s turning on RX MCLK Error\n", \ + __func__); + goto error_invalid_osrclk; + } + + trc = clk_set_rate(drv->rx_osrclk, \ + SNDDEV_ICODEC_CLK_RATE(\ + icodec->sample_rate)); + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting RX m clock1\n"); + goto error_invalid_freq; + } + clk_enable(drv->rx_osrclk); + } + + aic3254_use_mclk_counter++; + + } else { + if (aic3254_use_mclk_counter > 0) { + aic3254_use_mclk_counter--; + if (aic3254_use_mclk_counter == 0) + clk_disable(drv->rx_osrclk); + } else + pr_aud_info("%s: counter error!\n", __func__); + } + + mutex_unlock(&drv->rx_mclk_lock); + + pr_aud_info("%s: en: %d counter: %d\n", __func__, en, \ + aic3254_use_mclk_counter); + + return 0; + +error_invalid_osrclk: +error_invalid_freq: + pr_aud_err("%s: encounter error\n", __func__); + msm_snddev_rx_mclk_free(); + + mutex_unlock(&drv->rx_mclk_lock); + return -ENODEV; +} + +static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec) +{ + int trc; + int rc_clk; + int afe_channel_mode; + union afe_port_config afe_config; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + wake_lock(&drv->rx_idlelock); + + if (drv->snddev_vreg) { + if (!strcmp(icodec->data->name, "headset_stereo_rx")) + vreg_mode_vote(drv->snddev_vreg, 1, + SNDDEV_LOW_POWER_MODE); + else + vreg_mode_vote(drv->snddev_vreg, 1, + SNDDEV_HIGH_POWER_MODE); + } + + if (support_aic3254_use_mclk) { + rc_clk = snddev_icodec_rxclk_enable(icodec, 1); + if (IS_ERR_VALUE(rc_clk)) { + pr_aud_err("%s Enable RX master clock Error\n", \ + __func__); + goto error_invalid_freq; + } + } else { + 
msm_snddev_rx_mclk_request(); + + drv->rx_osrclk = clk_get(0, "i2s_spkr_osr_clk"); + if (IS_ERR(drv->rx_osrclk)) + pr_aud_err("%s master clock Error\n", __func__); + + trc = clk_set_rate(drv->rx_osrclk, + SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting m clock1\n"); + goto error_invalid_freq; + } + + clk_enable(drv->rx_osrclk); + } + + drv->rx_bitclk = clk_get(0, "i2s_spkr_bit_clk"); + if (IS_ERR(drv->rx_bitclk)) + pr_aud_err("%s clock Error\n", __func__); + + /* *************************************** + * 1. CPU MASTER MODE: + * Master clock = Sample Rate * OSR rate bit clock + * OSR Rate bit clock = bit/sample * channel master + * clock / bit clock = divider value = 8 + * + * 2. CPU SLAVE MODE: + * bitclk = 0 + * *************************************** */ + + if (msm_codec_i2s_slave_mode) { + pr_debug("%s: configuring bit clock for slave mode\n", + __func__); + trc = clk_set_rate(drv->rx_bitclk, 0); + } else + trc = clk_set_rate(drv->rx_bitclk, 8); + + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting m clock1\n"); + goto error_adie; + } + clk_enable(drv->rx_bitclk); + + if (icodec->data->voltage_on) + icodec->data->voltage_on(1); + + if (support_aic3254) { + if (aic3254_ops->aic3254_set_mode) { + if (msm_get_call_state() == 1) + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_RX, + icodec->data->aic3254_voc_id); + else + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_RX, + icodec->data->aic3254_id); + } + } + if (support_adie) { + /* Configure ADIE */ + trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); + if (IS_ERR_VALUE(trc)) + pr_aud_err("%s: adie codec open failed\n", __func__); + else + adie_codec_setpath(icodec->adie_path, + icodec->sample_rate, 256); + /* OSR default to 256, can be changed for power optimization + * If OSR is to be changed, need clock API for setting the divider + */ + } + + switch (icodec->data->channel_mode) { + case 2: + afe_channel_mode = MSM_AFE_STEREO; + break; + case 1: + default: + afe_channel_mode = MSM_AFE_MONO; + break; + } + afe_config.mi2s.channel = afe_channel_mode; + afe_config.mi2s.bitwidth = 16; + afe_config.mi2s.line = 1; + if (msm_codec_i2s_slave_mode) + afe_config.mi2s.ws = 0; + else + afe_config.mi2s.ws = 1; + + trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate); + + if (support_adie) { + /* Enable ADIE */ + if (icodec->adie_path) { + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_READY); + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_ANALOG_READY); + } + + if (msm_codec_i2s_slave_mode) + adie_codec_set_master_mode(icodec->adie_path, 1); + else + adie_codec_set_master_mode(icodec->adie_path, 0); + } + /* Enable power amplifier */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(1); + + icodec->enabled = 1; + + wake_unlock(&drv->rx_idlelock); + return 0; + +error_adie: + clk_disable(drv->rx_bitclk); + clk_disable(drv->rx_osrclk); +error_invalid_freq: + + pr_aud_err("%s: encounter error\n", __func__); + msm_snddev_rx_mclk_free(); + + wake_unlock(&drv->rx_idlelock); + return -ENODEV; +} + +static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec) +{ + int trc; + int rc_clk; + int afe_channel_mode; + union afe_port_config afe_config; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;; + + wake_lock(&drv->tx_idlelock); + + if (drv->snddev_vreg) + vreg_mode_vote(drv->snddev_vreg, 1, SNDDEV_HIGH_POWER_MODE); + + if (support_aic3254_use_mclk) { + rc_clk = snddev_icodec_rxclk_enable(icodec, 1); + if 
(IS_ERR_VALUE(rc_clk)) { + pr_aud_err("%s Enable RX master clock Error\n", \ + __func__); + goto error_invalid_osrclk; + } + } + + msm_snddev_tx_mclk_request(); + + drv->tx_osrclk = clk_get(0, "i2s_mic_osr_clk"); + if (IS_ERR(drv->tx_osrclk)) { + pr_aud_err("%s master clock Error\n", __func__); + goto error_invalid_osrclk; + } + + trc = clk_set_rate(drv->tx_osrclk, + SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting m clock1\n"); + goto error_invalid_freq; + } + + clk_enable(drv->tx_osrclk); + + drv->tx_bitclk = clk_get(0, "i2s_mic_bit_clk"); + if (IS_ERR(drv->tx_bitclk)) { + pr_aud_err("%s clock Error\n", __func__); + goto error_invalid_bitclk; + } + + /* *************************************** + * 1. CPU MASTER MODE: + * Master clock = Sample Rate * OSR rate bit clock + * OSR Rate bit clock = bit/sample * channel master + * clock / bit clock = divider value = 8 + * + * 2. CPU SLAVE MODE: + * bitclk = 0 + * *************************************** */ + if (msm_codec_i2s_slave_mode) { + pr_debug("%s: configuring bit clock for slave mode\n", + __func__); + trc = clk_set_rate(drv->tx_bitclk, 0); + } else + trc = clk_set_rate(drv->tx_bitclk, 8); + + clk_enable(drv->tx_bitclk); + + if (support_aic3254) { + if (aic3254_ops->aic3254_set_mode) { + if (msm_get_call_state() == 1) + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_TX, + icodec->data->aic3254_voc_id); + else + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_TX, + icodec->data->aic3254_id); + } + } + if (support_adie) { + /* Enable ADIE */ + trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); + if (IS_ERR_VALUE(trc)) + pr_aud_err("%s: adie codec open failed\n", __func__); + else + adie_codec_setpath(icodec->adie_path, + icodec->sample_rate, 256); + } + switch (icodec->data->channel_mode) { + case 2: + afe_channel_mode = MSM_AFE_STEREO; + break; + case 1: + default: + afe_channel_mode = MSM_AFE_MONO; + break; + } + afe_config.mi2s.channel = afe_channel_mode; + afe_config.mi2s.bitwidth = 16; + afe_config.mi2s.line = 1; + if (msm_codec_i2s_slave_mode) + afe_config.mi2s.ws = 0; + else + afe_config.mi2s.ws = 1; + + trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate); + + if (icodec->adie_path && support_adie) { + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_READY); + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_ANALOG_READY); + + if (msm_codec_i2s_slave_mode) + adie_codec_set_master_mode(icodec->adie_path, 1); + else + adie_codec_set_master_mode(icodec->adie_path, 0); + } + + /* Reuse pamp_on for TX platform-specific setup */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(1); + + icodec->enabled = 1; + + wake_unlock(&drv->tx_idlelock); + return 0; + +error_invalid_bitclk: + clk_disable(drv->tx_osrclk); +error_invalid_freq: +error_invalid_osrclk: + if (icodec->data->pamp_on) + icodec->data->pamp_on(0); + msm_snddev_tx_mclk_free(); + + pr_aud_err("%s: encounter error\n", __func__); + + wake_unlock(&drv->tx_idlelock); + return -ENODEV; +} + +static int snddev_icodec_close_rx(struct snddev_icodec_state *icodec) +{ + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + struct snddev_icodec_data *data = icodec->data; + + wake_lock(&drv->rx_idlelock); + + if (drv->snddev_vreg) + vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE); + + /* Disable power amplifier */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(0); + + if (support_aic3254) { + /* Restore default id for A3254 */ + if (data->aic3254_id != 
data->default_aic3254_id)
+			data->aic3254_id = data->default_aic3254_id;
+		/* Disable External Codec A3254 */
+		if (aic3254_ops->aic3254_set_mode)
+			aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_RX, DOWNLINK_OFF);
+	}
+	if (support_adie) {
+		/* Disable ADIE */
+		if (icodec->adie_path) {
+			adie_codec_proceed_stage(icodec->adie_path,
+				ADIE_CODEC_DIGITAL_OFF);
+			adie_codec_close(icodec->adie_path);
+			icodec->adie_path = NULL;
+		}
+	}
+
+	afe_close(icodec->data->copp_id);
+
+	if (icodec->data->voltage_on)
+		icodec->data->voltage_on(0);
+
+	clk_disable(drv->rx_bitclk);
+
+	if (support_aic3254_use_mclk)
+		snddev_icodec_rxclk_enable(icodec, 0);
+	else
+		clk_disable(drv->rx_osrclk);
+
+	msm_snddev_rx_mclk_free();
+
+	icodec->enabled = 0;
+
+	wake_unlock(&drv->rx_idlelock);
+	return 0;
+}
+
+static int snddev_icodec_close_tx(struct snddev_icodec_state *icodec)
+{
+	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
+	struct snddev_icodec_data *data = icodec->data;
+
+	wake_lock(&drv->tx_idlelock);
+
+	if (drv->snddev_vreg)
+		vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE);
+
+	/* Reuse pamp_off for TX platform-specific setup */
+	if (icodec->data->pamp_on)
+		icodec->data->pamp_on(0);
+
+	if (support_aic3254) {
+		/* Restore default id for A3254 */
+		if (data->aic3254_id != data->default_aic3254_id)
+			data->aic3254_id = data->default_aic3254_id;
+		/* Disable External Codec A3254 */
+		if (aic3254_ops->aic3254_set_mode)
+			aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_TX, UPLINK_OFF);
+	}
+	if (support_adie) {
+		/* Disable ADIE */
+		if (icodec->adie_path) {
+			adie_codec_proceed_stage(icodec->adie_path,
+				ADIE_CODEC_DIGITAL_OFF);
+			adie_codec_close(icodec->adie_path);
+			icodec->adie_path = NULL;
+		}
+	}
+	afe_close(icodec->data->copp_id);
+
+	clk_disable(drv->tx_bitclk);
+	clk_disable(drv->tx_osrclk);
+
+	if (support_aic3254_use_mclk)
+		snddev_icodec_rxclk_enable(icodec, 0);
+
+	msm_snddev_tx_mclk_free();
+
+	icodec->enabled = 0;
+
+	wake_unlock(&drv->tx_idlelock);
+	return 0;
+}
+
+static int snddev_icodec_set_device_volume_impl(
+		struct msm_snddev_info *dev_info, u32 volume)
+{
+	struct snddev_icodec_state *icodec;
+	int rc = 0;
+
+	icodec = dev_info->private_data;
+
+	if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_DIGITAL) {
+		rc = adie_codec_set_device_digital_volume(icodec->adie_path,
+				icodec->data->channel_mode, volume);
+		if (rc < 0) {
+			pr_aud_err("%s: unable to set_device_digital_volume for "
+				"%s volume in percentage = %u\n",
+				__func__, dev_info->name, volume);
+			return rc;
+		}
+	} else if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_ANALOG) {
+		rc = adie_codec_set_device_analog_volume(icodec->adie_path,
+				icodec->data->channel_mode, volume);
+		if (rc < 0) {
+			pr_aud_err("%s: unable to set_device_analog_volume for "
+				"%s volume in percentage = %u\n",
+				__func__, dev_info->name, volume);
+			return rc;
+		}
+	} else {
+		pr_aud_err("%s: Invalid device volume control\n", __func__);
+		return -EPERM;
+	}
+	return rc;
+}
+
+static int snddev_icodec_open(struct msm_snddev_info *dev_info)
+{
+	int rc = 0;
+	struct snddev_icodec_state *icodec;
+	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
+
+	if (!dev_info) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	icodec = dev_info->private_data;
+
+	if (icodec->data->capability & SNDDEV_CAP_RX) {
+		mutex_lock(&drv->rx_lock);
+		if (drv->rx_active) {
+			mutex_unlock(&drv->rx_lock);
+			rc = -EBUSY;
+			goto error;
+		}
+		rc = snddev_icodec_open_rx(icodec);
+
+		if (!IS_ERR_VALUE(rc)) {
+			drv->rx_active = 1;
+			if (support_adie &&
(icodec->data->dev_vol_type & ( + SNDDEV_DEV_VOL_DIGITAL | + SNDDEV_DEV_VOL_ANALOG))) + rc = snddev_icodec_set_device_volume_impl( + dev_info, dev_info->dev_volume); + } + mutex_unlock(&drv->rx_lock); + } else { + mutex_lock(&drv->tx_lock); + if (drv->tx_active) { + mutex_unlock(&drv->tx_lock); + rc = -EBUSY; + goto error; + } + rc = snddev_icodec_open_tx(icodec); + + if (!IS_ERR_VALUE(rc)) { + drv->tx_active = 1; + if (support_adie && (icodec->data->dev_vol_type & ( + SNDDEV_DEV_VOL_DIGITAL | + SNDDEV_DEV_VOL_ANALOG))) + rc = snddev_icodec_set_device_volume_impl( + dev_info, dev_info->dev_volume); + } + mutex_unlock(&drv->tx_lock); + } +error: + return rc; +} + +static int snddev_icodec_close(struct msm_snddev_info *dev_info) +{ + int rc = 0; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + if (!dev_info) { + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + + if (icodec->data->capability & SNDDEV_CAP_RX) { + mutex_lock(&drv->rx_lock); + if (!drv->rx_active) { + mutex_unlock(&drv->rx_lock); + rc = -EPERM; + goto error; + } + rc = snddev_icodec_close_rx(icodec); + if (!IS_ERR_VALUE(rc)) + drv->rx_active = 0; + mutex_unlock(&drv->rx_lock); + } else { + mutex_lock(&drv->tx_lock); + if (!drv->tx_active) { + mutex_unlock(&drv->tx_lock); + rc = -EPERM; + goto error; + } + rc = snddev_icodec_close_tx(icodec); + if (!IS_ERR_VALUE(rc)) + drv->tx_active = 0; + mutex_unlock(&drv->tx_lock); + } + +error: + return rc; +} + +static int snddev_icodec_check_freq(u32 req_freq) +{ + int rc = -EINVAL; + + if ((req_freq != 0) && (req_freq >= 8000) && (req_freq <= 48000)) { + if ((req_freq == 8000) || (req_freq == 11025) || + (req_freq == 12000) || (req_freq == 16000) || + (req_freq == 22050) || (req_freq == 24000) || + (req_freq == 32000) || (req_freq == 44100) || + (req_freq == 48000)) { + rc = 0; + } else + pr_aud_info("%s: Unsupported Frequency:%d\n", __func__, + req_freq); + } + return rc; +} + +static int snddev_icodec_set_freq(struct msm_snddev_info *dev_info, u32 rate) +{ + int rc; + struct snddev_icodec_state *icodec; + + if (!dev_info) { + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + if (support_adie && + adie_codec_freq_supported(icodec->data->profile, rate) != 0) { + rc = -EINVAL; + goto error; + } else { + if (snddev_icodec_check_freq(rate) != 0) { + rc = -EINVAL; + goto error; + } else + icodec->sample_rate = rate; + } + + if (icodec->enabled) { + snddev_icodec_close(dev_info); + snddev_icodec_open(dev_info); + } + + return icodec->sample_rate; + +error: + return rc; +} + +static int snddev_icodec_enable_sidetone(struct msm_snddev_info *dev_info, + u32 enable, uint16_t gain) +{ + int rc = 0; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + /*3254 sidetone will be binded with dsp image.*/ + if (support_aic3254 || !support_adie) + goto error; + + if (!dev_info) { + pr_aud_err("invalid dev_info\n"); + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + + if (icodec->data->capability & SNDDEV_CAP_RX) { + mutex_lock(&drv->rx_lock); + if (!drv->rx_active || !dev_info->opened) { + pr_aud_err("dev not active\n"); + rc = -EPERM; + mutex_unlock(&drv->rx_lock); + goto error; + } + rc = afe_sidetone(PRIMARY_I2S_TX, PRIMARY_I2S_RX, enable, gain); + if (rc < 0) + pr_aud_err("%s: AFE command sidetone failed\n", __func__); + mutex_unlock(&drv->rx_lock); + } else { + rc = -EINVAL; + pr_aud_err("rx device only\n"); + } + +error: + return rc; + +} 
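The clock arithmetic used in snddev_icodec_open_rx()/snddev_icodec_open_tx() above comes straight from the SNDDEV_ICODEC_CLK_RATE() macro: the requested OSR/master clock is sample_rate * 32 bits per 16-bit stereo frame * 8 (the shift by SNDDEV_ICODEC_MUL_FACTOR), i.e. 256 * fs, and in CPU master mode the bit clock is that value divided by 8, while in slave mode the bit clock rate is simply set to 0 so the codec drives it. The stand-alone C sketch below is illustrative only and not part of the patch; it re-declares the macro from this file and prints the clocks the driver would request for a few common sample rates.

/* Illustrative sketch only: mirrors SNDDEV_ICODEC_CLK_RATE() from snddev_icodec.c. */
#include <stdio.h>

#define SNDDEV_ICODEC_PCM_SZ		32	/* 16 bit/sample, stereo frame */
#define SNDDEV_ICODEC_MUL_FACTOR	3	/* multiply by 8 == shift by 3 */
#define SNDDEV_ICODEC_CLK_RATE(freq) \
	(((freq) * (SNDDEV_ICODEC_PCM_SZ)) << (SNDDEV_ICODEC_MUL_FACTOR))

int main(void)
{
	const unsigned int rates[] = { 8000, 16000, 44100, 48000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%u Hz -> OSR clock %u Hz, master-mode bit clock %u Hz\n",
		       rates[i], SNDDEV_ICODEC_CLK_RATE(rates[i]),
		       SNDDEV_ICODEC_CLK_RATE(rates[i]) / 8);
	/* e.g. 48000 Hz -> 12288000 Hz OSR clock (12.288 MHz), 1536000 Hz bit clock */
	return 0;
}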
+static int snddev_icodec_enable_anc(struct msm_snddev_info *dev_info, + u32 enable) +{ + int rc = 0; + struct adie_codec_anc_data *reg_writes; + struct acdb_cal_block cal_block; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + if (support_aic3254 || !support_adie) + goto error; + + pr_aud_info("%s: enable=%d\n", __func__, enable); + + if (!dev_info) { + pr_aud_err("invalid dev_info\n"); + rc = -EINVAL; + goto error; + } + icodec = dev_info->private_data; + + if ((icodec->data->capability & SNDDEV_CAP_RX) && + (icodec->data->capability & SNDDEV_CAP_ANC)) { + mutex_lock(&drv->rx_lock); + + if (!drv->rx_active || !dev_info->opened) { + pr_aud_err("dev not active\n"); + rc = -EPERM; + mutex_unlock(&drv->rx_lock); + goto error; + } + if (enable) { + get_anc_cal(&cal_block); + reg_writes = (struct adie_codec_anc_data *) + cal_block.cal_kvaddr; + + if (reg_writes == NULL) { + pr_aud_err("error, no calibration data\n"); + rc = -1; + mutex_unlock(&drv->rx_lock); + goto error; + } + + rc = adie_codec_enable_anc(icodec->adie_path, + 1, reg_writes); + } else { + rc = adie_codec_enable_anc(icodec->adie_path, + 0, NULL); + } + mutex_unlock(&drv->rx_lock); + } else { + rc = -EINVAL; + pr_aud_err("rx and ANC device only\n"); + } + +error: + return rc; + +} + +int snddev_icodec_set_device_volume(struct msm_snddev_info *dev_info, + u32 volume) +{ + struct snddev_icodec_state *icodec; + struct mutex *lock; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + int rc = -EPERM; + + if (!dev_info) { + pr_aud_info("%s : device not intilized.\n", __func__); + return -EINVAL; + } + + icodec = dev_info->private_data; + + if (!(icodec->data->dev_vol_type & (SNDDEV_DEV_VOL_DIGITAL + | SNDDEV_DEV_VOL_ANALOG))) { + + pr_aud_info("%s : device %s does not support device volume " + "control.", __func__, dev_info->name); + return -EPERM; + } + dev_info->dev_volume = volume; + + if (icodec->data->capability & SNDDEV_CAP_RX) + lock = &drv->rx_lock; + else + lock = &drv->tx_lock; + + mutex_lock(lock); + + rc = snddev_icodec_set_device_volume_impl(dev_info, + dev_info->dev_volume); + mutex_unlock(lock); + return rc; +} + +void htc_8x60_register_icodec_ops(struct q6v2audio_icodec_ops *ops) +{ + audio_ops = ops; +} + +static int snddev_icodec_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_icodec_data *pdata; + struct msm_snddev_info *dev_info; + struct snddev_icodec_state *icodec; + static int first_time = 1; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + rc = -1; + goto error; + } + pdata = pdev->dev.platform_data; + if ((pdata->capability & SNDDEV_CAP_RX) && + (pdata->capability & SNDDEV_CAP_TX)) { + pr_aud_err("%s: invalid device data either RX or TX\n", __func__); + goto error; + } + icodec = kzalloc(sizeof(struct snddev_icodec_state), GFP_KERNEL); + if (!icodec) { + rc = -ENOMEM; + goto error; + } + dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + kfree(icodec); + rc = -ENOMEM; + goto error; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->private_data = (void *) icodec; + dev_info->dev_ops.open = snddev_icodec_open; + dev_info->dev_ops.close = snddev_icodec_close; + dev_info->dev_ops.set_freq = snddev_icodec_set_freq; + dev_info->dev_ops.set_device_volume = snddev_icodec_set_device_volume; + dev_info->capability = pdata->capability; + dev_info->opened = 0; + msm_snddev_register(dev_info); + icodec->data = pdata; + 
icodec->sample_rate = pdata->default_sample_rate; + dev_info->sample_rate = pdata->default_sample_rate; + dev_info->channel_mode = pdata->channel_mode; + if (pdata->capability & SNDDEV_CAP_RX) + dev_info->dev_ops.enable_sidetone = + snddev_icodec_enable_sidetone; + else + dev_info->dev_ops.enable_sidetone = NULL; + + if (pdata->capability & SNDDEV_CAP_ANC) { + dev_info->dev_ops.enable_anc = + snddev_icodec_enable_anc; + } else { + dev_info->dev_ops.enable_anc = NULL; + } + if (first_time) { + if (audio_ops->support_aic3254) { + support_aic3254 = audio_ops->support_aic3254(); + } else { + support_aic3254 = 0; + } + pr_aud_info("%s: support_aic3254 = %d\n", + __func__, support_aic3254); + + if (audio_ops->support_adie) { + support_adie = audio_ops->support_adie(); + } else { + support_adie = 0; + } + pr_aud_info("%s: support_adie = %d\n", + __func__, support_adie); + + if (audio_ops->is_msm_i2s_slave) { + msm_codec_i2s_slave_mode = audio_ops->is_msm_i2s_slave(); + } else { + msm_codec_i2s_slave_mode = 0; + } + pr_aud_info("%s: msm_codec_i2s_slave_mode = %d\n", + __func__, msm_codec_i2s_slave_mode); + + if (audio_ops->support_aic3254_use_mclk) + support_aic3254_use_mclk = \ + audio_ops->support_aic3254_use_mclk(); + else + support_aic3254_use_mclk = 0; + pr_aud_info("%s: support_aic3254_use_mclk = %d\n", + __func__, support_aic3254_use_mclk); + + first_time = 0; + } + +error: + return rc; +} + +static int snddev_icodec_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver snddev_icodec_driver = { + .probe = snddev_icodec_probe, + .remove = snddev_icodec_remove, + .driver = { .name = "snddev_icodec" } +}; + + +void htc_8x60_register_aic3254_ops(struct q6v2audio_aic3254_ops *ops) +{ + aic3254_ops = ops; +} + +int update_aic3254_info(struct aic3254_info *info) +{ + struct msm_snddev_info *dev_info; + int rc = 0; + + dev_info = audio_dev_ctrl_find_dev(info->dev_id); + if (IS_ERR(dev_info)) + rc = -ENODEV; + else { + if ((dev_info->copp_id == PRIMARY_I2S_RX) || + (dev_info->copp_id == PRIMARY_I2S_TX)) { + struct snddev_icodec_state *icodec; + icodec = dev_info->private_data; + icodec->data->aic3254_id = info->path_id; + pr_aud_info("%s: update aic3254 id of device %s as %d\n", + __func__, dev_info->name, icodec->data->aic3254_id); + } + } + + return rc; +} + +module_param(msm_codec_i2s_slave_mode, bool, 0); +MODULE_PARM_DESC(msm_codec_i2s_slave_mode, "Set MSM to I2S slave clock mode"); + +static int __init snddev_icodec_init(void) +{ + s32 rc; + struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv; + + rc = platform_driver_register(&snddev_icodec_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for snddev icodec failed\n", + __func__); + goto error_snddev_icodec_driver; + } + + rc = platform_driver_register(&msm_cdcclk_ctl_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for msm snddev failed\n", + __func__); + goto error_msm_cdcclk_ctl_driver; + } + + mutex_init(&icodec_drv->rx_lock); + mutex_init(&icodec_drv->tx_lock); + + mutex_init(&icodec_drv->rx_mclk_lock); + + icodec_drv->rx_active = 0; + icodec_drv->tx_active = 0; + icodec_drv->snddev_vreg = vreg_init(); + + wake_lock_init(&icodec_drv->tx_idlelock, WAKE_LOCK_IDLE, + "snddev_tx_idle"); + wake_lock_init(&icodec_drv->rx_idlelock, WAKE_LOCK_IDLE, + "snddev_rx_idle"); + return 0; + +error_msm_cdcclk_ctl_driver: + platform_driver_unregister(&snddev_icodec_driver); +error_snddev_icodec_driver: + return -ENODEV; +} + +static void __exit 
snddev_icodec_exit(void) +{ + struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv; + + platform_driver_unregister(&snddev_icodec_driver); + platform_driver_unregister(&msm_cdcclk_ctl_driver); + + clk_put(icodec_drv->rx_osrclk); + clk_put(icodec_drv->tx_osrclk); + if (icodec_drv->snddev_vreg) { + vreg_deinit(icodec_drv->snddev_vreg); + icodec_drv->snddev_vreg = NULL; + } + return; +} + +module_init(snddev_icodec_init); +module_exit(snddev_icodec_exit); + +MODULE_DESCRIPTION("ICodec Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c new file mode 100644 index 00000000..ddeb8d2a --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c @@ -0,0 +1,472 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "snddev_mi2s.h" + +#define SNDDEV_MI2S_PCM_SZ 32 /* 16 bit / sample stereo mode */ +#define SNDDEV_MI2S_MUL_FACTOR 3 /* Multi by 8 Shift by 3 */ +#define SNDDEV_MI2S_CLK_RATE(freq) \ + (((freq) * (SNDDEV_MI2S_PCM_SZ)) << (SNDDEV_MI2S_MUL_FACTOR)) + + +/* Global state for the driver */ +struct snddev_mi2s_drv_state { + + struct clk *tx_osrclk; + struct clk *tx_bitclk; + int mi2s_ws; + int mi2s_mclk; + int mi2s_sclk; + int fm_mi2s_sd; +}; + +static struct snddev_mi2s_drv_state snddev_mi2s_drv; + +static struct msm_mi2s_gpio_data *mi2s_gpio; + +static int mi2s_gpios_request(void) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + rc = gpio_request(snddev_mi2s_drv.mi2s_ws, "MI2S_WS"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MI2S_WS failed\n", __func__); + return rc; + } + + rc = gpio_request(snddev_mi2s_drv.mi2s_sclk, "MI2S_SCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MI2S_SCLK failed\n", __func__); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + return rc; + } + + rc = gpio_request(snddev_mi2s_drv.mi2s_mclk, "MI2S_MCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MI2S_MCLK failed\n", + __func__); + gpio_free(snddev_mi2s_drv.mi2s_ws); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + return rc; + } + + rc = gpio_request(snddev_mi2s_drv.fm_mi2s_sd, "FM_MI2S_SD"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for FM_MI2S_SD failed\n", + __func__); + gpio_free(snddev_mi2s_drv.mi2s_ws); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + gpio_free(snddev_mi2s_drv.mi2s_mclk); + return rc; + } + + return rc; +} + +static void mi2s_gpios_free(void) +{ + pr_debug("%s\n", __func__); + gpio_free(snddev_mi2s_drv.mi2s_ws); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + gpio_free(snddev_mi2s_drv.mi2s_mclk); + gpio_free(snddev_mi2s_drv.fm_mi2s_sd); +} + +static int mi2s_get_gpios(struct platform_device *pdev) +{ + int rc = 0; + struct resource 
*res; + + /* Claim all of the GPIOs. */ + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "mi2s_ws"); + if (!res) { + pr_aud_err("%s: failed to get gpio MI2S_WS\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.mi2s_ws = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "mi2s_sclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MI2S_SCLK\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.mi2s_sclk = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "mi2s_mclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MI2S_MCLK\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.mi2s_mclk = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "fm_mi2s_sd"); + if (!res) { + pr_aud_err("%s: failed to get gpio FM_MI2S_SD\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.fm_mi2s_sd = res->start; + + return rc; +} + +static int mi2s_fm_probe(struct platform_device *pdev) +{ + int rc = 0; + + pr_aud_info("%s:\n", __func__); + + rc = mi2s_get_gpios(pdev); + if (rc < 0) { + pr_aud_err("%s: GPIO configuration failed\n", __func__); + return rc; + } + + mi2s_gpio = (struct msm_mi2s_gpio_data *)(pdev->dev.platform_data); + return rc; +} + +static struct platform_driver mi2s_fm_driver = { + .probe = mi2s_fm_probe, + .driver = { .name = "msm_mi2s"} +}; + +static u8 num_of_bits_set(u8 sd_line_mask) +{ + u8 num_bits_set = 0; + + while (sd_line_mask) { + + if (sd_line_mask & 1) + num_bits_set++; + sd_line_mask = sd_line_mask >> 1; + } + return num_bits_set; +} + +static int snddev_mi2s_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + union afe_port_config afe_config; + u8 channels; + u8 num_of_sd_lines = 0; + struct snddev_mi2s_drv_state *drv = &snddev_mi2s_drv; + struct snddev_mi2s_data *snddev_mi2s_data = dev_info->private_data; + + if (!dev_info) { + pr_aud_err("%s: msm_snddev_info is null\n", __func__); + return -EINVAL; + } + + /* set up osr clk */ + drv->tx_osrclk = clk_get(0, "mi2s_osr_clk"); + if (IS_ERR(drv->tx_osrclk)) + pr_aud_err("%s master clock Error\n", __func__); + + rc = clk_set_rate(drv->tx_osrclk, + SNDDEV_MI2S_CLK_RATE(dev_info->sample_rate)); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("ERROR setting osr clock\n"); + return -ENODEV; + } + clk_enable(drv->tx_osrclk); + + /* set up bit clk */ + drv->tx_bitclk = clk_get(0, "mi2s_bit_clk"); + if (IS_ERR(drv->tx_bitclk)) + pr_aud_err("%s clock Error\n", __func__); + + rc = clk_set_rate(drv->tx_bitclk, 8); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("ERROR setting bit clock\n"); + clk_disable(drv->tx_osrclk); + return -ENODEV; + } + clk_enable(drv->tx_bitclk); + + afe_config.mi2s.bitwidth = 16; + + if (snddev_mi2s_data->channel_mode == 1) + channels = AFE_MI2S_MONO; + else if (snddev_mi2s_data->channel_mode == 2) + channels = AFE_MI2S_STEREO; + else if (snddev_mi2s_data->channel_mode == 4) + channels = AFE_MI2S_4CHANNELS; + else if (snddev_mi2s_data->channel_mode == 6) + channels = AFE_MI2S_6CHANNELS; + else if (snddev_mi2s_data->channel_mode == 8) + channels = AFE_MI2S_8CHANNELS; + else { + pr_aud_err("ERROR: Invalid MI2S channel mode\n"); + goto error_invalid_data; + } + + num_of_sd_lines = num_of_bits_set(snddev_mi2s_data->sd_lines); + + switch (num_of_sd_lines) { + case 1: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0: + afe_config.mi2s.line = AFE_I2S_SD0; + break; + case MI2S_SD1: + afe_config.mi2s.line = AFE_I2S_SD1; + break; + case MI2S_SD2: + afe_config.mi2s.line = AFE_I2S_SD2; + break; + case MI2S_SD3: + afe_config.mi2s.line 
= AFE_I2S_SD3; + break; + default: + pr_aud_err("%s: invalid SD line\n", + __func__); + goto error_invalid_data; + } + if (channels != AFE_MI2S_STEREO && + channels != AFE_MI2S_MONO) { + pr_aud_err("%s: for one SD line, channel " + "must be 1 or 2\n", __func__); + goto error_invalid_data; + } + afe_config.mi2s.channel = channels; + break; + case 2: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0 | MI2S_SD1: + afe_config.mi2s.line = AFE_I2S_QUAD01; + break; + case MI2S_SD2 | MI2S_SD3: + afe_config.mi2s.line = AFE_I2S_QUAD23; + break; + default: + pr_aud_err("%s: invalid SD line\n", + __func__); + goto error_invalid_data; + } + if (channels != AFE_MI2S_4CHANNELS) { + pr_aud_err("%s: for two SD lines, channel " + "must be 1 and 2 or 3 and 4\n", __func__); + goto error_invalid_data; + } + break; + case 3: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0 | MI2S_SD1 | MI2S_SD2: + afe_config.mi2s.line = AFE_I2S_6CHS; + break; + default: + pr_aud_err("%s: invalid SD lines\n", + __func__); + goto error_invalid_data; + } + if (channels != AFE_MI2S_6CHANNELS) { + pr_aud_err("%s: for three SD lines, lines " + "must be 1, 2, and 3\n", __func__); + goto error_invalid_data; + } + break; + case 4: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0 | MI2S_SD1 | MI2S_SD2 | MI2S_SD3: + afe_config.mi2s.line = AFE_I2S_8CHS; + break; + default: + pr_aud_err("%s: invalid SD lines\n", + __func__); + goto error_invalid_data; + } + + if (channels != AFE_MI2S_8CHANNELS) { + pr_aud_err("%s: for four SD lines, lines " + "must be 1, 2, 3, and 4\n", __func__); + goto error_invalid_data; + } + break; + default: + pr_aud_err("%s: invalid SD lines\n", __func__); + goto error_invalid_data; + } + afe_config.mi2s.ws = 1; + rc = afe_open(snddev_mi2s_data->copp_id, &afe_config, + dev_info->sample_rate); + + if (rc < 0) { + pr_aud_err("%s: afe_open failed\n", __func__); + goto error_invalid_data; + } + + /*enable fm gpio here*/ + rc = mi2s_gpios_request(); + if (rc < 0) { + pr_aud_err("%s: GPIO request failed\n", __func__); + return rc; + } + + pr_aud_info("%s: afe_open done\n", __func__); + + return rc; + +error_invalid_data: + + clk_disable(drv->tx_bitclk); + clk_disable(drv->tx_osrclk); + return -EINVAL; +} + +static int snddev_mi2s_close(struct msm_snddev_info *dev_info) +{ + + struct snddev_mi2s_drv_state *mi2s_drv = &snddev_mi2s_drv; + struct snddev_mi2s_data *snddev_mi2s_data = dev_info->private_data; + + if (!dev_info) { + pr_aud_err("%s: msm_snddev_info is null\n", __func__); + return -EINVAL; + } + + if (!dev_info->opened) { + pr_aud_err(" %s: calling close device with out opening the" + " device\n", __func__); + return -EIO; + } + afe_close(snddev_mi2s_data->copp_id); + clk_disable(mi2s_drv->tx_bitclk); + clk_disable(mi2s_drv->tx_osrclk); + + mi2s_gpios_free(); + + pr_aud_info("%s:\n", __func__); + + return 0; +} + +static int snddev_mi2s_set_freq(struct msm_snddev_info *dev_info, u32 req_freq) +{ + if (req_freq != 48000) { + pr_aud_info("%s: Unsupported Frequency:%d\n", __func__, req_freq); + return -EINVAL; + } + return 48000; +} + + +static int snddev_mi2s_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_mi2s_data *pdata; + struct msm_snddev_info *dev_info; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + return -ENODEV; + } + + pdata = pdev->dev.platform_data; + + dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + pr_aud_err("%s: uneable to allocate memeory for msm_snddev_info\n", + __func__); + + 
return -ENOMEM; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->dev_ops.open = snddev_mi2s_open; + dev_info->dev_ops.close = snddev_mi2s_close; + dev_info->private_data = (void *)pdata; + dev_info->dev_ops.set_freq = snddev_mi2s_set_freq; + dev_info->capability = pdata->capability; + dev_info->opened = 0; + dev_info->sample_rate = pdata->sample_rate; + msm_snddev_register(dev_info); + + pr_aud_info("%s: probe done for %s\n", __func__, pdata->name); + return rc; +} + +static struct platform_driver snddev_mi2s_driver = { + .probe = snddev_mi2s_probe, + .driver = {.name = "snddev_mi2s"} +}; + +static int __init snddev_mi2s_init(void) +{ + s32 rc = 0; + + rc = platform_driver_register(&mi2s_fm_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for mi2s_fm_driver failed\n", + __func__); + goto error_mi2s_fm_platform_driver; + } + + rc = platform_driver_register(&snddev_mi2s_driver); + if (IS_ERR_VALUE(rc)) { + + pr_aud_err("%s: platform_driver_register failed\n", __func__); + goto error_platform_driver; + } + + pr_aud_info("snddev_mi2s_init : done\n"); + + return rc; + +error_platform_driver: + platform_driver_unregister(&mi2s_fm_driver); +error_mi2s_fm_platform_driver: + pr_aud_err("%s: encounter error\n", __func__); + return -ENODEV; +} + +static void __exit snddev_mi2s_exit(void) +{ + struct snddev_mi2s_drv_state *mi2s_drv = &snddev_mi2s_drv; + + platform_driver_unregister(&snddev_mi2s_driver); + clk_put(mi2s_drv->tx_osrclk); + clk_put(mi2s_drv->tx_bitclk); + return; +} + + +module_init(snddev_mi2s_init); +module_exit(snddev_mi2s_exit); + +MODULE_DESCRIPTION("MI2S Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h new file mode 100644 index 00000000..fa1c55e2 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __MACH_QDSP6_V2_SNDDEV_MI2S_H +#define __MACH_QDSP6_V2_SNDDEV_MI2S_H + +struct snddev_mi2s_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + u16 channel_mode; + u16 sd_lines; + u32 sample_rate; +}; + +#define MI2S_SD0 (1 << 0) +#define MI2S_SD1 (1 << 1) +#define MI2S_SD2 (1 << 2) +#define MI2S_SD3 (1 << 3) + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_virtual.c b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.c new file mode 100644 index 00000000..c0f2a932 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.c @@ -0,0 +1,172 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include "snddev_virtual.h" + +static DEFINE_MUTEX(snddev_virtual_lock); + +static int snddev_virtual_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + + mutex_lock(&snddev_virtual_lock); + + if (!dev_info) { + pr_aud_err("%s: NULL dev_info\n", __func__); + + rc = -EINVAL; + goto done; + } + + if (!dev_info->opened) { + rc = afe_start_pseudo_port(dev_info->copp_id); + } else { + pr_aud_err("%s: Pseudo port 0x%x is already open\n", + __func__, dev_info->copp_id); + + rc = -EBUSY; + } + +done: + mutex_unlock(&snddev_virtual_lock); + + return rc; +} + +static int snddev_virtual_close(struct msm_snddev_info *dev_info) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + + mutex_lock(&snddev_virtual_lock); + + if (!dev_info) { + pr_aud_err("%s: NULL dev_info\n", __func__); + + rc = -EINVAL; + goto done; + } + + if (dev_info->opened) { + rc = afe_stop_pseudo_port(dev_info->copp_id); + } else { + pr_aud_err("%s: Pseudo port 0x%x is not open\n", + __func__, dev_info->copp_id); + + rc = -EPERM; + } + +done: + mutex_unlock(&snddev_virtual_lock); + + return rc; +} + +static int snddev_virtual_set_freq(struct msm_snddev_info *dev_info, u32 rate) +{ + int rc = 0; + + if (!dev_info) + rc = -EINVAL; + + return rate; +} + +static int snddev_virtual_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_virtual_data *pdata; + struct msm_snddev_info *dev_info; + + pr_debug("%s\n", __func__); + + if (!pdev || !pdev->dev.platform_data) { + pr_aud_err("%s: Invalid caller\n", __func__); + + rc = -EPERM; + goto done; + } + + pdata = pdev->dev.platform_data; + + dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + pr_aud_err("%s: Out of memory\n", __func__); + + rc = -ENOMEM; + goto done; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->private_data = (void *) NULL; + dev_info->dev_ops.open = snddev_virtual_open; + dev_info->dev_ops.close = snddev_virtual_close; + dev_info->dev_ops.set_freq = snddev_virtual_set_freq; + dev_info->capability = pdata->capability; + dev_info->sample_rate = 48000; + dev_info->opened = 0; + dev_info->sessions = 0; + + msm_snddev_register(dev_info); + +done: + return rc; +} + +static int snddev_virtual_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct 
platform_driver snddev_virtual_driver = { + .probe = snddev_virtual_probe, + .remove = snddev_virtual_remove, + .driver = { .name = "snddev_virtual" } +}; + +static int __init snddev_virtual_init(void) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + + rc = platform_driver_register(&snddev_virtual_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: Platform driver register failure\n", __func__); + + return -ENODEV; + } + + return 0; +} + +static void __exit snddev_virtual_exit(void) +{ + platform_driver_unregister(&snddev_virtual_driver); + + return; +} + +module_init(snddev_virtual_init); +module_exit(snddev_virtual_exit); + +MODULE_DESCRIPTION("Virtual Sound Device driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_virtual.h b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.h new file mode 100644 index 00000000..dec4d073 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.h @@ -0,0 +1,20 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __MACH_QDSP6V2_SNDDEV_VIRTUAL_H +#define __MACH_QDSP6V2_SNDDEV_VIRTUAL_H + +struct snddev_virtual_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* Audpp routing */ +}; +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h new file mode 100644 index 00000000..59991998 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h @@ -0,0 +1,2269 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __MACH_QDSP6V2_TIMPANI_PROFILE_H +#define __MACH_QDSP6V2_TIMPANI_PROFILE_H + +/* + * TX Device Profiles + */ + +/* Analog MIC */ +/* AMIC Primary mono */ +#define AMIC_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + + +/* AMIC Secondary mono */ +#define AMIC_SEC_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98 },\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* AMIC dual */ +#define AMIC_DUAL_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xB0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* + * Digital MIC + */ +/* DMIC1 Primary (DMIC 1 - TX1) */ +#define DMIC1_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0x1F, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0x3F, 0x21)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0x3F, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x39, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* DMIC1 Secondary - (DMIC 2 - TX1) */ +#define DMIC1_SEC_MONO_8000_OSR_64 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x12)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* DMIC Dual Primary (DMIC 1/2 - TX1) */ +#define DMIC1_PRI_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0x1F, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0x3F, 0x19)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0x3F, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x39, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)} } + +/* DMIC2 Dual Primary (DMIC 3/4 - TX2 - 
Left/Right) */ +#define DMIC2_SEC_DUAL_8000_OSR_64 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x22)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0xF0, 0xE0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HS_DMIC2_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0x1F, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0x3F, 0x19)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0x3F, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x39, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * LINE IN + */ +#define LINEIN_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_PRI_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_SEC_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x2E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0xF0, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_SEC_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x2E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0xF0, 0xE0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_SEC_STEREO_8000_OSR_64 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 
0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x22)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x18)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0xF0, 0xE0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * AUX IN + */ +#define AUXIN_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA1)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* Headset MIC */ +#define HEADSET_AMIC2_TX_MONO_PRI_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 
0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * RX Device Profiles + */ + +/* RX EAR */ +#define EAR_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define EAR_SEC_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xCA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* ANC Headset: Speakers on Primary Rx, Noise Microphones on Secondary Tx */ + +#define ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x95, 0xFF, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9B, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xC0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xD0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * RX HPH PRIMARY + */ + +/* RX HPH CLASS AB CAPLESS */ + +#define HEADSET_AB_CPLS_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_CPLS_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_CPLS_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_CPLS_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS AB LEGACY */ + +#define HPH_PRI_AB_LEG_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + 
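A profile macro such as HEADSET_AB_CPLS_48000_OSR_256 above is only a static initializer list of masked register writes, delays and stage markers; it takes effect once a board or codec driver wraps it in an action table and registers it as a device profile. A minimal sketch of that wiring, assuming the usual adie_codec_action_unit / adie_codec_hwsetting_entry / adie_codec_dev_profile structures from the MSM ADIE codec layer (the struct, field and constant names here are recalled from that layer, not defined in this hunk, so treat them as illustrative):

	/* Expand the macro into an array of (type, action) units. */
	static struct adie_codec_action_unit headset_rx_48k_osr256_actions[] =
		HEADSET_AB_CPLS_48000_OSR_256;

	/* One hardware setting: which sample-rate plan and OSR this table serves. */
	static struct adie_codec_hwsetting_entry headset_rx_settings[] = {
		{
			.freq_plan = 48000,
			.osr = 256,
			.actions = headset_rx_48k_osr256_actions,
			.action_sz = ARRAY_SIZE(headset_rx_48k_osr256_actions),
		},
	};

	/* The profile handed to the ADIE codec driver for the RX (playback) path. */
	static struct adie_codec_dev_profile headset_rx_profile = {
		.path_type = ADIE_CODEC_RX,
		.settings = headset_rx_settings,
		.setting_sz = ARRAY_SIZE(headset_rx_settings),
	};

The codec driver then walks the selected action list in order: ADIE_CODEC_ACTION_ENTRY items become masked register writes (the ADIE_CODEC_PACK_ENTRY triplet is register, mask, value), ADIE_CODEC_ACTION_DELAY_WAIT items become timed waits, and the ADIE_CODEC_ACTION_STAGE_REACHED markers let the caller sequence the path up to DIGITAL_READY, DIGITAL_ANALOG_READY, ANALOG_OFF or DIGITAL_OFF.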
+#define HP_PRI_AB_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x09)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x186A0}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF9)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x27)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS D LEGACY */ + +#define HPH_PRI_D_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0A, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_D_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x21, 0xFF, 0x60)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x22, 0xFF, 0xE1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x26, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2D, 0xFF, 0x6F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2E, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3F, 0xFF, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x40, 0xFF, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x41, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x42, 0xFF, 0xBB)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x43, 0xFF, 0xF2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x44, 0xF7, 0x37)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x45, 0xFF, 0xFF)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x46, 0xFF, 0x77)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x47, 0xFF, 0xF2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x48, 0xF7, 0x37)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x49, 0xFF, 0xFF)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4A, 0xFF, 0x77)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0xFF, 0x8C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 
0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * RX HPH SECONDARY + */ + +/* RX HPH CLASS AB CAPLESS */ +#define HPH_SEC_AB_CPLS_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#define HPH_SEC_AB_CPLS_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_SEC_AB_CPLS_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x48)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS AB LEGACY */ +#define HPH_SEC_AB_LEG_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_SEC_AB_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#define HPH_SEC_AB_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x48)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS D LEGACY */ + +#define HPH_SEC_D_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x50, 0x50)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0A, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000},\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_SEC_D_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x50, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0A, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX LINE OUT PRIMARY */ +#define LINEOUT_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 
0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0c)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX LINE OUT SECONDARY */ +#define LINEOUT_SEC_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_SEC_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + 
{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_SEC_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x48)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define SPEAKER_PRI_STEREO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX AUX */ +#define AUXOUT_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x20, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 50000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x07)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x20, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define AUXOUT_SEC_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xCA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x40, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 50000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x07)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x40, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* + * LB Device Profiles + */ + +/* EAR */ +#define LB_EAR_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* HPH CLASS AB CAPLESS */ +#define LB_HPH_AB_CPLS_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_AB_CPLS_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + 
{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_AB_CPLS_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* HPH CLASS AB LEGACY */ +#define LB_HPH_AB_LEG_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xFC)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_PHP_AB_LEG_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xFC)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_AB_LEG_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 
300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xFC)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* HPH CLASS D LEGACY */ +#define LB_HPH_D_LEG_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3A, 0x2A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x2F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_D_LEG_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3A, 0x3A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x3F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 
0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* LINE OUT */ +#define LB_LINEOUT_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_LINEOUT_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x10, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x10, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_LINEOUT_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* AUX OUT */ +#define LB_AUXOUT_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0xE0, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 50000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x07)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0xE0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY RX */ +#define TTY_HEADSET_MONO_RX_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x06)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x45)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xC5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY TX */ +#define TTY_HEADSET_MONO_TX_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 
0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h new file mode 100644 index 00000000..3f747bd9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h @@ -0,0 +1,699 @@ +/* arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h + * + * Copyright (C) 2010 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __TIMPANI_PROFILE_LEAD_H +#define __TIMPANI_PROFILE_LEAD_H + +/* RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define HEADSET_STEREO_AB_CPLS_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x27)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* RX EAR MONO*/ +#define EAR_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 
0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x23)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define SPEAKER_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x23)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANALOG IMIC Primary MONO */ +#define AMIC_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/*back mic tx*/ +#define AMIC_SEC_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 
0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANC Headset: Speakers on Primary Rx, Noise Microphones on Secondary Tx */ + +#define ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x95, 0xFF, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9B, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xC0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xD0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY RX */ +#define TTY_HEADSET_MONO_RX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x06)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x45)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xC5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x20, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* TTY TX */ +#define TTY_HEADSET_MONO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* FM HPH, RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_HEADSET_AB_CPLS_RX_48000 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* FM SPK, RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_SPEAKER_RX \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* +#define SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } +*/ +#define SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x50, 0xFF, 0xEA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x50, 0xFF, 0xEA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0x29)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xBB)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define DUAL_MIC_STEREO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h new file mode 100644 index 00000000..e2bcc30e --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h @@ -0,0 +1,641 @@ +/* arch/arm/mach-msm/qdsp6v2_1x/timpani_profile_8x60_lead.h + * + * Copyright (C) 2010 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __TIMPANI_PROFILE_VIGOR_H +#define __TIMPANI_PROFILE_VIGOR_H + +/* RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define HEADSET_STEREO_AB_CPLS_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x27)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* RX EAR MONO*/ +#define EAR_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x21, 0xFF, 0x28)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x22, 0xFF, 0xA9)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x26, 0xFF, 0xD1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define SPEAKER_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x21, 0xFF, 0x28)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x22, 0xFF, 0xA9)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x26, 0xFF, 0xD1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANALOG IMIC Primary MONO */ +#define AMIC_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/*back mic tx*/ +#define AMIC_SEC_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xFC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xFC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98 },\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 
0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANC Headset: Speakers on Primary Rx, Noise Microphones on Secondary Tx */ + +#define ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x95, 0xFF, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9B, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xC0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xD0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY RX */ +#define TTY_HEADSET_MONO_RX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x06)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x45)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + 
{ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xC5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x20, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* TTY TX */ +#define TTY_HEADSET_MONO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 
0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* FM HPH, RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_HEADSET_AB_CPLS_RX_48000 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* FM SPK, RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_SPEAKER_RX \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#define SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define DUAL_MIC_STEREO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#endif diff --git a/include/linux/msm_audio_mvs.h b/include/linux/msm_audio_mvs.h new file mode 100644 index 00000000..2ea486c4 --- /dev/null +++ b/include/linux/msm_audio_mvs.h @@ -0,0 +1,113 @@ +#ifndef __MSM_AUDIO_MVS_H +#define __MSM_AUDIO_MVS_H + +#include + +#define AUDIO_GET_MVS_CONFIG _IOW(AUDIO_IOCTL_MAGIC, \ + (AUDIO_MAX_COMMON_IOCTL_NUM + 0), unsigned) +#define AUDIO_SET_MVS_CONFIG _IOR(AUDIO_IOCTL_MAGIC, \ + (AUDIO_MAX_COMMON_IOCTL_NUM + 1), unsigned) + +/* MVS modes */ +#define MVS_MODE_IS733 0x1 +#define MVS_MODE_IS127 0x2 +#define MVS_MODE_4GV_NB 0x3 +#define MVS_MODE_4GV_WB 0x4 +#define MVS_MODE_AMR 0x5 +#define MVS_MODE_EFR 0x6 +#define MVS_MODE_FR 0x7 +#define MVS_MODE_HR 0x8 +#define MVS_MODE_LINEAR_PCM 0x9 +#define MVS_MODE_G711 0xA +#define 
MVS_MODE_PCM 0xC +#define MVS_MODE_AMR_WB 0xD +#define MVS_MODE_G729A 0xE +#define MVS_MODE_G711A 0xF +#define MVS_MODE_G722 0x10 +#define MVS_MODE_PCM_WB 0x80000000 + +enum msm_audio_amr_mode { + MVS_AMR_MODE_0475, /* AMR 4.75 kbps */ + MVS_AMR_MODE_0515, /* AMR 5.15 kbps */ + MVS_AMR_MODE_0590, /* AMR 5.90 kbps */ + MVS_AMR_MODE_0670, /* AMR 6.70 kbps */ + MVS_AMR_MODE_0740, /* AMR 7.40 kbps */ + MVS_AMR_MODE_0795, /* AMR 7.95 kbps */ + MVS_AMR_MODE_1020, /* AMR 10.20 kbps */ + MVS_AMR_MODE_1220, /* AMR 12.20 kbps */ + MVS_AMR_MODE_0660, /* AMR-WB 6.60 kbps */ + MVS_AMR_MODE_0885, /* AMR-WB 8.85 kbps */ + MVS_AMR_MODE_1265, /* AMR-WB 12.65 kbps */ + MVS_AMR_MODE_1425, /* AMR-WB 14.25 kbps */ + MVS_AMR_MODE_1585, /* AMR-WB 15.85 kbps */ + MVS_AMR_MODE_1825, /* AMR-WB 18.25 kbps */ + MVS_AMR_MODE_1985, /* AMR-WB 19.85 kbps */ + MVS_AMR_MODE_2305, /* AMR-WB 23.05 kbps */ + MVS_AMR_MODE_2385, /* AMR-WB 23.85 kbps */ + MVS_AMR_MODE_UNDEF +}; + +enum msm_audio_voc_rate { + MVS_VOC_0_RATE, /* Blank frame */ + MVS_VOC_8_RATE, /* 1/8 rate */ + MVS_VOC_4_RATE, /* 1/4 rate */ + MVS_VOC_2_RATE, /* 1/2 rate */ + MVS_VOC_1_RATE /* Full rate */ +}; + +enum msm_audio_amr_frame_type { + MVS_AMR_SPEECH_GOOD, /* Good speech frame */ + MVS_AMR_SPEECH_DEGRADED, /* Speech degraded */ + MVS_AMR_ONSET, /* Onset */ + MVS_AMR_SPEECH_BAD, /* Corrupt speech frame (bad CRC) */ + MVS_AMR_SID_FIRST, /* First silence descriptor */ + MVS_AMR_SID_UPDATE, /* Comfort noise frame */ + MVS_AMR_SID_BAD, /* Corrupt SID frame (bad CRC) */ + MVS_AMR_NO_DATA, /* Nothing to transmit */ + MVS_AMR_SPEECH_LOST /* Downlink speech lost */ +}; + +enum msm_audio_g711a_mode { + MVS_G711A_MODE_MULAW, + MVS_G711A_MODE_ALAW +}; + +enum mvs_g722_mode_type { + MVS_G722_MODE_01, + MVS_G722_MODE_02, + MVS_G722_MODE_03, + MVS_G722_MODE_MAX, + MVS_G722_MODE_UNDEF +}; + +enum msm_audio_g711a_frame_type { + MVS_G711A_SPEECH_GOOD, + MVS_G711A_SID, + MVS_G711A_NO_DATA, + MVS_G711A_ERASURE +}; + +enum msm_audio_g729a_frame_type { + MVS_G729A_NO_DATA, + MVS_G729A_SPEECH_GOOD, + MVS_G729A_SID, + MVS_G729A_ERASURE +}; + +struct msm_audio_mvs_config { + uint32_t mvs_mode; + uint32_t rate_type; + uint32_t dtx_mode; +}; + +#define MVS_MAX_VOC_PKT_SIZE 640 + +struct msm_audio_mvs_frame { + uint32_t frame_type; + uint32_t frame_rate; + uint32_t len; + uint8_t voc_pkt[MVS_MAX_VOC_PKT_SIZE]; + +}; + +#endif /* __MSM_AUDIO_MVS_H */ From 0638c19f95c8209df9d7292780887066d0359d1a Mon Sep 17 00:00:00 2001 From: Matt Filetto Date: Tue, 16 Apr 2013 13:50:03 -0700 Subject: [PATCH 091/117] GPU: update genlock to jb-chocolote For use with new display and media repos Change-Id: Ibf0eb4f9dbdfa37081af24b9e1fd3accc3ffce4e --- drivers/base/genlock.c | 199 +++++++++++++++++++++++++++++++--------- include/linux/genlock.h | 7 +- 2 files changed, 164 insertions(+), 42 deletions(-) diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c index 9f7a6008..1fb606f1 100644 --- a/drivers/base/genlock.c +++ b/drivers/base/genlock.c @@ -34,7 +34,15 @@ #define GENLOCK_LOG_ERR(fmt, args...) 
\ pr_err("genlock: %s: " fmt, __func__, ##args) +/* The genlock magic stored in the kernel private data is used to protect + * against the possibility of user space passing a valid fd to a + * non-genlock file for genlock_attach_lock() + */ +#define GENLOCK_MAGIC_OK 0xD2EAD10C +#define GENLOCK_MAGIC_BAD 0xD2EADBAD + struct genlock { + unsigned int magic; /* Magic for attach verification */ struct list_head active; /* List of handles holding lock */ spinlock_t lock; /* Spinlock to protect the lock internals */ wait_queue_head_t queue; /* Holding pen for processes pending lock */ @@ -56,7 +64,7 @@ struct genlock_handle { * released while another process tries to attach it */ -static DEFINE_SPINLOCK(genlock_file_lock); +static DEFINE_SPINLOCK(genlock_ref_lock); static void genlock_destroy(struct kref *kref) { @@ -68,10 +76,9 @@ static void genlock_destroy(struct kref *kref) * still active after the lock gets released */ - spin_lock(&genlock_file_lock); if (lock->file) lock->file->private_data = NULL; - spin_unlock(&genlock_file_lock); + lock->magic = GENLOCK_MAGIC_BAD; kfree(lock); } @@ -125,6 +132,7 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle) init_waitqueue_head(&lock->queue); spin_lock_init(&lock->lock); + lock->magic = GENLOCK_MAGIC_OK; lock->state = _UNLOCKED; /* @@ -193,21 +201,30 @@ struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd) * released and then attached */ - spin_lock(&genlock_file_lock); + spin_lock(&genlock_ref_lock); lock = file->private_data; - spin_unlock(&genlock_file_lock); fput(file); if (lock == NULL) { GENLOCK_LOG_ERR("File descriptor is invalid\n"); - return ERR_PTR(-EINVAL); + goto fail_invalid; + } + + if (lock->magic != GENLOCK_MAGIC_OK) { + GENLOCK_LOG_ERR("Magic is invalid - 0x%X\n", lock->magic); + goto fail_invalid; } handle->lock = lock; kref_get(&lock->refcount); + spin_unlock(&genlock_ref_lock); return lock; + +fail_invalid: + spin_unlock(&genlock_ref_lock); + return ERR_PTR(-EINVAL); } EXPORT_SYMBOL(genlock_attach_lock); @@ -278,7 +295,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, { unsigned long irqflags; int ret = 0; - unsigned int ticks = msecs_to_jiffies(timeout); + unsigned long ticks = msecs_to_jiffies(timeout); spin_lock_irqsave(&lock->lock, irqflags); @@ -297,12 +314,15 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, if (handle_has_lock(lock, handle)) { /* - * If the handle already holds the lock and the type matches, - * then just increment the active pointer. This allows the - * handle to do recursive locks + * If the handle already holds the lock and the lock type is + * a read lock then just increment the active pointer. This + * allows the handle to do recursive read locks. Recursive + * write locks are not allowed in order to support + * synchronization within a process using a single gralloc + * handle. */ - if (lock->state == op) { + if (lock->state == _RDLOCK && op == _RDLOCK) { handle->active++; goto done; } @@ -311,32 +331,45 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, * If the handle holds a write lock then the owner can switch * to a read lock if they want. Do the transition atomically * then wake up any pending waiters in case they want a read - * lock too. + * lock too. In order to support synchronization within a + * process the caller must explicity request to convert the + * lock type with the GENLOCK_WRITE_TO_READ flag. 
*/ - if (op == _RDLOCK && handle->active == 1) { - lock->state = _RDLOCK; - wake_up(&lock->queue); - goto done; + if (flags & GENLOCK_WRITE_TO_READ) { + if (lock->state == _WRLOCK && op == _RDLOCK) { + lock->state = _RDLOCK; + wake_up(&lock->queue); + goto done; + } else { + GENLOCK_LOG_ERR("Invalid state to convert " + "write to read\n"); + ret = -EINVAL; + goto done; + } } + } else { /* - * Otherwise the user tried to turn a read into a write, and we - * don't allow that. + * Check to ensure the caller has not attempted to convert a + * write to a read without holding the lock. */ - GENLOCK_LOG_ERR("Trying to upgrade a read lock to a write" - "lock\n"); - ret = -EINVAL; - goto done; - } - /* - * If we request a read and the lock is held by a read, then go - * ahead and share the lock - */ + if (flags & GENLOCK_WRITE_TO_READ) { + GENLOCK_LOG_ERR("Handle must have lock to convert " + "write to read\n"); + ret = -EINVAL; + goto done; + } - if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) - goto dolock; + /* + * If we request a read and the lock is held by a read, then go + * ahead and share the lock + */ + + if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) + goto dolock; + } /* Treat timeout 0 just like a NOBLOCK flag and return if the lock cannot be acquired without blocking */ @@ -346,15 +379,26 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, goto done; } - /* Wait while the lock remains in an incompatible state */ + /* + * Wait while the lock remains in an incompatible state + * state op wait + * ------------------- + * unlocked n/a no + * read read no + * read write yes + * write n/a yes + */ - while (lock->state != _UNLOCKED) { - int elapsed; + while ((lock->state == _RDLOCK && op == _WRLOCK) || + lock->state == _WRLOCK) { + signed long elapsed; spin_unlock_irqrestore(&lock->lock, irqflags); elapsed = wait_event_interruptible_timeout(lock->queue, - lock->state == _UNLOCKED, ticks); + lock->state == _UNLOCKED || + (lock->state == _RDLOCK && op == _RDLOCK), + ticks); spin_lock_irqsave(&lock->lock, irqflags); @@ -363,7 +407,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, goto done; } - ticks = elapsed; + ticks = (unsigned long) elapsed; } dolock: @@ -371,7 +415,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, list_add_tail(&handle->entry, &lock->active); lock->state = op; - handle->active = 1; + handle->active++; done: spin_unlock_irqrestore(&lock->lock, irqflags); @@ -380,7 +424,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, } /** - * genlock_lock - Acquire or release a lock + * genlock_lock - Acquire or release a lock (deprecated) * @handle - pointer to the genlock handle that is requesting the lock * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) * @flags - flags to control the operation @@ -391,10 +435,73 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, int genlock_lock(struct genlock_handle *handle, int op, int flags, uint32_t timeout) + { + struct genlock *lock; + unsigned long irqflags; + + int ret = 0; + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } + + switch (op) { + case GENLOCK_UNLOCK: + ret = _genlock_unlock(lock, handle); + break; + case GENLOCK_RDLOCK: + spin_lock_irqsave(&lock->lock, irqflags); + if
(handle_has_lock(lock, handle)) { + /* request the WRITE_TO_READ flag for compatibility */ + flags |= GENLOCK_WRITE_TO_READ; + } + spin_unlock_irqrestore(&lock->lock, irqflags); + /* fall through to take lock */ + case GENLOCK_WRLOCK: + ret = _genlock_lock(lock, handle, op, flags, timeout); + break; + default: + GENLOCK_LOG_ERR("Invalid lock operation\n"); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(genlock_lock); + +/** + * genlock_dreadlock - Acquire or release a lock + * @handle - pointer to the genlock handle that is requesting the lock + * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) + * @flags - flags to control the operation + * @timeout - optional timeout to wait for the lock to come free + * + * Returns: 0 on success or error code on failure + */ + +int genlock_dreadlock(struct genlock_handle *handle, int op, int flags, + uint32_t timeout) { - struct genlock *lock = handle->lock; + struct genlock *lock; + int ret = 0; + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + if (lock == NULL) { GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); return -EINVAL; @@ -416,7 +523,7 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags, return ret; } -EXPORT_SYMBOL(genlock_lock); +EXPORT_SYMBOL(genlock_dreadlock); /** * genlock_wait - Wait for the lock to be released @@ -429,7 +536,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) struct genlock *lock = handle->lock; unsigned long irqflags; int ret = 0; - unsigned int ticks = msecs_to_jiffies(timeout); + unsigned long ticks = msecs_to_jiffies(timeout); if (lock == NULL) { GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); @@ -449,7 +556,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) } while (lock->state != _UNLOCKED) { - int elapsed; + signed long elapsed; spin_unlock_irqrestore(&lock->lock, irqflags); @@ -463,7 +570,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) break; } - ticks = elapsed; + ticks = (unsigned long) elapsed; } done: @@ -493,7 +600,9 @@ void genlock_release_lock(struct genlock_handle *handle) } spin_unlock_irqrestore(&handle->lock->lock, flags); + spin_lock(&genlock_ref_lock); kref_put(&handle->lock->refcount, genlock_destroy); + spin_unlock(&genlock_ref_lock); handle->lock = NULL; handle->active = 0; } @@ -635,6 +744,14 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd, return genlock_lock(handle, param.op, param.flags, param.timeout); } + case GENLOCK_IOC_DREADLOCK: { + if (copy_from_user(&param, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + return genlock_dreadlock(handle, param.op, param.flags, + param.timeout); + } case GENLOCK_IOC_WAIT: { if (copy_from_user(&param, (void __user *) arg, sizeof(param))) diff --git a/include/linux/genlock.h b/include/linux/genlock.h index 2e9f9d68..128d676c 100644 --- a/include/linux/genlock.h +++ b/include/linux/genlock.h @@ -21,7 +21,8 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags, #define GENLOCK_WRLOCK 1 #define GENLOCK_RDLOCK 2 -#define GENLOCK_NOBLOCK (1 << 0) +#define GENLOCK_NOBLOCK (1 << 0) +#define GENLOCK_WRITE_TO_READ (1 << 1) struct genlock_lock { int fd; @@ -37,9 +38,13 @@ struct genlock_lock { struct genlock_lock) #define GENLOCK_IOC_ATTACH _IOW(GENLOCK_IOC_MAGIC, 2, \ struct genlock_lock) + +/* Deprecated */ #define GENLOCK_IOC_LOCK _IOW(GENLOCK_IOC_MAGIC, 3, \ struct genlock_lock) #define GENLOCK_IOC_RELEASE 
_IO(GENLOCK_IOC_MAGIC, 4) #define GENLOCK_IOC_WAIT _IOW(GENLOCK_IOC_MAGIC, 5, \ struct genlock_lock) +#define GENLOCK_IOC_DREADLOCK _IOW(GENLOCK_IOC_MAGIC, 6, \ + struct genlock_lock) #endif From d3fe2ea5261ba8b050c448f2e961669d20abdbd9 Mon Sep 17 00:00:00 2001 From: Matt Filetto Date: Sun, 14 Apr 2013 12:32:07 -0700 Subject: [PATCH 092/117] GPU: Correctly update Adreno/KGSL drivers to jb-chocolote. Change-Id: If9c8e2ff366acc3e6c08e6342f67d32c37a43984 --- drivers/gpu/msm/a2xx_reg.h | 3 +- drivers/gpu/msm/adreno.c | 212 +++++++++++++++++++++------- drivers/gpu/msm/adreno.h | 17 ++- drivers/gpu/msm/adreno_a2xx.c | 15 +- drivers/gpu/msm/adreno_debugfs.c | 5 + drivers/gpu/msm/adreno_drawctxt.c | 2 +- drivers/gpu/msm/adreno_postmortem.c | 2 +- drivers/gpu/msm/adreno_ringbuffer.c | 47 ++++-- drivers/gpu/msm/kgsl.c | 2 +- drivers/gpu/msm/kgsl_device.h | 7 +- drivers/gpu/msm/kgsl_gpummu.c | 5 +- drivers/gpu/msm/kgsl_iommu.c | 6 +- drivers/gpu/msm/kgsl_mmu.c | 2 +- drivers/gpu/msm/kgsl_pwrctrl.c | 44 ++++-- drivers/gpu/msm/z180.c | 47 +++--- drivers/gpu/msm/z180.h | 3 + 16 files changed, 307 insertions(+), 112 deletions(-) diff --git a/drivers/gpu/msm/a2xx_reg.h b/drivers/gpu/msm/a2xx_reg.h index 50b2745b..28b8dac5 100644 --- a/drivers/gpu/msm/a2xx_reg.h +++ b/drivers/gpu/msm/a2xx_reg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -355,6 +355,7 @@ union reg_cp_rb_cntl { #define REG_RB_MODECONTROL 0x2208 #define REG_RB_SURFACE_INFO 0x2000 #define REG_RB_SAMPLE_POS 0x220a +#define REG_RB_BC_CONTROL 0x0F01 #define REG_SCRATCH_ADDR 0x01DD #define REG_SCRATCH_REG0 0x0578 diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index d38528a3..e01bb605 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -113,10 +113,25 @@ static struct adreno_device device_3d0 = { }, .pfp_fw = NULL, .pm4_fw = NULL, - .wait_timeout = 10000, /* in milliseconds */ + .wait_timeout = 0, /* in milliseconds, 0 means disabled */ .ib_check_level = 0, }; +/* This set of registers are used for Hang detection + * If the values of these registers are same after + * KGSL_TIMEOUT_PART time, GPU hang is reported in + * kernel log. + */ +unsigned int hang_detect_regs[] = { + REG_RBBM_STATUS, + REG_CP_RB_RPTR, + REG_CP_IB1_BASE, + REG_CP_IB1_BUFSZ, + REG_CP_IB2_BASE, + REG_CP_IB2_BUFSZ, +}; + +const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs); /* * This is the master list of all GPU cores that are supported by this @@ -277,6 +292,12 @@ static void adreno_setstate(struct kgsl_device *device, struct kgsl_context *context; struct adreno_context *adreno_ctx = NULL; + /* + * Fix target freeze issue by adding TLB flush for each submit + * on A20X based targets. + */ + if (adreno_is_a20x(adreno_dev)) + flags |= KGSL_MMUFLAGS_TLBFLUSH; /* * If possible, then set the state via the command stream to avoid * a CPU idle. 
Otherwise, use the default setstate which uses register @@ -530,7 +551,9 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram) } kgsl_mh_start(device); - + /* Assign correct RBBM status register to hang detect regs + */ + hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status; if (kgsl_mmu_start(device)) goto error_clk_off; @@ -555,7 +578,10 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram) adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000); - adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442); + if (adreno_is_a200(adreno_dev)) + adreno_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF); + else + adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442); if (adreno_is_a225(adreno_dev)) { /* Enable large instruction store for A225 */ @@ -718,7 +744,7 @@ adreno_recover_hang(struct kgsl_device *device, rb->timestamp = timestamp; /* wait for idle */ - ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + ret = adreno_idle(device); done: kgsl_sharedmem_writel(&device->memstore, KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), @@ -930,61 +956,93 @@ static inline void adreno_poke(struct kgsl_device *device) adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr); } -/* Caller must hold the device mutex. */ -int adreno_idle(struct kgsl_device *device, unsigned int timeout) +static int adreno_ringbuffer_drain(struct kgsl_device *device, + unsigned int *regs) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + unsigned long wait; + unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); + + if (!(rb->flags & KGSL_FLAGS_STARTED)) + return 0; + + /* + * The first time into the loop, wait for 100 msecs and kick wptr again + * to ensure that the hardware has updated correctly. After that, kick + * it periodically every KGSL_TIMEOUT_PART msecs until the timeout + * expires + */ + + wait = jiffies + msecs_to_jiffies(100); + + adreno_poke(device); + + do { + if (time_after(jiffies, wait)) { + adreno_poke(device); + + /* Check to see if the core is hung */ + if (adreno_hang_detect(device, regs)) + return -ETIMEDOUT; + + wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); + } + GSL_RB_GET_READPTR(rb, &rb->rptr); + + if (time_after(jiffies, timeout)) { + KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n", + rb->rptr, rb->wptr); + return -ETIMEDOUT; + } + } while (rb->rptr != rb->wptr); + + return 0; +} + +/* Caller must hold the device mutex. */ +int adreno_idle(struct kgsl_device *device) +{ unsigned int rbbm_status; - unsigned long wait_timeout = - msecs_to_jiffies(adreno_dev->wait_timeout); unsigned long wait_time; unsigned long wait_time_part; - unsigned int msecs; - unsigned int msecs_first; - unsigned int msecs_part; + unsigned int prev_reg_val[hang_detect_regs_count]; + + memset(prev_reg_val, 0, sizeof(prev_reg_val)); kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2, 0x00000000, 0x80000000); - /* first, wait until the CP has consumed all the commands in - * the ring buffer - */ + retry: - if (rb->flags & KGSL_FLAGS_STARTED) { - msecs = adreno_dev->wait_timeout; - msecs_first = (msecs <= 100) ? 
((msecs + 4) / 5) : 100; - msecs_part = (msecs - msecs_first + 3) / 4; - wait_time = jiffies + wait_timeout; - wait_time_part = jiffies + msecs_to_jiffies(msecs_first); - adreno_poke(device); - do { - if (time_after(jiffies, wait_time_part)) { - adreno_poke(device); - wait_time_part = jiffies + - msecs_to_jiffies(msecs_part); - } - GSL_RB_GET_READPTR(rb, &rb->rptr); - if (time_after(jiffies, wait_time)) { - KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n", - rb->rptr, rb->wptr); - goto err; - } - } while (rb->rptr != rb->wptr); - } + /* First, wait for the ringbuffer to drain */ + if (adreno_ringbuffer_drain(device, prev_reg_val)) + goto err; /* now, wait for the GPU to finish its operations */ - wait_time = jiffies + wait_timeout; + wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); + wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); + while (time_before(jiffies, wait_time)) { adreno_regread(device, REG_RBBM_STATUS, &rbbm_status); if (rbbm_status == 0x110) return 0; } + /* Dont wait for timeout, detect hang faster. + */ + if (time_after(jiffies, wait_time_part)) { + wait_time_part = jiffies + + msecs_to_jiffies(KGSL_TIMEOUT_PART); + if ((adreno_hang_detect(device, prev_reg_val))) + goto err; +} + + err: KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n"); if (KGSL_STATE_DUMP_AND_RECOVER != device->state && !adreno_dump_and_recover(device)) { - wait_time = jiffies + wait_timeout; + wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); goto retry; } return -ETIMEDOUT; @@ -1024,7 +1082,7 @@ static int adreno_suspend_context(struct kgsl_device *device) /* switch to NULL ctxt */ if (adreno_dev->drawctxt_active != NULL) { adreno_drawctxt_switch(adreno_dev, NULL, 0); - status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + status = adreno_idle(device); } return status; @@ -1217,6 +1275,30 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device, __ret; \ }) + +unsigned int adreno_hang_detect(struct kgsl_device *device, + unsigned int *prev_reg_val) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int curr_reg_val[hang_detect_regs_count]; + unsigned int hang_detected = 1; + unsigned int i; + + if (!adreno_dev->fast_hang_detect) + return 0; + + for (i = 0; i < hang_detect_regs_count; i++) { + adreno_regread(device, hang_detect_regs[i], + &curr_reg_val[i]); + if (curr_reg_val[i] != prev_reg_val[i]) { + prev_reg_val[i] = curr_reg_val[i]; + hang_detected = 0; + } + } + + return hang_detected; +} + /* MUST be called with the device mutex held */ static int adreno_waittimestamp(struct kgsl_device *device, unsigned int timestamp, @@ -1227,12 +1309,16 @@ static int adreno_waittimestamp(struct kgsl_device *device, static uint io_cnt; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct kgsl_pwrctrl *pwr = &device->pwrctrl; - int retries; - unsigned int msecs_first; - unsigned int msecs_part; + int retries = 0; + + unsigned int time_elapsed = 0; + unsigned int prev_reg_val[hang_detect_regs_count]; + unsigned int wait; + + memset(prev_reg_val, 0, sizeof(prev_reg_val)); /* Don't wait forever, set a max value for now */ - if (msecs == -1) + if (msecs == KGSL_TIMEOUT_DEFAULT) msecs = adreno_dev->wait_timeout; if (timestamp_cmp(timestamp, adreno_dev->ringbuffer.timestamp) > 0) { @@ -1243,13 +1329,18 @@ static int adreno_waittimestamp(struct kgsl_device *device, goto done; } - /* Keep the first timeout as 100msecs before rewriting - * the WPTR. Less visible impact if the WPTR has not - * been updated properly. 
+ /* + * Make the first timeout interval 100 msecs and then try to kick the + * wptr again. This helps to ensure the wptr is updated properly. If + * the requested timeout is less than 100 msecs, then wait 20msecs which + * is the minimum amount of time we can safely wait at 100HZ */ - msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100; - msecs_part = (msecs - msecs_first + 3) / 4; - for (retries = 0; retries < 5; retries++) { + if (msecs == 0 || msecs >= 100) + wait = 100; + else + wait = 20; + + do { if (kgsl_check_timestamp(device, timestamp)) { /* if the timestamp happens while we're not * waiting, there's a chance that an interrupt @@ -1265,6 +1356,11 @@ static int adreno_waittimestamp(struct kgsl_device *device, if (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction) io = 0; + + if ((retries > 0) && + (adreno_hang_detect(device, prev_reg_val))) + goto hang_dump; + mutex_unlock(&device->mutex); /* We need to make sure that the process is * placed in wait-q before its condition is called @@ -1272,9 +1368,9 @@ static int adreno_waittimestamp(struct kgsl_device *device, status = kgsl_wait_event_interruptible_timeout( device->wait_queue, kgsl_check_interrupt_timestamp(device, - timestamp), - msecs_to_jiffies(retries ? - msecs_part : msecs_first), io); + timestamp), + msecs_to_jiffies(wait), io); + mutex_lock(&device->mutex); if (status > 0) { @@ -1286,7 +1382,15 @@ static int adreno_waittimestamp(struct kgsl_device *device, goto done; } /*this wait timed out*/ - } + + time_elapsed += wait; + wait = KGSL_TIMEOUT_PART; + + retries++; + + } while (!msecs || time_elapsed < msecs); + +hang_dump: /* Check if timestamp has retired here because we may have hit * recovery which can take some time and cause waiting threads @@ -1362,8 +1466,8 @@ static long adreno_ioctl(struct kgsl_device_private *dev_priv, static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq) { - gpu_freq /= 1000000; - return ticks / gpu_freq; + s64 ticksus = (s64)ticks*1000000; + return div_u64(ticksus, gpu_freq); } static void adreno_power_stats(struct kgsl_device *device, diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 9c75f6dc..d50eec6c 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -49,6 +49,12 @@ #define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50 +/* One cannot wait forever for the core to idle, so set an upper limit to the + * amount of time to wait for the core to go idle + */ + +#define ADRENO_IDLE_TIMEOUT (20 * 1000) + enum adreno_gpurev { ADRENO_REV_UNKNOWN = 0, ADRENO_REV_A200 = 200, @@ -78,10 +84,12 @@ struct adreno_device { unsigned int istore_size; unsigned int pix_shader_start; unsigned int ib_check_level; + unsigned int fast_hang_detect; }; struct adreno_gpudev { /* keeps track of when we need to execute the draw workaround code */ + unsigned int reg_rbbm_status; int ctx_switches_since_last_draw; int (*ctxt_create)(struct adreno_device *, struct adreno_context *); void (*ctxt_save)(struct adreno_device *, struct adreno_context *); @@ -124,7 +132,11 @@ extern const unsigned int a220_registers[]; extern const unsigned int a200_registers_count; extern const unsigned int a220_registers_count; -int adreno_idle(struct kgsl_device *device, unsigned int timeout); +extern unsigned int hang_detect_regs[]; +extern const unsigned int hang_detect_regs_count; + + +int adreno_idle(struct kgsl_device *device); void adreno_regread(struct kgsl_device *device, unsigned int offsetwords, unsigned int *value); void adreno_regwrite(struct kgsl_device *device, unsigned 
int offsetwords, @@ -143,6 +155,9 @@ void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain, int adreno_dump_and_recover(struct kgsl_device *device); +unsigned int adreno_hang_detect(struct kgsl_device *device, + unsigned int *prev_reg_val); + static inline int adreno_is_a200(struct adreno_device *adreno_dev) { return (adreno_dev->gpurev == ADRENO_REV_A200); diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c index 9efd587f..9b9fcc8b 100644 --- a/drivers/gpu/msm/adreno_a2xx.c +++ b/drivers/gpu/msm/adreno_a2xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -581,6 +581,12 @@ static void build_regsave_cmds(struct adreno_device *adreno_dev, *cmd++ = REG_TP0_CHICKEN; *cmd++ = tmp_ctx.reg_values[1]; + if (adreno_is_a20x(adreno_dev)) { + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_RB_BC_CONTROL; + *cmd++ = tmp_ctx.reg_values[2]; + } + if (adreno_is_a22x(adreno_dev)) { unsigned int i; unsigned int j = 2; @@ -1107,6 +1113,12 @@ static void build_regrestore_cmds(struct adreno_device *adreno_dev, tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate); *cmd++ = 0x00000000; + if (adreno_is_a20x(adreno_dev)) { + *cmd++ = cp_type0_packet(REG_RB_BC_CONTROL, 1); + tmp_ctx.reg_values[2] = virt2gpu(cmd, &drawctxt->gpustate); + *cmd++ = 0x00000000; + } + if (adreno_is_a22x(adreno_dev)) { unsigned int i; unsigned int j = 2; @@ -1780,6 +1792,7 @@ void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot, int *remain, int hang); struct adreno_gpudev adreno_a2xx_gpudev = { + .reg_rbbm_status=REG_RBBM_STATUS, .ctxt_create = a2xx_drawctxt_create, .ctxt_save = a2xx_drawctxt_save, .ctxt_restore = a2xx_drawctxt_restore, diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 81d3419f..66935951 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -364,6 +364,11 @@ void adreno_debugfs_init(struct kgsl_device *device) debugfs_create_u32("ib_check", 0644, device->d_debugfs, &adreno_dev->ib_check_level); + /* By Default enable fast hang detection */ + adreno_dev->fast_hang_detect = 1; + debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs, + &adreno_dev->fast_hang_detect); + /* Create post mortem control files */ pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs); diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index 990af5f0..4fdbebc9 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -192,7 +192,7 @@ void adreno_drawctxt_destroy(struct kgsl_device *device, adreno_drawctxt_switch(adreno_dev, NULL, 0); } - adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + adreno_idle(device); kgsl_sharedmem_free(&drawctxt->gpustate); kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow); diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c index af4bb85b..f06248e9 100644 --- a/drivers/gpu/msm/adreno_postmortem.c +++ b/drivers/gpu/msm/adreno_postmortem.c @@ -715,7 +715,7 @@ int adreno_postmortem_dump(struct kgsl_device *device, int manual) } if (device->state == KGSL_STATE_ACTIVE) - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); } KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE 
POWERLEVEL = %08X", diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index f9d9df1a..49f509c7 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -47,7 +47,16 @@ #define A225_PFP_FW "a225_pfp.fw" #define A225_PM4_FW "a225_pm4.fw" -static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb) + +/* + * CP DEBUG settings for all cores: + * DYNAMIC_CLK_DISABLE [27] - turn off the dynamic clock control + * PROG_END_PTR_ENABLE [25] - Allow 128 bit writes to the VBIF + */ + +#define CP_DEBUG_DEFAULT ((1 << 27) | (1 << 25)) + +void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb) { BUG_ON(rb->wptr == 0); @@ -71,9 +80,12 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, unsigned int freecmds; unsigned int *cmds; uint cmds_gpu; - struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); - unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout); unsigned long wait_time; + unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); + unsigned long wait_time_part; + unsigned int prev_reg_val[hang_detect_regs_count]; + + memset(prev_reg_val, 0, sizeof(prev_reg_val)); /* if wptr ahead, fill the remaining with NOPs */ if (wptr_ahead) { @@ -101,6 +113,7 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, } wait_time = jiffies + wait_timeout; + wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); /* wait for space in ringbuffer */ while (1) { GSL_RB_GET_READPTR(rb, &rb->rptr); @@ -110,16 +123,34 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, if (freecmds == 0 || freecmds > numcmds) break; + /* Dont wait for timeout, detect hang faster. + */ + if (time_after(jiffies, wait_time_part)) { + wait_time_part = jiffies + + msecs_to_jiffies(KGSL_TIMEOUT_PART); + if ((adreno_hang_detect(rb->device, + prev_reg_val))){ + KGSL_DRV_ERR(rb->device, + "Hang detected while waiting for freespace in" + "ringbuffer rptr: 0x%x, wptr: 0x%x\n", + rb->rptr, rb->wptr); + goto err; + } + } + if (time_after(jiffies, wait_time)) { KGSL_DRV_ERR(rb->device, "Timed out while waiting for freespace in ringbuffer " "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr); - if (!adreno_dump_and_recover(rb->device)) + goto err; + } + continue; +err: + if (!adreno_dump_and_recover(rb->device)) wait_time = jiffies + wait_timeout; else /* GPU is hung and we cannot recover */ BUG(); - } } } @@ -241,7 +272,7 @@ static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw[0]); - adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); + adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT); adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, @@ -433,7 +464,7 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram) adreno_ringbuffer_submit(rb); /* idle device to validate ME INIT */ - status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + status = adreno_idle(device); if (status == 0) rb->flags |= KGSL_FLAGS_STARTED; @@ -882,7 +913,7 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, * this is conservative but works reliably and is ok * even for performance simulations */ - adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + adreno_idle(device); #endif return 0; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c 
index 7a2857c3..9f615d3f 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -417,7 +417,7 @@ static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state) break; case KGSL_STATE_ACTIVE: /* Wait for the device to become idle */ - device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT); + device->ftbl->idle(device); case KGSL_STATE_NAP: case KGSL_STATE_SLEEP: /* Get the completion ready to be waited upon. */ diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index fdb4a6e2..ba3c3290 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -26,6 +26,7 @@ #define KGSL_TIMEOUT_NONE 0 #define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF +#define KGSL_TIMEOUT_PART 2000 /* 2 sec */ #define FIRST_TIMEOUT (HZ / 2) @@ -67,7 +68,7 @@ struct kgsl_functable { unsigned int offsetwords, unsigned int *value); void (*regwrite) (struct kgsl_device *device, unsigned int offsetwords, unsigned int value); - int (*idle) (struct kgsl_device *device, unsigned int timeout); + int (*idle) (struct kgsl_device *device); unsigned int (*isidle) (struct kgsl_device *device); int (*suspend_context) (struct kgsl_device *device); int (*start) (struct kgsl_device *device, unsigned int init_ram); @@ -262,9 +263,9 @@ static inline void kgsl_regwrite(struct kgsl_device *device, device->ftbl->regwrite(device, offsetwords, value); } -static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout) +static inline int kgsl_idle(struct kgsl_device *device) { - return device->ftbl->idle(device, timeout); + return device->ftbl->idle(device); } static inline unsigned int kgsl_gpuid(struct kgsl_device *device) diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c index 7458cf28..b1d7f319 100644 --- a/drivers/gpu/msm/kgsl_gpummu.c +++ b/drivers/gpu/msm/kgsl_gpummu.c @@ -520,7 +520,8 @@ static void kgsl_gpummu_default_setstate(struct kgsl_device *device, return; if (flags & KGSL_MMUFLAGS_PTUPDATE) { - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + + kgsl_idle(device); gpummu_pt = device->mmu.hwpagetable->priv; kgsl_regwrite(device, MH_MMU_PT_BASE, gpummu_pt->base.gpuaddr); @@ -615,7 +616,7 @@ static int kgsl_gpummu_start(struct kgsl_device *device) kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config); /* idle device */ - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); /* enable axi interrupts */ kgsl_regwrite(device, MH_INTERRUPT_MASK, diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 81e237f1..889d679b 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -154,7 +154,7 @@ static void kgsl_iommu_setstate(struct kgsl_device *device, * specified page table */ if (mmu->hwpagetable != pagetable) { - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); kgsl_detach_pagetable_iommu_domain(mmu); mmu->hwpagetable = pagetable; if (mmu->hwpagetable) @@ -308,11 +308,11 @@ kgsl_iommu_get_current_ptbase(struct kgsl_device *device) { /* Current base is always the hwpagetables domain as we * do not use per process pagetables right not for iommu. - * This will change when we switch to per process pagetables. 
- */ + * This will change when we switch to per process pagetables.*/ return (unsigned int)device->mmu.hwpagetable->priv; } + struct kgsl_mmu_ops iommu_ops = { .mmu_init = kgsl_iommu_init, .mmu_close = kgsl_iommu_close, diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c index 975aa837..5dc5de31 100644 --- a/drivers/gpu/msm/kgsl_mmu.c +++ b/drivers/gpu/msm/kgsl_mmu.c @@ -526,7 +526,7 @@ void kgsl_mh_start(struct kgsl_device *device) struct kgsl_mh *mh = &device->mh; /* force mmu off to for now*/ kgsl_regwrite(device, MH_MMU_CONFIG, 0); - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); /* define physical memory range accessible by the core */ kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index e9f21438..45cfa704 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -64,6 +64,9 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, new_level >= pwr->thermal_pwrlevel && new_level != pwr->active_pwrlevel) { struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level]; + int diff = new_level - pwr->active_pwrlevel; + int d = (diff > 0) ? 1 : -1; + int level = pwr->active_pwrlevel; pwr->active_pwrlevel = new_level; if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) || (device->state == KGSL_STATE_NAP)) { @@ -73,9 +76,16 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, * Idle the gpu core before changing the clock freq. */ if (pwr->idle_needed == true) - device->ftbl->idle(device, - KGSL_TIMEOUT_DEFAULT); - clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq); + device->ftbl->idle(device); + + /* Don't shift by more than one level at a time to + * avoid glitches. + */ + while (level != new_level) { + level += d; + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels[level].gpu_freq); + } } if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) { if (pwr->pcl) @@ -346,23 +356,34 @@ void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, for (i = KGSL_MAX_CLKS - 1; i > 0; i--) if (pwr->grp_clks[i]) clk_disable(pwr->grp_clks[i]); + /* High latency clock maintenance. */ if ((pwr->pwrlevels[0].gpu_freq > 0) && - (requested_state != KGSL_STATE_NAP)) + (requested_state != KGSL_STATE_NAP)) { clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->num_pwrlevels - 1]. gpu_freq); + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) + if (pwr->grp_clks[i]) + clk_unprepare(pwr->grp_clks[i]); + } kgsl_pwrctrl_busy_time(device, true); } } else if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) { trace_kgsl_clk(device, state); - if ((pwr->pwrlevels[0].gpu_freq > 0) && - (device->state != KGSL_STATE_NAP)) - clk_set_rate(pwr->grp_clks[0], - pwr->pwrlevels[pwr->active_pwrlevel]. + /* High latency clock maintenance. */ + if (device->state != KGSL_STATE_NAP) { + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) + if (pwr->grp_clks[i]) + clk_prepare(pwr->grp_clks[i]); + + if (pwr->pwrlevels[0].gpu_freq > 0) + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels + [pwr->active_pwrlevel]. 
gpu_freq); - + } /* as last step, enable grp_clk this is to let GPU interrupt to come */ for (i = KGSL_MAX_CLKS - 1; i > 0; i--) @@ -852,9 +873,8 @@ void kgsl_pwrctrl_wake(struct kgsl_device *device) mod_timer(&device->idle_timer, jiffies + device->pwrctrl.interval_timeout); wake_lock(&device->idle_wakelock); - if (device->pwrctrl.restore_slumber == false) - pm_qos_update_request(&device->pm_qos_req_dma, - GPU_SWFI_LATENCY); + pm_qos_update_request(&device->pm_qos_req_dma, + GPU_SWFI_LATENCY); case KGSL_STATE_ACTIVE: break; default: diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c index 1b1ca0ba..fa536487 100644 --- a/drivers/gpu/msm/z180.c +++ b/drivers/gpu/msm/z180.c @@ -289,15 +289,22 @@ static int z180_setup_pt(struct kgsl_device *device, return result; } -static inline unsigned int rb_offset(unsigned int index) +static inline unsigned int rb_offset(unsigned int timestamp) { - return index*sizeof(unsigned int)*(Z180_PACKET_SIZE); + return (timestamp % Z180_PACKET_COUNT) + *sizeof(unsigned int)*(Z180_PACKET_SIZE); } -static void addmarker(struct z180_ringbuffer *rb, unsigned int index) +static inline unsigned int rb_gpuaddr(struct z180_device *z180_dev, + unsigned int timestamp) +{ + return z180_dev->ringbuffer.cmdbufdesc.gpuaddr + rb_offset(timestamp); +} + +static void addmarker(struct z180_ringbuffer *rb, unsigned int timestamp) { char *ptr = (char *)(rb->cmdbufdesc.hostptr); - unsigned int *p = (unsigned int *)(ptr + rb_offset(index)); + unsigned int *p = (unsigned int *)(ptr + rb_offset(timestamp)); *p++ = Z180_STREAM_PACKET; *p++ = (Z180_MARKER_CMD | 5); @@ -311,11 +318,11 @@ static void addmarker(struct z180_ringbuffer *rb, unsigned int index) *p++ = ADDR_VGV3_LAST << 24; } -static void addcmd(struct z180_ringbuffer *rb, unsigned int index, +static void addcmd(struct z180_ringbuffer *rb, unsigned int timestamp, unsigned int cmd, unsigned int nextcnt) { char * ptr = (char *)(rb->cmdbufdesc.hostptr); - unsigned int *p = (unsigned int *)(ptr + (rb_offset(index) + unsigned int *p = (unsigned int *)(ptr + (rb_offset(timestamp) + (Z180_MARKER_SIZE * sizeof(unsigned int)))); *p++ = Z180_STREAM_PACKET_CALL; @@ -338,7 +345,7 @@ static void z180_cmdstream_start(struct kgsl_device *device) z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4); z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR, - z180_dev->ringbuffer.cmdbufdesc.gpuaddr); + rb_gpuaddr(z180_dev, z180_dev->current_timestamp)); z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5); @@ -362,7 +369,7 @@ static int room_in_rb(struct z180_device *device) return ts_diff < Z180_PACKET_COUNT; } -static int z180_idle(struct kgsl_device *device, unsigned int timeout) +static int z180_idle(struct kgsl_device *device) { int status = 0; struct z180_device *z180_dev = Z180_DEVICE(device); @@ -370,7 +377,7 @@ static int z180_idle(struct kgsl_device *device, unsigned int timeout) if (timestamp_cmp(z180_dev->current_timestamp, z180_dev->timestamp) > 0) status = z180_wait(device, z180_dev->current_timestamp, - timeout); + Z180_IDLE_TIMEOUT); if (status) KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n"); @@ -389,9 +396,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, long result = 0; unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int); unsigned int cnt = 5; - unsigned int nextaddr = 0; - unsigned int index = 0; - unsigned int nextindex; + unsigned int old_timestamp = 0; unsigned int nextcnt = Z180_STREAM_END_CMD | 5; struct kgsl_mem_entry *entry = NULL; unsigned int cmd; @@ -461,26 +466,22 @@ 
z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, } result = 0; - index = z180_dev->current_timestamp % Z180_PACKET_COUNT; + old_timestamp = z180_dev->current_timestamp; z180_dev->current_timestamp++; - nextindex = z180_dev->current_timestamp % Z180_PACKET_COUNT; *timestamp = z180_dev->current_timestamp; z180_dev->ringbuffer.prevctx = context->id; - addcmd(&z180_dev->ringbuffer, index, cmd + ofs, cnt); + addcmd(&z180_dev->ringbuffer, old_timestamp, cmd + ofs, cnt); kgsl_pwrscale_busy(device); /* Make sure the next ringbuffer entry has a marker */ - addmarker(&z180_dev->ringbuffer, nextindex); - - nextaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr - + rb_offset(nextindex); + addmarker(&z180_dev->ringbuffer, z180_dev->current_timestamp); /* monkey patch the IB so that it jumps back to the ringbuffer */ kgsl_sharedmem_writel(&entry->memdesc, - ((sizedwords + 1) * sizeof(unsigned int)), - nextaddr); + ((sizedwords + 1) * sizeof(unsigned int)), + rb_gpuaddr(z180_dev, z180_dev->current_timestamp)); kgsl_sharedmem_writel(&entry->memdesc, ((sizedwords + 2) * sizeof(unsigned int)), nextcnt); @@ -592,7 +593,7 @@ static int z180_start(struct kgsl_device *device, unsigned int init_ram) static int z180_stop(struct kgsl_device *device) { device->ftbl->irqctrl(device, 0); - z180_idle(device, KGSL_TIMEOUT_DEFAULT); + z180_idle(device); del_timer_sync(&device->idle_timer); @@ -859,7 +860,7 @@ z180_drawctxt_destroy(struct kgsl_device *device, { struct z180_device *z180_dev = Z180_DEVICE(device); - z180_idle(device, KGSL_TIMEOUT_DEFAULT); + z180_idle(device); if (z180_dev->ringbuffer.prevctx == context->id) { z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT; diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h index e5c5ef30..2962ccd8 100644 --- a/drivers/gpu/msm/z180.h +++ b/drivers/gpu/msm/z180.h @@ -21,6 +21,9 @@ #define Z180_DEFAULT_PWRSCALE_POLICY NULL +/* Wait a maximum of 10 seconds when trying to idle the core */ +#define Z180_IDLE_TIMEOUT (10 * 1000) + struct z180_ringbuffer { unsigned int prevctx; struct kgsl_memdesc cmdbufdesc; From ac9bb01c47040de9bed0ca4920e469ae6aab830e Mon Sep 17 00:00:00 2001 From: David Hays Date: Mon, 6 May 2013 18:44:59 -0500 Subject: [PATCH 093/117] Kbuild: copy msm headers Change-Id: If0139897c19254c3e40f6b8925b9351c1d9514ec --- include/Kbuild | 1 + include/linux/Kbuild | 19 + include/linux/msm_ion.h | 23 + include/linux/msm_mdp.h | 4 + include/linux/spi_aic3254.h | 177 ++++ include/linux/tpa2051d3.h | 77 ++ include/media/Kbuild | 1 + include/media/msm_camera.h | 1457 ++++++++---------------------- include/sound/Kbuild | 2 + include/sound/compress_offload.h | 81 ++ include/sound/compress_params.h | 241 +++++ 11 files changed, 1022 insertions(+), 1061 deletions(-) create mode 100644 include/linux/msm_ion.h create mode 100644 include/linux/spi_aic3254.h create mode 100644 include/linux/tpa2051d3.h create mode 100644 include/media/Kbuild create mode 100644 include/sound/compress_offload.h create mode 100644 include/sound/compress_params.h diff --git a/include/Kbuild b/include/Kbuild index 8d226bfa..5f65ac28 100644 --- a/include/Kbuild +++ b/include/Kbuild @@ -10,3 +10,4 @@ header-y += video/ header-y += drm/ header-y += xen/ header-y += scsi/ +header-y += media/ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 01f63627..adfcd107 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -399,3 +399,22 @@ header-y += wireless.h header-y += x25.h header-y += xattr.h header-y += xfrm.h +header-y += msm_audio_aac.h +header-y += 
msm_audio_acdb.h +header-y += msm_audio_amrnb.h +header-y += msm_audio_mvs.h +header-y += msm_audio.h +header-y += msm_audio_qcp.h +header-y += msm_ion.h +header-y += msm_kgsl.h +header-y += msm_mdp.h +header-y += msm_rotator.h +header-y += msm_vidc_dec.h +header-y += msm_vidc_enc.h +header-y += android_pmem.h +header-y += ashmem.h +header-y += genlock.h +header-y += ion.h +header-y += spi_aic3254.h +header-y += tpa2051d3.h +header-y += videodev2.h diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h new file mode 100644 index 00000000..6ba89e81 --- /dev/null +++ b/include/linux/msm_ion.h @@ -0,0 +1,23 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! + *** + **************************************************************************** + ****************************************************************************/ +#ifndef _LINUX_MSM_ION_H +#define _LINUX_MSM_ION_H +#include +#endif + diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h index f04a1d75..188d31f5 100644 --- a/include/linux/msm_mdp.h +++ b/include/linux/msm_mdp.h @@ -150,6 +150,10 @@ enum { #define MDP_SOURCE_ROTATED_90 0x00100000 #define MDP_MEMORY_ID_TYPE_FB 0x00001000 #define MDP_DPP_HSIC 0x00080000 +#define MDP_BACKEND_COMPOSITION 0x00040000 +#define MDP_BORDERFILL_SUPPORTED 0x00010000 +#define MDP_SECURE_OVERLAY_SESSION 0x00008000 +#define MDP_MEMORY_ID_TYPE_FB 0x00001000 #define MDP_TRANSP_NOP 0xffffffff #define MDP_ALPHA_NOP 0xff diff --git a/include/linux/spi_aic3254.h b/include/linux/spi_aic3254.h new file mode 100644 index 00000000..1db8df38 --- /dev/null +++ b/include/linux/spi_aic3254.h @@ -0,0 +1,177 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! 
+ *** + **************************************************************************** + ****************************************************************************/ +#ifndef __SPI_AIC3254_H__ +#define __SPI_AIC3254_H__ +#include +typedef struct _CODEC_SPI_CMD { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned char act; + unsigned char reg; + unsigned char data; +} CODEC_SPI_CMD; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +typedef struct _CODEC_SPI_CMD_PARAM { + CODEC_SPI_CMD *data; + unsigned int len; +} CODEC_SPI_CMD_PARAM; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +struct AIC3254_PARAM { + unsigned int row_num; + unsigned int col_num; + void *cmd_data; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +struct CODEC_CFG { + unsigned char tb_idx; + unsigned char index; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +#define AIC3254_IOCTL_MAGIC 's' +#define AIC3254_SET_TX_PARAM _IOW(AIC3254_IOCTL_MAGIC, 0x10, unsigned) +#define AIC3254_SET_RX_PARAM _IOW(AIC3254_IOCTL_MAGIC, 0x11, unsigned) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define AIC3254_CONFIG_TX _IOW(AIC3254_IOCTL_MAGIC, 0x12, unsigned int) +#define AIC3254_CONFIG_RX _IOW(AIC3254_IOCTL_MAGIC, 0x13, unsigned int) +#define AIC3254_SET_DSP_PARAM _IOW(AIC3254_IOCTL_MAGIC, 0x20, unsigned) +#define AIC3254_CONFIG_MEDIA _IOW(AIC3254_IOCTL_MAGIC, 0x21, unsigned int) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define AIC3254_CONFIG_VOICE _IOW(AIC3254_IOCTL_MAGIC, 0x22, unsigned int) +#define AIC3254_CONFIG_VOLUME_L _IOW(AIC3254_IOCTL_MAGIC, 0x23, unsigned int) +#define AIC3254_CONFIG_VOLUME_R _IOW(AIC3254_IOCTL_MAGIC, 0x24, unsigned int) +#define AIC3254_POWERDOWN _IOW(AIC3254_IOCTL_MAGIC, 0x25, unsigned int) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define AIC3254_LOOPBACK _IOW(AIC3254_IOCTL_MAGIC, 0x26, unsigned int) +#define AIC3254_DUMP_PAGES _IOW(AIC3254_IOCTL_MAGIC, 0x30, unsigned int) +#define AIC3254_READ_REG _IOWR(AIC3254_IOCTL_MAGIC, 0x31, unsigned) +#define AIC3254_WRITE_REG _IOW(AIC3254_IOCTL_MAGIC, 0x32, unsigned) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define AIC3254_RESET _IOW(AIC3254_IOCTL_MAGIC, 0x33, unsigned int) +#define AIC3254_MAX_PAGES 255 +#define AIC3254_MAX_REGS 128 +#define AIC3254_MAX_RETRY 10 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define IO_CTL_ROW_MAX 64 +#define IO_CTL_COL_MAX 1024 +#define MINIDSP_ROW_MAX 32 +#define MINIDSP_COL_MAX 16384 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +enum aic3254_uplink_mode { + INITIAL = 0, + CALL_UPLINK_IMIC_RECEIVER = 1, + CALL_UPLINK_EMIC_HEADSET, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CALL_UPLINK_IMIC_HEADSET, + CALL_UPLINK_IMIC_SPEAKER, + CALL_UPLINK_IMIC_RECEIVER_DUALMIC, + CALL_UPLINK_EMIC_HEADSET_DUALMIC, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CALL_UPLINK_IMIC_SPEAKER_DUALMIC, + CALL_UPLINK_IMIC_RECIVER_TESTSIM, + CALL_UPLINK_EMIC_HEADSET_TESTSIM, + CALL_UPLINK_IMIC_SPEAKER_TESTSIM, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + VOICERECORD_IMIC = 15, + VOICERECORD_EMIC, + VIDEORECORD_IMIC, + VIDEORECORD_EMIC, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ 
+ VOICERECOGNITION_IMIC, + VOICERECOGNITION_EMIC, + FM_IN_SPEAKER, + FM_IN_HEADSET, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + TTY_IN_HCO, + TTY_IN_VCO, + TTY_IN_FULL, + UPLINK_OFF = 29, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + UPLINK_WAKEUP, + POWER_OFF, + SLEEP_WITH_HP_IN, + VOICERECORD_IMIC_PLAYBACK_SPEAKER, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + VOICERECORD_EMIC_PLAYBACK_HEADSET, + VOICERECORD_IMIC_PLAYBACK_HEADSET, +}; +enum aic3254_downlink_mode { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CALL_DOWNLINK_IMIC_RECEIVER = 1, + CALL_DOWNLINK_EMIC_HEADSET, + CALL_DOWNLINK_IMIC_HEADSET, + CALL_DOWNLINK_IMIC_SPEAKER, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CALL_DOWNLINK_IMIC_RECEIVER_DUALMIC, + CALL_DOWNLINK_EMIC_HEADSET_DUALMIC, + CALL_DOWNLINK_IMIC_SPEAKER_DUALMIC, + CALL_DOWNLINK_IMIC_RECIVER_TESTSIM, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CALL_DOWNLINK_EMIC_HEADSET_TESTSIM, + CALL_DOWNLINK_IMIC_SPEAKER_TESTSIM, + PLAYBACK_RECEIVER, + PLAYBACK_HEADSET, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + PLAYBACK_SPEAKER = 13, + RING_HEADSET_SPEAKER, + PLAYBACK_SPEAKER_ALT, + USB_AUDIO, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + FM_OUT_SPEAKER = 21, + FM_OUT_HEADSET, + TTY_OUT_HCO, + TTY_OUT_VCO, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + TTY_OUT_FULL, + MUSE, + HAC, + LPM_IMIC_RECEIVER, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + DOWNLINK_OFF = 29, + DOWNLINK_WAKEUP, +}; +struct aic3254_ctl_ops { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + void (*tx_amp_enable)(int en); + void (*rx_amp_enable)(int en); + int (*panel_sleep_in)(void); + void (*reset_3254)(void); +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + void (*spibus_enable)(int en); + CODEC_SPI_CMD_PARAM *downlink_off; + CODEC_SPI_CMD_PARAM *uplink_off; + CODEC_SPI_CMD_PARAM *downlink_on; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CODEC_SPI_CMD_PARAM *uplink_on; + CODEC_SPI_CMD_PARAM *lb_dsp_init; + CODEC_SPI_CMD_PARAM *lb_downlink_receiver; + CODEC_SPI_CMD_PARAM *lb_downlink_speaker; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CODEC_SPI_CMD_PARAM *lb_downlink_headset; + CODEC_SPI_CMD_PARAM *lb_uplink_imic; + CODEC_SPI_CMD_PARAM *lb_uplink_emic; + CODEC_SPI_CMD_PARAM *lb_receiver_imic; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CODEC_SPI_CMD_PARAM *lb_speaker_imic; + CODEC_SPI_CMD_PARAM *lb_headset_emic; + CODEC_SPI_CMD_PARAM *lb_receiver_bmic; + CODEC_SPI_CMD_PARAM *lb_speaker_bmic; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + CODEC_SPI_CMD_PARAM *lb_headset_bmic; +}; +#endif + diff --git a/include/linux/tpa2051d3.h b/include/linux/tpa2051d3.h new file mode 100644 index 00000000..041ddc18 --- /dev/null +++ b/include/linux/tpa2051d3.h @@ -0,0 +1,77 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available 
to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! + *** + **************************************************************************** + ****************************************************************************/ +#ifndef TPA2051D3_H +#define TPA2051D3_H +#include +#define TPA2051D3_I2C_NAME "tpa2051d3" +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SPKR_OUTPUT 0 +#define HEADSET_OUTPUT 1 +#define DUAL_OUTPUT 2 +#define HANDSET_OUTPUT 3 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define BEATS_ON_OUTPUT 4 +#define BEATS_OFF_OUTPUT 5 +#define LINEOUT_OUTPUT 6 +#define MODE_CMD_LEM 9 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +struct tpa2051d3_platform_data { + uint32_t gpio_tpa2051_spk_en; + unsigned char spkr_cmd[7]; + unsigned char hsed_cmd[7]; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned char rece_cmd[7]; + uint32_t gpio_tpa2051_spk_en_cpu; +}; +struct tpa2051_config_data { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned int data_len; + unsigned int mode_num; + unsigned char *cmd_data; +}; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +enum TPA2051_Mode { + TPA2051_MODE_OFF, + TPA2051_MODE_PLAYBACK_SPKR, + TPA2051_MODE_PLAYBACK_HEADSET, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + TPA2051_MODE_RING, + TPA2051_MODE_VOICECALL_SPKR, + TPA2051_MODE_VOICECALL_HEADSET, + TPA2051_MODE_FM_SPKR, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + TPA2051_MODE_FM_HEADSET, + TPA2051_MODE_PLAYBACK_REC, + TPA2051_MODE_VOICECALL_REC, + TPA2051_MODE_PLAYBACK_HEADSET_BEATS_ON, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + TPA2051_MODE_PLAYBACK_HEADSET_BEATS_OFF, + TPA2051_MODE_LINEOUT, + TPA2051_MAX_MODE +}; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define TPA2051_IOCTL_MAGIC 'a' +#define TPA2051_SET_CONFIG _IOW(TPA2051_IOCTL_MAGIC, 0x01, unsigned) +#define TPA2051_READ_CONFIG _IOW(TPA2051_IOCTL_MAGIC, 0x02, unsigned) +#define TPA2051_SET_MODE _IOW(TPA2051_IOCTL_MAGIC, 0x03, unsigned) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define TPA2051_SET_PARAM _IOW(TPA2051_IOCTL_MAGIC, 0x04, unsigned) +#define TPA2051_WRITE_REG _IOW(TPA2051_IOCTL_MAGIC, 0x07, unsigned) +#endif + diff --git a/include/media/Kbuild b/include/media/Kbuild new file mode 100644 index 00000000..75a11b28 --- /dev/null +++ b/include/media/Kbuild @@ -0,0 +1 @@ +header-y += msm_camera.h diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h index a3ba8adc..9dc98ec4 100644 --- a/include/media/msm_camera.h +++ b/include/media/msm_camera.h @@ -1,1135 +1,470 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for 
userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! + *** + **************************************************************************** + ****************************************************************************/ #ifndef __LINUX_MSM_CAMERA_H #define __LINUX_MSM_CAMERA_H - #ifdef MSM_CAMERA_BIONIC #include +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #endif #include +#include #include -#include +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #ifdef MSM_CAMERA_GCC #include #else #include +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #endif - +#define MAX_SENSOR_NUM 5 +#define MAX_SENSOR_NAME 32 #define MSM_CAM_IOCTL_MAGIC 'm' - -#define MSM_CAM_IOCTL_GET_SENSOR_INFO \ - _IOR(MSM_CAM_IOCTL_MAGIC, 1, struct msm_camsensor_info *) - -#define MSM_CAM_IOCTL_REGISTER_PMEM \ - _IOW(MSM_CAM_IOCTL_MAGIC, 2, struct msm_pmem_info *) - -#define MSM_CAM_IOCTL_UNREGISTER_PMEM \ - _IOW(MSM_CAM_IOCTL_MAGIC, 3, unsigned) - -#define MSM_CAM_IOCTL_CTRL_COMMAND \ - _IOW(MSM_CAM_IOCTL_MAGIC, 4, struct msm_ctrl_cmd *) - -#define MSM_CAM_IOCTL_CONFIG_VFE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 5, struct msm_camera_vfe_cfg_cmd *) - -#define MSM_CAM_IOCTL_GET_STATS \ - _IOR(MSM_CAM_IOCTL_MAGIC, 6, struct msm_camera_stats_event_ctrl *) - -#define MSM_CAM_IOCTL_GETFRAME \ - _IOR(MSM_CAM_IOCTL_MAGIC, 7, struct msm_camera_get_frame *) - -#define MSM_CAM_IOCTL_ENABLE_VFE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 8, struct camera_enable_cmd *) - -#define MSM_CAM_IOCTL_CTRL_CMD_DONE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 9, struct camera_cmd *) - -#define MSM_CAM_IOCTL_CONFIG_CMD \ - _IOW(MSM_CAM_IOCTL_MAGIC, 10, struct camera_cmd *) - -#define MSM_CAM_IOCTL_DISABLE_VFE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 11, struct camera_enable_cmd *) - -#define MSM_CAM_IOCTL_PAD_REG_RESET2 \ - _IOW(MSM_CAM_IOCTL_MAGIC, 12, struct camera_enable_cmd *) - -#define MSM_CAM_IOCTL_VFE_APPS_RESET \ - _IOW(MSM_CAM_IOCTL_MAGIC, 13, struct camera_enable_cmd *) - -#define MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER \ - _IOW(MSM_CAM_IOCTL_MAGIC, 14, struct camera_enable_cmd *) - -#define MSM_CAM_IOCTL_RELEASE_STATS_BUFFER \ - _IOW(MSM_CAM_IOCTL_MAGIC, 15, struct msm_stats_buf *) - -#define MSM_CAM_IOCTL_AXI_CONFIG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 16, struct msm_camera_vfe_cfg_cmd *) - -#define MSM_CAM_IOCTL_GET_PICTURE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 17, struct msm_frame *) - -#define MSM_CAM_IOCTL_SET_CROP \ - _IOW(MSM_CAM_IOCTL_MAGIC, 18, struct crop_info *) - -#define MSM_CAM_IOCTL_PICT_PP \ - _IOW(MSM_CAM_IOCTL_MAGIC, 19, uint8_t *) - -#define MSM_CAM_IOCTL_PICT_PP_DONE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 20, struct msm_snapshot_pp_status *) - -#define MSM_CAM_IOCTL_SENSOR_IO_CFG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 21, struct sensor_cfg_data *) - -#define MSM_CAM_IOCTL_FLASH_LED_CFG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 22, unsigned *) - -#define MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME \ - _IO(MSM_CAM_IOCTL_MAGIC, 23) - -#define MSM_CAM_IOCTL_CTRL_COMMAND_2 \ - _IOW(MSM_CAM_IOCTL_MAGIC, 24, struct msm_ctrl_cmd *) - -#define MSM_CAM_IOCTL_AF_CTRL \ - _IOR(MSM_CAM_IOCTL_MAGIC, 25, 
struct msm_ctrl_cmt_t *) - -#define MSM_CAM_IOCTL_AF_CTRL_DONE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 26, struct msm_ctrl_cmt_t *) - -#define MSM_CAM_IOCTL_CONFIG_VPE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 27, struct msm_camera_vpe_cfg_cmd *) - -#define MSM_CAM_IOCTL_AXI_VPE_CONFIG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 28, struct msm_camera_vpe_cfg_cmd *) - -#define MSM_CAM_IOCTL_STROBE_FLASH_CFG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 29, uint32_t *) - -#define MSM_CAM_IOCTL_STROBE_FLASH_CHARGE \ - _IOW(MSM_CAM_IOCTL_MAGIC, 30, uint32_t *) - -#define MSM_CAM_IOCTL_STROBE_FLASH_RELEASE \ - _IO(MSM_CAM_IOCTL_MAGIC, 31) - -#define MSM_CAM_IOCTL_FLASH_CTRL \ - _IOW(MSM_CAM_IOCTL_MAGIC, 32, struct flash_ctrl_data *) - -#define MSM_CAM_IOCTL_ERROR_CONFIG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 33, uint32_t *) - -#define MSM_CAM_IOCTL_ABORT_CAPTURE \ - _IO(MSM_CAM_IOCTL_MAGIC, 34) - -#define MSM_CAM_IOCTL_SET_FD_ROI \ - _IOW(MSM_CAM_IOCTL_MAGIC, 35, struct fd_roi_info *) - -#define MSM_CAM_IOCTL_GET_CAMERA_INFO \ - _IOR(MSM_CAM_IOCTL_MAGIC, 36, struct msm_camera_info *) - -#define MSM_CAM_IOCTL_UNBLOCK_POLL_PIC_FRAME \ - _IO(MSM_CAM_IOCTL_MAGIC, 37) - -#define MSM_CAM_IOCTL_RELEASE_PIC_BUFFER \ - _IOW(MSM_CAM_IOCTL_MAGIC, 38, struct camera_enable_cmd *) - -#define MSM_CAM_IOCTL_PUT_ST_FRAME \ - _IOW(MSM_CAM_IOCTL_MAGIC, 39, struct msm_camera_st_frame *) - -#define MSM_CAM_IOCTL_GET_CONFIG_INFO \ - _IOR(MSM_CAM_IOCTL_MAGIC, 40, struct msm_cam_config_dev_info *) - -#define MSM_CAM_IOCTL_V4L2_EVT_NOTIFY \ - _IOR(MSM_CAM_IOCTL_MAGIC, 41, struct v4l2_event *) - -#define MSM_CAM_IOCTL_SET_MEM_MAP_INFO \ - _IOR(MSM_CAM_IOCTL_MAGIC, 42, struct msm_mem_map_info *) - -#define MSM_CAM_IOCTL_ACTUATOR_IO_CFG \ - _IOW(MSM_CAM_IOCTL_MAGIC, 43, struct msm_actuator_cfg_data *) - -#define MSM_CAM_IOCTL_MCTL_POST_PROC \ - _IOW(MSM_CAM_IOCTL_MAGIC, 44, struct msm_mctl_post_proc_cmd *) - -#define MSM_CAM_IOCTL_RESERVE_FREE_FRAME \ - _IOW(MSM_CAM_IOCTL_MAGIC, 45, struct msm_cam_evt_divert_frame *) - -#define MSM_CAM_IOCTL_RELEASE_FREE_FRAME \ - _IOR(MSM_CAM_IOCTL_MAGIC, 46, struct msm_cam_evt_divert_frame *) - -struct msm_mctl_pp_cmd { - int32_t id; - uint16_t length; - void *value; -}; - -struct msm_mctl_post_proc_cmd { - int32_t type; - struct msm_mctl_pp_cmd cmd; -}; - -#define MSM_CAMERA_LED_OFF 0 -#define MSM_CAMERA_LED_LOW 1 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_GET_SENSOR_INFO _IOR(MSM_CAM_IOCTL_MAGIC, 1, struct msm_camsensor_info *) +#define MSM_CAM_IOCTL_REGISTER_PMEM _IOW(MSM_CAM_IOCTL_MAGIC, 2, struct msm_pmem_info *) +#define MSM_CAM_IOCTL_UNREGISTER_PMEM _IOW(MSM_CAM_IOCTL_MAGIC, 3, unsigned) +#define MSM_CAM_IOCTL_CTRL_COMMAND _IOW(MSM_CAM_IOCTL_MAGIC, 4, struct msm_ctrl_cmd *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_CONFIG_VFE _IOW(MSM_CAM_IOCTL_MAGIC, 5, struct msm_camera_vfe_cfg_cmd *) +#define MSM_CAM_IOCTL_GET_STATS _IOR(MSM_CAM_IOCTL_MAGIC, 6, struct msm_camera_stats_event_ctrl *) +#define MSM_CAM_IOCTL_GETFRAME _IOR(MSM_CAM_IOCTL_MAGIC, 7, struct msm_camera_get_frame *) +#define MSM_CAM_IOCTL_ENABLE_VFE _IOW(MSM_CAM_IOCTL_MAGIC, 8, struct camera_enable_cmd *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_CTRL_CMD_DONE _IOW(MSM_CAM_IOCTL_MAGIC, 9, struct camera_cmd *) +#define MSM_CAM_IOCTL_CONFIG_CMD _IOW(MSM_CAM_IOCTL_MAGIC, 10, struct camera_cmd *) +#define MSM_CAM_IOCTL_DISABLE_VFE _IOW(MSM_CAM_IOCTL_MAGIC, 11, struct camera_enable_cmd *) +#define 
MSM_CAM_IOCTL_PAD_REG_RESET2 _IOW(MSM_CAM_IOCTL_MAGIC, 12, struct camera_enable_cmd *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_VFE_APPS_RESET _IOW(MSM_CAM_IOCTL_MAGIC, 13, struct camera_enable_cmd *) +#define MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER _IOW(MSM_CAM_IOCTL_MAGIC, 14, struct camera_enable_cmd *) +#define MSM_CAM_IOCTL_RELEASE_STATS_BUFFER _IOW(MSM_CAM_IOCTL_MAGIC, 15, struct msm_stats_buf *) +#define MSM_CAM_IOCTL_AXI_CONFIG _IOW(MSM_CAM_IOCTL_MAGIC, 16, struct msm_camera_vfe_cfg_cmd *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_GET_PICTURE _IOW(MSM_CAM_IOCTL_MAGIC, 17, struct msm_camera_ctrl_cmd *) +#define MSM_CAM_IOCTL_SET_CROP _IOW(MSM_CAM_IOCTL_MAGIC, 18, struct crop_info *) +#define MSM_CAM_IOCTL_PICT_PP _IOW(MSM_CAM_IOCTL_MAGIC, 19, uint8_t *) +#define MSM_CAM_IOCTL_PICT_PP_DONE _IOW(MSM_CAM_IOCTL_MAGIC, 20, struct msm_snapshot_pp_status *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_SENSOR_IO_CFG _IOW(MSM_CAM_IOCTL_MAGIC, 21, struct sensor_cfg_data *) +#define MSM_CAM_IOCTL_FLASH_LED_CFG _IOW(MSM_CAM_IOCTL_MAGIC, 22, unsigned *) +#define MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME _IO(MSM_CAM_IOCTL_MAGIC, 23) +#define MSM_CAM_IOCTL_CTRL_COMMAND_2 _IOW(MSM_CAM_IOCTL_MAGIC, 24, struct msm_ctrl_cmd *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_AF_CTRL _IOR(MSM_CAM_IOCTL_MAGIC, 25, struct msm_ctrl_cmt_t *) +#define MSM_CAM_IOCTL_AF_CTRL_DONE _IOW(MSM_CAM_IOCTL_MAGIC, 26, struct msm_ctrl_cmt_t *) +#define MSM_CAM_IOCTL_CONFIG_VPE _IOW(MSM_CAM_IOCTL_MAGIC, 27, struct msm_camera_vpe_cfg_cmd *) +#define MSM_CAM_IOCTL_AXI_VPE_CONFIG _IOW(MSM_CAM_IOCTL_MAGIC, 28, struct msm_camera_vpe_cfg_cmd *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_STROBE_FLASH_CFG _IOW(MSM_CAM_IOCTL_MAGIC, 29, uint32_t *) +#define MSM_CAM_IOCTL_STROBE_FLASH_CHARGE _IOW(MSM_CAM_IOCTL_MAGIC, 30, uint32_t *) +#define MSM_CAM_IOCTL_STROBE_FLASH_RELEASE _IO(MSM_CAM_IOCTL_MAGIC, 31) +#define MSM_CAM_IOCTL_FLASH_CTRL _IOW(MSM_CAM_IOCTL_MAGIC, 32, struct flash_ctrl_data *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_ERROR_CONFIG _IOW(MSM_CAM_IOCTL_MAGIC, 33, uint32_t *) +#define MSM_CAM_IOCTL_ABORT_CAPTURE _IO(MSM_CAM_IOCTL_MAGIC, 34) +#define MSM_CAM_IOCTL_SET_FD_ROI _IOW(MSM_CAM_IOCTL_MAGIC, 35, struct fd_roi_info *) +#define MSM_CAM_IOCTL_GET_CAMERA_INFO _IOR(MSM_CAM_IOCTL_MAGIC, 36, struct msm_camera_info *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_IOCTL_PUT_ST_FRAME _IOW(MSM_CAM_IOCTL_MAGIC, 37, struct msm_camera_st_frame *) +#define MSM_CAM_IOCTL_SET_CONFIG_CAMERA_ZSL _IOW(MSM_CAM_IOCTL_MAGIC, 41, bool *) +#define MSM_CAM_IOCTL_EFFECT_STATE_CFG _IOW(MSM_CAM_IOCTL_MAGIC, 43, int32_t *) +#define MSM_CAMERA_LED_OFF 0 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAMERA_LED_LOW 1 #define MSM_CAMERA_LED_HIGH 2 -#define MSM_CAMERA_LED_INIT 3 -#define MSM_CAMERA_LED_RELEASE 4 - #define MSM_CAMERA_STROBE_FLASH_NONE 0 #define MSM_CAMERA_STROBE_FLASH_XENON 1 - -#define MSM_MAX_CAMERA_SENSORS 5 -#define MAX_SENSOR_NAME 32 - -#define MSM_MAX_CAMERA_CONFIGS 2 - -#define PP_SNAP 0x01 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define PP_SNAP 0x01 #define PP_RAW_SNAP ((0x01)<<1) 
-#define PP_PREV ((0x01)<<2) -#define PP_MASK (PP_SNAP|PP_RAW_SNAP|PP_PREV) - -#define MSM_CAM_CTRL_CMD_DONE 0 +#define PP_PREV ((0x01)<<2) +#define PP_MASK (PP_SNAP|PP_RAW_SNAP|PP_PREV) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_CTRL_CMD_DONE 0 #define MSM_CAM_SENSOR_VFE_CMD 1 - -/* Should be same as VIDEO_MAX_PLANES in videodev2.h */ -#define MAX_PLANES 8 - -/***************************************************** - * structure - *****************************************************/ - -/* define five type of structures for userspace <==> kernel - * space communication: - * command 1 - 2 are from userspace ==> kernel - * command 3 - 4 are from kernel ==> userspace - * - * 1. control command: control command(from control thread), - * control status (from config thread); - */ struct msm_ctrl_cmd { - uint16_t type; - uint16_t length; - void *value; - uint16_t status; - uint32_t timeout_ms; - int resp_fd; /* FIXME: to be used by the kernel, pass-through for now */ - int vnode_id; /* video dev id. Can we overload resp_fd? */ - uint32_t stream_type; /* used to pass value to qcamera server */ - int config_ident; /*used as identifier for config node*/ + uint16_t type; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint16_t length; + void *value; + uint16_t status; + uint32_t timeout_ms; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int resp_fd; }; - struct msm_cam_evt_msg { - unsigned short type; /* 1 == event (RPC), 0 == message (adsp) */ - unsigned short msg_id; - unsigned int len; /* size in, number of bytes out */ - uint32_t frame_id; - void *data; -}; - -struct msm_pp_frame_sp { - /* phy addr of the buffer */ - unsigned long phy_addr; - uint32_t y_off; - uint32_t cbcr_off; - /* buffer length */ - uint32_t length; - int32_t fd; - uint32_t addr_offset; - /* mapped addr */ - unsigned long vaddr; -}; - -struct msm_pp_frame_mp { - /* phy addr of the plane */ - unsigned long phy_addr; - /* offset of plane data */ - uint32_t data_offset; - /* plane length */ - uint32_t length; - int32_t fd; - uint32_t addr_offset; - /* mapped addr */ - unsigned long vaddr; -}; - -struct msm_pp_frame { - uint32_t handle; /* stores vb cookie */ - uint32_t frame_id; - int path; - unsigned short image_type; - unsigned short num_planes; /* 1 for sp */ - struct timeval timestamp; - union { - struct msm_pp_frame_sp sp; - struct msm_pp_frame_mp mp[MAX_PLANES]; - }; -}; - -struct msm_cam_evt_divert_frame { - unsigned short image_mode; - unsigned short op_mode; - unsigned short inst_idx; - unsigned short node_idx; - struct msm_pp_frame frame; - int do_pp; -}; - -struct msm_mctl_pp_cmd_ack_event { - uint32_t cmd; /* VPE_CMD_ZOOM? 
*/ - int status; /* 0 done, < 0 err */ - uint32_t cookie; /* daemon's cookie */ -}; - -struct msm_mctl_pp_event_info { - int32_t event; - union { - struct msm_mctl_pp_cmd_ack_event ack; - }; -}; - -struct msm_isp_event_ctrl { - unsigned short resptype; - union { - struct msm_cam_evt_msg isp_msg; - struct msm_ctrl_cmd ctrl; - struct msm_cam_evt_divert_frame div_frame; - struct msm_mctl_pp_event_info pp_event_info; - } isp_data; -}; - -#define MSM_CAM_RESP_CTRL 0 -#define MSM_CAM_RESP_STAT_EVT_MSG 1 -#define MSM_CAM_RESP_STEREO_OP_1 2 -#define MSM_CAM_RESP_STEREO_OP_2 3 -#define MSM_CAM_RESP_V4L2 4 -#define MSM_CAM_RESP_DIV_FRAME_EVT_MSG 5 -#define MSM_CAM_RESP_DONE_EVENT 6 -#define MSM_CAM_RESP_MCTL_PP_EVENT 7 -#define MSM_CAM_RESP_MAX 8 - -#define MSM_CAM_APP_NOTIFY_EVENT 0 - -/* this one is used to send ctrl/status up to config thread */ - + unsigned short type; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned short msg_id; + unsigned int len; + uint32_t frame_id; + void *data; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +struct msm_isp_evt_msg { + unsigned short type; + unsigned short msg_id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned int len; + uint8_t data[48]; +}; +struct msm_vpe_evt_msg { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned short type; + unsigned short msg_id; + unsigned int len; + uint32_t frame_id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + void *data; +}; +struct msm_isp_stats_event_ctrl { + unsigned short resptype; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + union { + struct msm_isp_evt_msg isp_msg; + struct msm_ctrl_cmd ctrl; + } isp_data; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +#define MSM_CAM_RESP_CTRL 0 +#define MSM_CAM_RESP_STAT_EVT_MSG 1 +#define MSM_CAM_RESP_STEREO_OP_1 2 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_CAM_RESP_STEREO_OP_2 3 +#define MSM_CAM_RESP_V4L2 4 +#define MSM_CAM_RESP_MAX 5 struct msm_stats_event_ctrl { - /* 0 - ctrl_cmd from control thread, - * 1 - stats/event kernel, - * 2 - V4L control or read request */ - int resptype; - int timeout_ms; - struct msm_ctrl_cmd ctrl_cmd; - /* struct vfe_event_t stats_event; */ - struct msm_cam_evt_msg stats_event; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int resptype; + int timeout_ms; + struct msm_ctrl_cmd ctrl_cmd; + struct msm_cam_evt_msg stats_event; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; - -/* 2. 
config command: config command(from config thread); */ struct msm_camera_cfg_cmd { - /* what to config: - * 1 - sensor config, 2 - vfe config */ - uint16_t cfg_type; - - /* sensor config type */ - uint16_t cmd_type; - uint16_t queue; - uint16_t length; - void *value; -}; - -#define CMD_GENERAL 0 -#define CMD_AXI_CFG_OUT1 1 -#define CMD_AXI_CFG_SNAP_O1_AND_O2 2 -#define CMD_AXI_CFG_OUT2 3 -#define CMD_PICT_T_AXI_CFG 4 -#define CMD_PICT_M_AXI_CFG 5 -#define CMD_RAW_PICT_AXI_CFG 6 - -#define CMD_FRAME_BUF_RELEASE 7 -#define CMD_PREV_BUF_CFG 8 -#define CMD_SNAP_BUF_RELEASE 9 -#define CMD_SNAP_BUF_CFG 10 -#define CMD_STATS_DISABLE 11 -#define CMD_STATS_AEC_AWB_ENABLE 12 -#define CMD_STATS_AF_ENABLE 13 -#define CMD_STATS_AEC_ENABLE 14 -#define CMD_STATS_AWB_ENABLE 15 -#define CMD_STATS_ENABLE 16 - -#define CMD_STATS_AXI_CFG 17 -#define CMD_STATS_AEC_AXI_CFG 18 -#define CMD_STATS_AF_AXI_CFG 19 -#define CMD_STATS_AWB_AXI_CFG 20 -#define CMD_STATS_RS_AXI_CFG 21 -#define CMD_STATS_CS_AXI_CFG 22 -#define CMD_STATS_IHIST_AXI_CFG 23 -#define CMD_STATS_SKIN_AXI_CFG 24 - -#define CMD_STATS_BUF_RELEASE 25 -#define CMD_STATS_AEC_BUF_RELEASE 26 -#define CMD_STATS_AF_BUF_RELEASE 27 -#define CMD_STATS_AWB_BUF_RELEASE 28 -#define CMD_STATS_RS_BUF_RELEASE 29 -#define CMD_STATS_CS_BUF_RELEASE 30 -#define CMD_STATS_IHIST_BUF_RELEASE 31 -#define CMD_STATS_SKIN_BUF_RELEASE 32 - -#define UPDATE_STATS_INVALID 33 -#define CMD_AXI_CFG_SNAP_GEMINI 34 -#define CMD_AXI_CFG_SNAP 35 -#define CMD_AXI_CFG_PREVIEW 36 -#define CMD_AXI_CFG_VIDEO 37 - + uint16_t cfg_type; + uint16_t cmd_type; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint16_t queue; + uint16_t length; + void *value; +}; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_GENERAL 0 +#define CMD_AXI_CFG_OUT1 1 +#define CMD_AXI_CFG_SNAP_O1_AND_O2 2 +#define CMD_AXI_CFG_OUT2 3 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_PICT_T_AXI_CFG 4 +#define CMD_PICT_M_AXI_CFG 5 +#define CMD_RAW_PICT_AXI_CFG 6 +#define CMD_FRAME_BUF_RELEASE 7 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_PREV_BUF_CFG 8 +#define CMD_SNAP_BUF_RELEASE 9 +#define CMD_SNAP_BUF_CFG 10 +#define CMD_STATS_DISABLE 11 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_STATS_AEC_AWB_ENABLE 12 +#define CMD_STATS_AF_ENABLE 13 +#define CMD_STATS_AEC_ENABLE 14 +#define CMD_STATS_AWB_ENABLE 15 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_STATS_ENABLE 16 +#define CMD_STATS_AXI_CFG 17 +#define CMD_STATS_AEC_AXI_CFG 18 +#define CMD_STATS_AF_AXI_CFG 19 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_STATS_AWB_AXI_CFG 20 +#define CMD_STATS_RS_AXI_CFG 21 +#define CMD_STATS_CS_AXI_CFG 22 +#define CMD_STATS_IHIST_AXI_CFG 23 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_STATS_SKIN_AXI_CFG 24 +#define CMD_STATS_BUF_RELEASE 25 +#define CMD_STATS_AEC_BUF_RELEASE 26 +#define CMD_STATS_AF_BUF_RELEASE 27 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_STATS_AWB_BUF_RELEASE 28 +#define CMD_STATS_RS_BUF_RELEASE 29 +#define CMD_STATS_CS_BUF_RELEASE 30 +#define CMD_STATS_IHIST_BUF_RELEASE 31 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_STATS_SKIN_BUF_RELEASE 32 +#define UPDATE_STATS_INVALID 33 +#define CMD_AXI_CFG_SNAP_GEMINI 
34 +#define CMD_AXI_CFG_SNAP 35 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_AXI_CFG_PREVIEW 36 +#define CMD_AXI_CFG_VIDEO 37 #define CMD_STATS_IHIST_ENABLE 38 #define CMD_STATS_RS_ENABLE 39 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #define CMD_STATS_CS_ENABLE 40 #define CMD_VPE 41 #define CMD_AXI_CFG_VPE 42 -#define CMD_AXI_CFG_ZSL 43 -#define CMD_AXI_CFG_SNAP_VPE 44 -#define CMD_AXI_CFG_SNAP_THUMB_VPE 45 -#define CMD_CONFIG_PING_ADDR 46 -#define CMD_CONFIG_PONG_ADDR 47 -#define CMD_CONFIG_FREE_BUF_ADDR 48 - -/* vfe config command: config command(from config thread)*/ +#define CMD_AXI_CFG_SNAP_VPE 43 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define CMD_AXI_CFG_SNAP_THUMB_VPE 44 struct msm_vfe_cfg_cmd { - int cmd_type; - uint16_t length; - void *value; + int cmd_type; + uint16_t length; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + void *value; }; - struct msm_vpe_cfg_cmd { - int cmd_type; - uint16_t length; - void *value; + int cmd_type; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint16_t length; + void *value; }; - #define MAX_CAMERA_ENABLE_NAME_LEN 32 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ struct camera_enable_cmd { - char name[MAX_CAMERA_ENABLE_NAME_LEN]; -}; - -#define MSM_PMEM_OUTPUT1 0 -#define MSM_PMEM_OUTPUT2 1 -#define MSM_PMEM_OUTPUT1_OUTPUT2 2 -#define MSM_PMEM_THUMBNAIL 3 -#define MSM_PMEM_MAINIMG 4 -#define MSM_PMEM_RAW_MAINIMG 5 -#define MSM_PMEM_AEC_AWB 6 -#define MSM_PMEM_AF 7 -#define MSM_PMEM_AEC 8 -#define MSM_PMEM_AWB 9 -#define MSM_PMEM_RS 10 -#define MSM_PMEM_CS 11 -#define MSM_PMEM_IHIST 12 -#define MSM_PMEM_SKIN 13 -#define MSM_PMEM_VIDEO 14 -#define MSM_PMEM_PREVIEW 15 -#define MSM_PMEM_VIDEO_VPE 16 -#define MSM_PMEM_C2D 17 -#define MSM_PMEM_MAINIMG_VPE 18 -#define MSM_PMEM_THUMBNAIL_VPE 19 -#define MSM_PMEM_MAX 20 - -#define STAT_AEAW 0 -#define STAT_AEC 1 -#define STAT_AF 2 -#define STAT_AWB 3 -#define STAT_RS 4 -#define STAT_CS 5 -#define STAT_IHIST 6 -#define STAT_SKIN 7 -#define STAT_MAX 8 - -#define FRAME_PREVIEW_OUTPUT1 0 -#define FRAME_PREVIEW_OUTPUT2 1 -#define FRAME_SNAPSHOT 2 -#define FRAME_THUMBNAIL 3 -#define FRAME_RAW_SNAPSHOT 4 -#define FRAME_MAX 5 - + char name[MAX_CAMERA_ENABLE_NAME_LEN]; +}; +#define MSM_PMEM_OUTPUT1 0 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_PMEM_OUTPUT2 1 +#define MSM_PMEM_OUTPUT1_OUTPUT2 2 +#define MSM_PMEM_THUMBNAIL 3 +#define MSM_PMEM_MAINIMG 4 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_PMEM_RAW_MAINIMG 5 +#define MSM_PMEM_AEC_AWB 6 +#define MSM_PMEM_AF 7 +#define MSM_PMEM_AEC 8 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_PMEM_AWB 9 +#define MSM_PMEM_RS 10 +#define MSM_PMEM_CS 11 +#define MSM_PMEM_IHIST 12 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_PMEM_SKIN 13 +#define MSM_PMEM_VIDEO 14 +#define MSM_PMEM_PREVIEW 15 +#define MSM_PMEM_VIDEO_VPE 16 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_PMEM_C2D 17 +#define MSM_PMEM_MAINIMG_VPE 18 +#define MSM_PMEM_THUMBNAIL_VPE 19 +#define MSM_PMEM_MAX 20 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define STAT_AEAW 0 +#define STAT_AEC 1 +#define STAT_AF 2 +#define STAT_AWB 3 +/* WARNING: DO NOT EDIT, AUTO-GENERATED 
CODE - SEE TOP FOR INSTRUCTIONS */ +#define STAT_RS 4 +#define STAT_CS 5 +#define STAT_IHIST 6 +#define STAT_SKIN 7 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define STAT_MAX 8 +#define FRAME_PREVIEW_OUTPUT1 0 +#define FRAME_PREVIEW_OUTPUT2 1 +#define FRAME_SNAPSHOT 2 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define FRAME_THUMBNAIL 3 +#define FRAME_RAW_SNAPSHOT 4 +#define FRAME_MAX 5 struct msm_pmem_info { - int type; - int fd; - void *vaddr; - uint32_t offset; - uint32_t len; - uint32_t y_off; - uint32_t cbcr_off; - uint8_t active; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int type; + int fd; + void *vaddr; + uint32_t offset; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint32_t len; + uint32_t y_off; + uint32_t cbcr_off; + uint8_t active; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; - struct outputCfg { - uint32_t height; - uint32_t width; - - uint32_t window_height_firstline; - uint32_t window_height_lastline; -}; - -#define OUTPUT_1 0 -#define OUTPUT_2 1 -#define OUTPUT_1_AND_2 2 /* snapshot only */ -#define OUTPUT_1_AND_3 3 /* video */ + uint32_t height; + uint32_t width; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint32_t window_height_firstline; + uint32_t window_height_lastline; +}; +#define OUTPUT_1 0 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define OUTPUT_2 1 +#define OUTPUT_1_AND_2 2 +#define OUTPUT_1_AND_3 3 #define CAMIF_TO_AXI_VIA_OUTPUT_2 4 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #define OUTPUT_1_AND_CAMIF_TO_AXI_VIA_OUTPUT_2 5 #define OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 6 -#define OUTPUT_1_2_AND_3 7 -#define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_1_2_AND_3 7 - -#define MSM_FRAME_PREV_1 0 -#define MSM_FRAME_PREV_2 1 -#define MSM_FRAME_ENC 2 - -#define OUTPUT_TYPE_P (1<<0) -#define OUTPUT_TYPE_T (1<<1) -#define OUTPUT_TYPE_S (1<<2) -#define OUTPUT_TYPE_V (1<<3) -#define OUTPUT_TYPE_L (1<<4) +#define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 7 +#define MSM_FRAME_PREV_1 0 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_FRAME_PREV_2 1 +#define MSM_FRAME_ENC 2 +#define OUTPUT_TYPE_P (1<<0) +#define OUTPUT_TYPE_T (1<<1) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define OUTPUT_TYPE_S (1<<2) +#define OUTPUT_TYPE_V (1<<3) +#define OUTPUT_TYPE_L (1<<4) #define OUTPUT_TYPE_ST_L (1<<5) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #define OUTPUT_TYPE_ST_R (1<<6) #define OUTPUT_TYPE_ST_D (1<<7) - struct fd_roi_info { - void *info; - int info_len; + void *info; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int info_len; }; - -struct msm_mem_map_info { - uint32_t cookie; - uint32_t length; - uint32_t mem_type; -}; - -#define MSM_MEM_MMAP 0 -#define MSM_MEM_USERPTR 1 -#define MSM_PLANE_MAX 8 -#define MSM_PLANE_Y 0 -#define MSM_PLANE_UV 1 - struct msm_frame { - struct timespec ts; - int path; - int type; - unsigned long buffer; - uint32_t phy_offset; - uint32_t y_off; - uint32_t cbcr_off; - int fd; - - void *cropinfo; - int croplen; - uint32_t error_code; - struct fd_roi_info roi_info; - uint32_t frame_id; - int stcam_quality_ind; - uint32_t stcam_conv_value; + struct timespec ts; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + 
int path; + int type; + unsigned long buffer; + uint32_t phy_offset; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint32_t y_off; + uint32_t cbcr_off; + int fd; + void *cropinfo; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int croplen; + uint32_t error_code; + struct fd_roi_info roi_info; + uint32_t frame_id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; - enum msm_st_frame_packing { - SIDE_BY_SIDE_HALF, - SIDE_BY_SIDE_FULL, - TOP_DOWN_HALF, - TOP_DOWN_FULL, + SIDE_BY_SIDE_HALF, + SIDE_BY_SIDE_FULL, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + TOP_DOWN_HALF, + TOP_DOWN_FULL, }; - struct msm_st_crop { - uint32_t in_w; - uint32_t in_h; - uint32_t out_w; - uint32_t out_h; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint32_t in_w; + uint32_t in_h; + uint32_t out_w; + uint32_t out_h; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; - struct msm_st_half { - uint32_t buf_y_off; - uint32_t buf_cbcr_off; - uint32_t buf_y_stride; - uint32_t buf_cbcr_stride; - uint32_t pix_x_off; - uint32_t pix_y_off; - struct msm_st_crop stCropInfo; + uint32_t buf_y_off; + uint32_t buf_cbcr_off; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint32_t buf_y_stride; + uint32_t buf_cbcr_stride; + uint32_t pix_x_off; + uint32_t pix_y_off; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct msm_st_crop stCropInfo; }; - struct msm_st_frame { - struct msm_frame buf_info; - int type; - enum msm_st_frame_packing packing; - struct msm_st_half L; - struct msm_st_half R; - int frame_id; + struct msm_frame buf_info; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int type; + enum msm_st_frame_packing packing; + struct msm_st_half L; + struct msm_st_half R; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int frame_id; }; - #define MSM_CAMERA_ERR_MASK (0xFFFFFFFF & 1) - -struct stats_buff { - unsigned long buff; - int fd; -}; - struct msm_stats_buf { - uint8_t awb_ymin; - struct stats_buff aec; - struct stats_buff awb; - struct stats_buff af; - struct stats_buff ihist; - struct stats_buff rs; - struct stats_buff cs; - struct stats_buff skin; - int type; - uint32_t status_bits; - unsigned long buffer; - int fd; - uint32_t frame_id; -}; -#define MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT 0 -/* video capture mode in VIDIOC_S_PARM */ -#define MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW \ - (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+1) -/* extendedmode for video recording in VIDIOC_S_PARM */ -#define MSM_V4L2_EXT_CAPTURE_MODE_VIDEO \ - (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+2) -/* extendedmode for the full size main image in VIDIOC_S_PARM */ -#define MSM_V4L2_EXT_CAPTURE_MODE_MAIN (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+3) -/* extendedmode for the thumb nail image in VIDIOC_S_PARM */ -#define MSM_V4L2_EXT_CAPTURE_MODE_THUMBNAIL \ - (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+4) -#define MSM_V4L2_EXT_CAPTURE_MODE_RAW \ - (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+5) -#define MSM_V4L2_EXT_CAPTURE_MODE_MAX (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+6) - - -#define MSM_V4L2_PID_MOTION_ISO V4L2_CID_PRIVATE_BASE -#define MSM_V4L2_PID_EFFECT (V4L2_CID_PRIVATE_BASE+1) -#define MSM_V4L2_PID_HJR (V4L2_CID_PRIVATE_BASE+2) -#define MSM_V4L2_PID_LED_MODE (V4L2_CID_PRIVATE_BASE+3) -#define MSM_V4L2_PID_PREP_SNAPSHOT (V4L2_CID_PRIVATE_BASE+4) -#define MSM_V4L2_PID_EXP_METERING 
(V4L2_CID_PRIVATE_BASE+5) -#define MSM_V4L2_PID_ISO (V4L2_CID_PRIVATE_BASE+6) -#define MSM_V4L2_PID_CAM_MODE (V4L2_CID_PRIVATE_BASE+7) -#define MSM_V4L2_PID_LUMA_ADAPTATION (V4L2_CID_PRIVATE_BASE+8) -#define MSM_V4L2_PID_BEST_SHOT (V4L2_CID_PRIVATE_BASE+9) -#define MSM_V4L2_PID_FOCUS_MODE (V4L2_CID_PRIVATE_BASE+10) -#define MSM_V4L2_PID_BL_DETECTION (V4L2_CID_PRIVATE_BASE+11) -#define MSM_V4L2_PID_SNOW_DETECTION (V4L2_CID_PRIVATE_BASE+12) -#define MSM_V4L2_PID_CTRL_CMD (V4L2_CID_PRIVATE_BASE+13) -#define MSM_V4L2_PID_EVT_SUB_INFO (V4L2_CID_PRIVATE_BASE+14) -#define MSM_V4L2_PID_STROBE_FLASH (V4L2_CID_PRIVATE_BASE+15) -#define MSM_V4L2_PID_MMAP_ENTRY (V4L2_CID_PRIVATE_BASE+16) -#define MSM_V4L2_PID_MMAP_INST (V4L2_CID_PRIVATE_BASE+17) -#define MSM_V4L2_PID_MAX MSM_V4L2_PID_MMAP_INST - -/* camera operation mode for video recording - two frame output queues */ -#define MSM_V4L2_CAM_OP_DEFAULT 0 -/* camera operation mode for video recording - two frame output queues */ -#define MSM_V4L2_CAM_OP_PREVIEW (MSM_V4L2_CAM_OP_DEFAULT+1) -/* camera operation mode for video recording - two frame output queues */ -#define MSM_V4L2_CAM_OP_VIDEO (MSM_V4L2_CAM_OP_DEFAULT+2) -/* camera operation mode for standard shapshot - two frame output queues */ -#define MSM_V4L2_CAM_OP_CAPTURE (MSM_V4L2_CAM_OP_DEFAULT+3) -/* camera operation mode for zsl shapshot - three output queues */ -#define MSM_V4L2_CAM_OP_ZSL (MSM_V4L2_CAM_OP_DEFAULT+4) -/* camera operation mode for raw snapshot - one frame output queue */ -#define MSM_V4L2_CAM_OP_RAW (MSM_V4L2_CAM_OP_DEFAULT+5) - -#define MSM_V4L2_VID_CAP_TYPE 0 -#define MSM_V4L2_STREAM_ON 1 -#define MSM_V4L2_STREAM_OFF 2 -#define MSM_V4L2_SNAPSHOT 3 -#define MSM_V4L2_QUERY_CTRL 4 -#define MSM_V4L2_GET_CTRL 5 -#define MSM_V4L2_SET_CTRL 6 -#define MSM_V4L2_QUERY 7 -#define MSM_V4L2_GET_CROP 8 -#define MSM_V4L2_SET_CROP 9 -#define MSM_V4L2_OPEN 10 -#define MSM_V4L2_CLOSE 11 -#define MSM_V4L2_SET_CTRL_CMD 12 -#define MSM_V4L2_EVT_SUB_MASK 13 -#define MSM_V4L2_MAX 14 -#define V4L2_CAMERA_EXIT 43 - +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int type; + unsigned long buffer; + int fd; + uint32_t frame_id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +#define MSM_V4L2_VID_CAP_TYPE 0 +#define MSM_V4L2_STREAM_ON 1 +#define MSM_V4L2_STREAM_OFF 2 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_V4L2_SNAPSHOT 3 +#define MSM_V4L2_QUERY_CTRL 4 +#define MSM_V4L2_GET_CTRL 5 +#define MSM_V4L2_SET_CTRL 6 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_V4L2_QUERY 7 +#define MSM_V4L2_GET_CROP 8 +#define MSM_V4L2_SET_CROP 9 +#define MSM_V4L2_MAX 10 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define V4L2_CAMERA_EXIT 43 struct crop_info { - void *info; - int len; + void *info; + int len; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; - struct msm_postproc { - int ftnum; - struct msm_frame fthumnail; - int fmnum; - struct msm_frame fmain; -}; - -struct msm_snapshot_pp_status { - void *status; -}; - -#define CFG_SET_MODE 0 -#define CFG_SET_EFFECT 1 -#define CFG_START 2 -#define CFG_PWR_UP 3 -#define CFG_PWR_DOWN 4 -#define CFG_WRITE_EXPOSURE_GAIN 5 -#define CFG_SET_DEFAULT_FOCUS 6 -#define CFG_MOVE_FOCUS 7 -#define CFG_REGISTER_TO_REAL_GAIN 8 -#define CFG_REAL_TO_REGISTER_GAIN 9 -#define CFG_SET_FPS 10 -#define CFG_SET_PICT_FPS 11 -#define CFG_SET_BRIGHTNESS 12 -#define CFG_SET_CONTRAST 13 
-#define CFG_SET_ZOOM 14 -#define CFG_SET_EXPOSURE_MODE 15 -#define CFG_SET_WB 16 -#define CFG_SET_ANTIBANDING 17 -#define CFG_SET_EXP_GAIN 18 -#define CFG_SET_PICT_EXP_GAIN 19 -#define CFG_SET_LENS_SHADING 20 -#define CFG_GET_PICT_FPS 21 -#define CFG_GET_PREV_L_PF 22 -#define CFG_GET_PREV_P_PL 23 -#define CFG_GET_PICT_L_PF 24 -#define CFG_GET_PICT_P_PL 25 -#define CFG_GET_AF_MAX_STEPS 26 -#define CFG_GET_PICT_MAX_EXP_LC 27 -#define CFG_SEND_WB_INFO 28 -#define CFG_SENSOR_INIT 29 -#define CFG_GET_3D_CALI_DATA 30 -#define CFG_GET_CALIB_DATA 31 -#define CFG_GET_OUTPUT_INFO 32 -#define CFG_GET_EEPROM_DATA 33 -#define CFG_SET_ACTUATOR_INFO 34 -#define CFG_GET_ACTUATOR_INFO 35 -#if 1 /* HTC_START Hayden Huang 20111006 YUV Sensor */ -#define CFG_SET_SHARPNESS 36 -#define CFG_SET_SATURATION 37 -#define CFG_SET_OV_LSC_RAW_CAPTURE 38 -#define CFG_SET_ISO 39 -#define CFG_SET_COORDINATE 40 -#define CFG_RUN_AUTO_FOCUS 41 -#define CFG_CANCEL_AUTO_FOCUS 42 -#define CFG_GET_EXP_FOR_LED 43 -#define CFG_UPDATE_AEC_FOR_LED 44 -#define CFG_SET_FRONT_CAMERA_MODE 45 -#define CFG_SET_QCT_LSC_RAW_CAPTURE 46 -#define CFG_SET_QTR_SIZE_MODE 47 -#define CFG_GET_AF_STATE 48 -#define CFG_SET_DMODE 49 -#define CFG_SET_CALIBRATION 50 -#define CFG_SET_AF_MODE 51 -#define CFG_GET_SP3D_L_FRAME 52 -#define CFG_GET_SP3D_R_FRAME 53 -#define CFG_SET_FLASHLIGHT 54 -#define CFG_SET_FLASHLIGHT_EXP_DIV 55 -#define CFG_GET_ISO 56 -#define CFG_GET_EXP_GAIN 57 -#define CFG_SET_FRAMERATE 58 -#endif /* HTC_END Hayden Huang 20111006 YUV Sensor */ -#define CFG_MAX 59 - -/* HTC_START */ -// Ray add for read fuse id command -#define CFG_I2C_IOCTL_R_OTP 70 -/* HTC_END */ - -#define MOVE_NEAR 0 -#define MOVE_FAR 1 - -/* HTC_START Angie 20111019 - Full Size Preview */ -#define SENSOR_PREVIEW_MODE 0 /* SENSOR_MODE_PREVIEW, SENSOR_MODE_VIDEO, SENSOR_MODE_FULL_SIZE_PREVIEW */ -#define SENSOR_SNAPSHOT_MODE 1 /* SENSOR_MODE_SNAPSHOT */ -#define SENSOR_RAW_SNAPSHOT_MODE 2 /* SENSOR_MODE_RAW_SNAPSHOT */ -/* HTC_END */ -#define SENSOR_HFR_60FPS_MODE 3 -#define SENSOR_HFR_90FPS_MODE 4 -#define SENSOR_HFR_120FPS_MODE 5 - -#define SENSOR_QTR_SIZE 0 -#define SENSOR_FULL_SIZE 1 -#define SENSOR_QVGA_SIZE 2 -#define SENSOR_INVALID_SIZE 3 - -#define CAMERA_EFFECT_OFF 0 -#define CAMERA_EFFECT_MONO 1 -#define CAMERA_EFFECT_NEGATIVE 2 -#define CAMERA_EFFECT_SOLARIZE 3 -#define CAMERA_EFFECT_SEPIA 4 -#define CAMERA_EFFECT_POSTERIZE 5 -#define CAMERA_EFFECT_WHITEBOARD 6 -#define CAMERA_EFFECT_BLACKBOARD 7 -#define CAMERA_EFFECT_AQUA 8 -#define CAMERA_EFFECT_EMBOSS 9 -#define CAMERA_EFFECT_SKETCH 10 -#define CAMERA_EFFECT_NEON 11 -#define CAMERA_EFFECT_MAX 12 - -struct sensor_pict_fps { - uint16_t prevfps; - uint16_t pictfps; -}; - -struct exp_gain_cfg { - uint16_t gain; - uint32_t line; -}; - -struct focus_cfg { - int32_t steps; - int dir; + int ftnum; + struct msm_frame fthumnail; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int fmnum; + struct msm_frame fmain; }; - -struct fps_cfg { - uint16_t f_mult; - uint16_t fps_div; - uint32_t pict_fps_div; -}; -struct wb_info_cfg { - uint16_t red_gain; - uint16_t green_gain; - uint16_t blue_gain; -}; -struct sensor_3d_exp_cfg { - uint16_t gain; - uint32_t line; - uint16_t r_gain; - uint16_t b_gain; - uint16_t gr_gain; - uint16_t gb_gain; - uint16_t gain_adjust; -}; -struct sensor_3d_cali_data_t{ - unsigned char left_p_matrix[3][4][8]; - unsigned char right_p_matrix[3][4][8]; - unsigned char square_len[8]; - unsigned char focal_len[8]; - unsigned char pixel_pitch[8]; - uint16_t left_r; - 
uint16_t left_b; - uint16_t left_gb; - uint16_t left_af_far; - uint16_t left_af_mid; - uint16_t left_af_short; - uint16_t left_af_5um; - uint16_t left_af_50up; - uint16_t left_af_50down; - uint16_t right_r; - uint16_t right_b; - uint16_t right_gb; - uint16_t right_af_far; - uint16_t right_af_mid; - uint16_t right_af_short; - uint16_t right_af_5um; - uint16_t right_af_50up; - uint16_t right_af_50down; -}; -struct sensor_init_cfg { - uint8_t prev_res; - uint8_t pict_res; -}; - -struct sensor_calib_data { - /* Color Related Measurements */ - uint16_t r_over_g; - uint16_t b_over_g; - uint16_t gr_over_gb; - - /* Lens Related Measurements */ - uint16_t macro_2_inf; - uint16_t inf_2_macro; - uint16_t stroke_amt; - uint16_t af_pos_1m; - uint16_t af_pos_inf; -}; - -enum msm_sensor_resolution_t { - MSM_SENSOR_RES_FULL, - MSM_SENSOR_RES_QTR, - MSM_SENSOR_RES_VIDEO, - MSM_SENSOR_RES_VIDEO_HFR, - MSM_SENSOR_RES_4, - MSM_SENSOR_RES_5, - MSM_SENSOR_RES_6, - MSM_SENSOR_RES_7, - MSM_SENSOR_INVALID_RES, -}; - -struct msm_sensor_output_info_t { - uint16_t x_output; - uint16_t y_output; - uint16_t line_length_pclk; - uint16_t frame_length_lines; - uint32_t vt_pixel_clk; - uint32_t op_pixel_clk; - uint16_t binning_factor; -}; - -struct sensor_output_info_t { - struct msm_sensor_output_info_t *output_info; - uint16_t num_info; - /* HTC_START Angie 20111019 - Fix FPS */ - uint16_t vert_offset; - uint16_t min_vert; - int mirror_flip; -/* HTC_END */ -}; - -struct sensor_eeprom_data_t { - void *eeprom_data; - uint16_t index; -}; - -#if 1 /* HTC_START Hayden Huang 20111006 YUV Sensor */ -enum antibanding_mode{ - CAMERA_ANTI_BANDING_50HZ, - CAMERA_ANTI_BANDING_60HZ, - CAMERA_ANTI_BANDING_AUTO, -}; - -enum brightness_t{ - CAMERA_BRIGHTNESS_N3, - CAMERA_BRIGHTNESS_N2, - CAMERA_BRIGHTNESS_N1, - CAMERA_BRIGHTNESS_D, - CAMERA_BRIGHTNESS_P1, - CAMERA_BRIGHTNESS_P2, - CAMERA_BRIGHTNESS_P3, - CAMERA_BRIGHTNESS_P4, - CAMERA_BRIGHTNESS_N4, -}; - -enum frontcam_t{ - CAMERA_MIRROR, - CAMERA_REVERSE, - CAMERA_PORTRAIT_REVERSE, /* 0916 for 3rd party */ -}; - -enum wb_mode{ - CAMERA_AWB_AUTO,/*auto*/ - CAMERA_AWB_CLOUDY,/*Cloudy*/ - CAMERA_AWB_INDOOR_HOME,/*Fluorescent*/ - CAMERA_AWB_INDOOR_OFFICE,/*Incandescent*/ - CAMERA_AWB_SUNNY,/*daylight*/ -}; - -enum iso_mode{ - CAMERA_ISO_AUTO = 0, - CAMERA_ISO_DEBLUR, - CAMERA_ISO_100, - CAMERA_ISO_200, - CAMERA_ISO_400, - CAMERA_ISO_800, - CAMERA_ISO_1250, - CAMERA_ISO_1600, - CAMERA_ISO_MAX -}; - -enum sharpness_mode{ - CAMERA_SHARPNESS_X0, - CAMERA_SHARPNESS_X1, - CAMERA_SHARPNESS_X2, - CAMERA_SHARPNESS_X3, - CAMERA_SHARPNESS_X4, - CAMERA_SHARPNESS_X5, - CAMERA_SHARPNESS_X6, -}; - -enum saturation_mode{ - CAMERA_SATURATION_X0, - CAMERA_SATURATION_X05, - CAMERA_SATURATION_X1, - CAMERA_SATURATION_X15, - CAMERA_SATURATION_X2, -}; - -enum contrast_mode{ - CAMERA_CONTRAST_P2, - CAMERA_CONTRAST_P1, - CAMERA_CONTRAST_D, - CAMERA_CONTRAST_N1, - CAMERA_CONTRAST_N2, -}; - -enum qtr_size_mode{ - NORMAL_QTR_SIZE_MODE, - LARGER_QTR_SIZE_MODE, -}; - -enum sensor_af_mode{ - SENSOR_AF_MODE_AUTO, - SENSOR_AF_MODE_NORMAL, - SENSOR_AF_MODE_MACRO, -}; -#endif /* HTC_END Hayden Huang 20111006 YUV Sensor */ - -/* HTC_START */ -// Ray add fuse id structure -struct fuse_id{ - uint32_t fuse_id_word1; - uint32_t fuse_id_word2; - uint32_t fuse_id_word3; - uint32_t fuse_id_word4; -}; -/* HTC_END */ -struct sensor_cfg_data { - int cfgtype; - int mode; - int rs; - uint8_t max_steps; - - union { - int8_t effect; - uint8_t lens_shading; - uint16_t prevl_pf; - uint16_t prevp_pl; - uint16_t pictl_pf; - uint16_t 
pictp_pl; - uint32_t pict_max_exp_lc; - uint16_t p_fps; - struct sensor_init_cfg init_info; - struct sensor_pict_fps gfps; - struct exp_gain_cfg exp_gain; - struct focus_cfg focus; - struct fps_cfg fps; - struct wb_info_cfg wb_info; - struct sensor_3d_exp_cfg sensor_3d_exp; - struct sensor_calib_data calib_info; - struct sensor_output_info_t output_info; - struct sensor_eeprom_data_t eeprom_data; - /* HTC_START */ - /* Ray add fuse to member */ - struct fuse_id fuse; - /* HTC_END */ -#if 1 /* HTC_START Hayden Huang 20111006 YUV Sensor */ - enum antibanding_mode antibanding_value; - enum brightness_t brightness_value; - enum frontcam_t frontcam_value; - enum wb_mode wb_value; - enum iso_mode iso_value; - enum sharpness_mode sharpness_value; - enum saturation_mode saturation_value; - enum contrast_mode contrast_value; - enum qtr_size_mode qtr_size_mode_value; - enum sensor_af_mode af_mode_value; -#endif /* HTC_END Hayden Huang 20111006 YUV Sensor */ - } cfg; -}; - -struct msm_actuator_move_params_t { - int8_t dir; - int32_t num_steps; -}; - -struct msm_actuator_set_info_t { - uint32_t total_steps; - uint16_t gross_steps; - uint16_t fine_steps; -}; - -struct msm_actuator_get_info_t { - uint32_t focal_length_num; - uint32_t focal_length_den; - uint32_t f_number_num; - uint32_t f_number_den; - uint32_t f_pix_num; - uint32_t f_pix_den; - uint32_t total_f_dist_num; - uint32_t total_f_dist_den; -}; - -struct msm_actuator_cfg_data { - int cfgtype; - uint8_t is_af_supported; - union { - struct msm_actuator_move_params_t move; - struct msm_actuator_set_info_t set_info; - struct msm_actuator_get_info_t get_info; - } cfg; -}; - -struct sensor_large_data { - int cfgtype; - union { - struct sensor_3d_cali_data_t sensor_3d_cali_data; - } data; -}; - -enum sensor_type_t { - BAYER, - YUV, - JPEG_SOC, -}; - enum flash_type { - LED_FLASH, - STROBE_FLASH, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + LED_FLASH, + STROBE_FLASH, }; - enum strobe_flash_ctrl_type { - STROBE_FLASH_CTRL_INIT, - STROBE_FLASH_CTRL_CHARGE, - STROBE_FLASH_CTRL_RELEASE +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + STROBE_FLASH_CTRL_INIT, + STROBE_FLASH_CTRL_CHARGE, + STROBE_FLASH_CTRL_RELEASE }; - +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ struct strobe_flash_ctrl_data { - enum strobe_flash_ctrl_type type; - int charge_en; -}; - -struct msm_camera_info { - int num_cameras; - uint8_t has_3d_support[MSM_MAX_CAMERA_SENSORS]; - uint8_t is_internal_cam[MSM_MAX_CAMERA_SENSORS]; - uint32_t s_mount_angle[MSM_MAX_CAMERA_SENSORS]; - const char *video_dev_name[MSM_MAX_CAMERA_SENSORS]; - enum sensor_type_t sensor_type[MSM_MAX_CAMERA_SENSORS]; - + enum strobe_flash_ctrl_type type; + int charge_en; }; - -struct msm_cam_config_dev_info { - int num_config_nodes; - const char *config_dev_name[MSM_MAX_CAMERA_CONFIGS]; - int config_dev_id[MSM_MAX_CAMERA_CONFIGS]; -}; - +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ struct flash_ctrl_data { - int flashtype; - union { - int led_state; - struct strobe_flash_ctrl_data strobe_ctrl; - } ctrl_data; + int flashtype; + union { + int led_state; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct strobe_flash_ctrl_data strobe_ctrl; + } ctrl_data; }; - -#define GET_NAME 0 -#define GET_PREVIEW_LINE_PER_FRAME 1 -#define GET_PREVIEW_PIXELS_PER_LINE 2 -#define GET_SNAPSHOT_LINE_PER_FRAME 3 -#define GET_SNAPSHOT_PIXELS_PER_LINE 4 -#define GET_SNAPSHOT_FPS 5 -#define 
GET_SNAPSHOT_MAX_EP_LINE_CNT 6 - -struct msm_camsensor_info { - char name[MAX_SENSOR_NAME]; - uint8_t flash_enabled; - int8_t total_steps; - uint8_t support_3d; +struct msm_snapshot_pp_status { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + void *status; }; - -#define V4L2_SINGLE_PLANE 0 -#define V4L2_MULTI_PLANE_Y 0 -#define V4L2_MULTI_PLANE_CBCR 1 -#define V4L2_MULTI_PLANE_CB 1 -#define V4L2_MULTI_PLANE_CR 2 - -struct plane_data { - int plane_id; - uint32_t offset; - unsigned long size; +struct msm_camsensor_info { + char name[MAX_SENSOR_NAME]; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + uint8_t flash_enabled; + int8_t total_steps; }; - -struct img_plane_info { - uint32_t width; - uint32_t height; - uint32_t pixelformat; - uint8_t buffer_type; /*Single/Multi planar*/ - uint8_t output_port; - uint32_t ext_mode; - uint8_t num_planes; - struct plane_data plane[MAX_PLANES]; - uint8_t vpe_can_use; +struct msm_camera_info { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + int num_cameras; + uint8_t has_3d_support[MAX_SENSOR_NUM]; + uint8_t is_internal_cam[MAX_SENSOR_NUM]; }; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#endif -#endif /* __LINUX_MSM_CAMERA_H */ diff --git a/include/sound/Kbuild b/include/sound/Kbuild index 802947f6..a98e10d2 100644 --- a/include/sound/Kbuild +++ b/include/sound/Kbuild @@ -6,3 +6,5 @@ header-y += hdsp.h header-y += hdspm.h header-y += sb16_csp.h header-y += sfnt_info.h +header-y += compress_offload.h +header-y += compress_params.h diff --git a/include/sound/compress_offload.h b/include/sound/compress_offload.h new file mode 100644 index 00000000..c43ff12a --- /dev/null +++ b/include/sound/compress_offload.h @@ -0,0 +1,81 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! 
+ *** + **************************************************************************** + ****************************************************************************/ +#ifndef __COMPRESS_OFFLOAD_H +#define __COMPRESS_OFFLOAD_H +#include +struct snd_compressed_buffer { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + size_t fragment_size; + int fragments; +}; +struct snd_compr_params { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct snd_compressed_buffer buffer; + struct snd_codec codec; +}; +struct snd_compr_tstamp { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + size_t copied_bytes; + size_t copied_total; + size_t decoded; + size_t rendered; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 sampling_rate; + uint64_t timestamp; +}; +struct snd_compr_avail { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + size_t avail; + struct snd_compr_tstamp tstamp; +}; +struct snd_compr_caps { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 num_codecs; + __u32 min_fragment_size; + __u32 max_fragment_size; + __u32 min_fragments; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 max_fragments; + __u32 codecs[MAX_NUM_CODECS]; + __u32 reserved[11]; +}; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +struct snd_compr_codec_caps { + __u32 codec; + __u32 num_descriptors; + struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS]; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +#define SNDRV_COMPRESS_GET_CAPS _IOWR('C', 0x00, struct snd_compr_caps *) +#define SNDRV_COMPRESS_GET_CODEC_CAPS _IOWR('C', 0x01, struct snd_compr_codec_caps *) +#define SNDRV_COMPRESS_SET_PARAMS _IOW('C', 0x02, struct snd_compr_params *) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SNDRV_COMPRESS_GET_PARAMS _IOR('C', 0x03, struct snd_compr_params *) +#define SNDRV_COMPRESS_TSTAMP _IOR('C', 0x10, struct snd_compr_tstamp *) +#define SNDRV_COMPRESS_AVAIL _IOR('C', 0x11, struct snd_compr_avail *) +#define SNDRV_COMPRESS_PAUSE _IO('C', 0x20) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SNDRV_COMPRESS_RESUME _IO('C', 0x21) +#define SNDRV_COMPRESS_START _IO('C', 0x22) +#define SNDRV_COMPRESS_STOP _IO('C', 0x23) +#define SNDRV_COMPRESS_DRAIN _IO('C', 0x24) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_COMPR_TRIGGER_DRAIN 7 +#endif + diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h new file mode 100644 index 00000000..a86b94d3 --- /dev/null +++ b/include/sound/compress_params.h @@ -0,0 +1,241 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. 
under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! + *** + **************************************************************************** + ****************************************************************************/ +#ifndef __SND_COMPRESS_PARAMS_H +#define __SND_COMPRESS_PARAMS_H +#define MAX_NUM_CODECS 32 +#define MAX_NUM_CODEC_DESCRIPTORS 32 +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MAX_NUM_BITRATES 32 +#define SND_AUDIOCODEC_PCM ((__u32) 0x00000001) +#define SND_AUDIOCODEC_MP3 ((__u32) 0x00000002) +#define SND_AUDIOCODEC_AMR ((__u32) 0x00000003) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOCODEC_AMRWB ((__u32) 0x00000004) +#define SND_AUDIOCODEC_AMRWBPLUS ((__u32) 0x00000005) +#define SND_AUDIOCODEC_AAC ((__u32) 0x00000006) +#define SND_AUDIOCODEC_WMA ((__u32) 0x00000007) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOCODEC_REAL ((__u32) 0x00000008) +#define SND_AUDIOCODEC_VORBIS ((__u32) 0x00000009) +#define SND_AUDIOCODEC_FLAC ((__u32) 0x0000000A) +#define SND_AUDIOCODEC_IEC61937 ((__u32) 0x0000000B) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C) +#define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D) +#define SND_AUDIOPROFILE_PCM ((__u32) 0x00000001) +#define SND_AUDIOCHANMODE_MP3_MONO ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOCHANMODE_MP3_STEREO ((__u32) 0x00000002) +#define SND_AUDIOCHANMODE_MP3_JOINTSTEREO ((__u32) 0x00000004) +#define SND_AUDIOCHANMODE_MP3_DUAL ((__u32) 0x00000008) +#define SND_AUDIOPROFILE_AMR ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_AMR_DTX_OFF ((__u32) 0x00000001) +#define SND_AUDIOMODE_AMR_VAD1 ((__u32) 0x00000002) +#define SND_AUDIOMODE_AMR_VAD2 ((__u32) 0x00000004) +#define SND_AUDIOSTREAMFORMAT_UNDEFINED ((__u32) 0x00000000) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOSTREAMFORMAT_CONFORMANCE ((__u32) 0x00000001) +#define SND_AUDIOSTREAMFORMAT_IF1 ((__u32) 0x00000002) +#define SND_AUDIOSTREAMFORMAT_IF2 ((__u32) 0x00000004) +#define SND_AUDIOSTREAMFORMAT_FSF ((__u32) 0x00000008) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOSTREAMFORMAT_RTPPAYLOAD ((__u32) 0x00000010) +#define SND_AUDIOSTREAMFORMAT_ITU ((__u32) 0x00000020) +#define SND_AUDIOPROFILE_AMRWB ((__u32) 0x00000001) +#define SND_AUDIOMODE_AMRWB_DTX_OFF ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_AMRWB_VAD1 ((__u32) 0x00000002) +#define SND_AUDIOMODE_AMRWB_VAD2 ((__u32) 0x00000004) +#define SND_AUDIOPROFILE_AMRWBPLUS ((__u32) 0x00000001) +#define SND_AUDIOPROFILE_AAC ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_AAC_MAIN ((__u32) 0x00000001) +#define SND_AUDIOMODE_AAC_LC ((__u32) 0x00000002) +#define SND_AUDIOMODE_AAC_SSR ((__u32) 0x00000004) +#define SND_AUDIOMODE_AAC_LTP ((__u32) 0x00000008) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_AAC_HE ((__u32) 0x00000010) +#define 
SND_AUDIOMODE_AAC_SCALABLE ((__u32) 0x00000020) +#define SND_AUDIOMODE_AAC_ERLC ((__u32) 0x00000040) +#define SND_AUDIOMODE_AAC_LD ((__u32) 0x00000080) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_AAC_HE_PS ((__u32) 0x00000100) +#define SND_AUDIOMODE_AAC_HE_MPS ((__u32) 0x00000200) +#define SND_AUDIOSTREAMFORMAT_MP2ADTS ((__u32) 0x00000001) +#define SND_AUDIOSTREAMFORMAT_MP4ADTS ((__u32) 0x00000002) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOSTREAMFORMAT_MP4LOAS ((__u32) 0x00000004) +#define SND_AUDIOSTREAMFORMAT_MP4LATM ((__u32) 0x00000008) +#define SND_AUDIOSTREAMFORMAT_ADIF ((__u32) 0x00000010) +#define SND_AUDIOSTREAMFORMAT_MP4FF ((__u32) 0x00000020) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOSTREAMFORMAT_RAW ((__u32) 0x00000040) +#define SND_AUDIOPROFILE_WMA7 ((__u32) 0x00000001) +#define SND_AUDIOPROFILE_WMA8 ((__u32) 0x00000002) +#define SND_AUDIOPROFILE_WMA9 ((__u32) 0x00000004) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOPROFILE_WMA10 ((__u32) 0x00000008) +#define SND_AUDIOMODE_WMA_LEVEL1 ((__u32) 0x00000001) +#define SND_AUDIOMODE_WMA_LEVEL2 ((__u32) 0x00000002) +#define SND_AUDIOMODE_WMA_LEVEL3 ((__u32) 0x00000004) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_WMA_LEVEL4 ((__u32) 0x00000008) +#define SND_AUDIOMODE_WMAPRO_LEVELM0 ((__u32) 0x00000010) +#define SND_AUDIOMODE_WMAPRO_LEVELM1 ((__u32) 0x00000020) +#define SND_AUDIOMODE_WMAPRO_LEVELM2 ((__u32) 0x00000040) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_WMAPRO_LEVELM3 ((__u32) 0x00000080) +#define SND_AUDIOSTREAMFORMAT_WMA_ASF ((__u32) 0x00000001) +#define SND_AUDIOSTREAMFORMAT_WMA_NOASF_HDR ((__u32) 0x00000002) +#define SND_AUDIOPROFILE_REALAUDIO ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_REALAUDIO_G2 ((__u32) 0x00000001) +#define SND_AUDIOMODE_REALAUDIO_8 ((__u32) 0x00000002) +#define SND_AUDIOMODE_REALAUDIO_10 ((__u32) 0x00000004) +#define SND_AUDIOMODE_REALAUDIO_SURROUND ((__u32) 0x00000008) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOPROFILE_VORBIS ((__u32) 0x00000001) +#define SND_AUDIOMODE_VORBIS ((__u32) 0x00000001) +#define SND_AUDIOPROFILE_FLAC ((__u32) 0x00000001) +#define SND_AUDIOMODE_FLAC_LEVEL0 ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_FLAC_LEVEL1 ((__u32) 0x00000002) +#define SND_AUDIOMODE_FLAC_LEVEL2 ((__u32) 0x00000004) +#define SND_AUDIOMODE_FLAC_LEVEL3 ((__u32) 0x00000008) +#define SND_AUDIOMODE_FLAC_LEVEL4 ((__u32) 0x00000010) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_FLAC_LEVEL5 ((__u32) 0x00000020) +#define SND_AUDIOMODE_FLAC_LEVEL6 ((__u32) 0x00000040) +#define SND_AUDIOMODE_FLAC_LEVEL7 ((__u32) 0x00000080) +#define SND_AUDIOMODE_FLAC_LEVEL8 ((__u32) 0x00000100) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOSTREAMFORMAT_FLAC ((__u32) 0x00000001) +#define SND_AUDIOSTREAMFORMAT_FLAC_OGG ((__u32) 0x00000002) +#define SND_AUDIOPROFILE_IEC61937 ((__u32) 0x00000001) +#define SND_AUDIOPROFILE_IEC61937_SPDIF ((__u32) 0x00000002) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR 
INSTRUCTIONS */ +#define SND_AUDIOMODE_IEC_REF_STREAM_HEADER ((__u32) 0x00000000) +#define SND_AUDIOMODE_IEC_LPCM ((__u32) 0x00000001) +#define SND_AUDIOMODE_IEC_AC3 ((__u32) 0x00000002) +#define SND_AUDIOMODE_IEC_MPEG1 ((__u32) 0x00000004) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_IEC_MP3 ((__u32) 0x00000008) +#define SND_AUDIOMODE_IEC_MPEG2 ((__u32) 0x00000010) +#define SND_AUDIOMODE_IEC_AACLC ((__u32) 0x00000020) +#define SND_AUDIOMODE_IEC_DTS ((__u32) 0x00000040) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_IEC_ATRAC ((__u32) 0x00000080) +#define SND_AUDIOMODE_IEC_SACD ((__u32) 0x00000100) +#define SND_AUDIOMODE_IEC_EAC3 ((__u32) 0x00000200) +#define SND_AUDIOMODE_IEC_DTS_HD ((__u32) 0x00000400) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_IEC_MLP ((__u32) 0x00000800) +#define SND_AUDIOMODE_IEC_DST ((__u32) 0x00001000) +#define SND_AUDIOMODE_IEC_WMAPRO ((__u32) 0x00002000) +#define SND_AUDIOMODE_IEC_REF_CXT ((__u32) 0x00004000) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_IEC_HE_AAC ((__u32) 0x00008000) +#define SND_AUDIOMODE_IEC_HE_AAC2 ((__u32) 0x00010000) +#define SND_AUDIOMODE_IEC_MPEG_SURROUND ((__u32) 0x00020000) +#define SND_AUDIOPROFILE_G723_1 ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_G723_1_ANNEX_A ((__u32) 0x00000001) +#define SND_AUDIOMODE_G723_1_ANNEX_B ((__u32) 0x00000002) +#define SND_AUDIOMODE_G723_1_ANNEX_C ((__u32) 0x00000004) +#define SND_AUDIOPROFILE_G729 ((__u32) 0x00000001) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define SND_AUDIOMODE_G729_ANNEX_A ((__u32) 0x00000001) +#define SND_AUDIOMODE_G729_ANNEX_B ((__u32) 0x00000002) +#define SND_RATECONTROLMODE_CONSTANTBITRATE ((__u32) 0x00000001) +#define SND_RATECONTROLMODE_VARIABLEBITRATE ((__u32) 0x00000002) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +struct snd_enc_wma { + __u32 super_block_align; +}; +struct snd_enc_vorbis { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __s32 quality; + __u32 managed; + __u32 max_bit_rate; + __u32 min_bit_rate; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 downmix; +}; +struct snd_enc_real { + __u32 quant_bits; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 start_region; + __u32 num_regions; +}; +struct snd_enc_flac { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 num; + __u32 gain; +}; +struct snd_enc_generic { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 bw; + __s32 reserved[15]; +}; +union snd_codec_options { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct snd_enc_wma wma; + struct snd_enc_vorbis vorbis; + struct snd_enc_real real; + struct snd_enc_flac flac; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct snd_enc_generic generic; +}; +struct snd_codec_desc { + __u32 max_ch; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 sample_rates; + __u32 bit_rate[MAX_NUM_BITRATES]; + __u32 num_bitrates; + __u32 rate_control; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 profiles; + __u32 modes; + __u32 
formats; + __u32 min_buffer; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 reserved[15]; +}; +struct snd_codec { + __u32 id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 ch_in; + __u32 ch_out; + __u32 sample_rate; + __u32 bit_rate; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 rate_control; + __u32 profile; + __u32 level; + __u32 ch_mode; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + __u32 format; + __u32 align; + union snd_codec_options options; + __u32 reserved[3]; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +#endif + From 2be9114a934d9700959e7ae314d5b2dbdd157694 Mon Sep 17 00:00:00 2001 From: Jeff Boody Date: Fri, 19 Oct 2012 16:42:34 -0600 Subject: [PATCH 094/117] msm: kgsl: Add support for Android's sync point The Android sync point framework will replace the synchronization previously implemented by genlock. This change implements the KGSL component of the sync point framework by creating a fence that is automatically signaled by KGSL when it's timestamp expires. The fence FD is returned to the user driver so that another process can wait for the sync point. Change-Id: Ifee38dfde00e551f3524f7a37833938dcdb64905 Signed-off-by: Jeff Boody Signed-off-by: Siddhartha Agrawal --- drivers/gpu/msm/Makefile | 1 + drivers/gpu/msm/kgsl.c | 25 ++++- drivers/gpu/msm/kgsl.h | 6 + drivers/gpu/msm/kgsl_device.h | 9 +- drivers/gpu/msm/kgsl_sync.c | 201 ++++++++++++++++++++++++++++++++++ drivers/gpu/msm/kgsl_sync.h | 75 +++++++++++++ include/linux/msm_kgsl.h | 16 ++- 7 files changed, 326 insertions(+), 7 deletions(-) create mode 100644 drivers/gpu/msm/kgsl_sync.c create mode 100644 drivers/gpu/msm/kgsl_sync.h diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index 7b8f3e63..65774c34 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -16,6 +16,7 @@ msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o +msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o msm_adreno-y += \ adreno_ringbuffer.o \ diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 9f615d3f..0554d3a2 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -33,6 +33,7 @@ #include "kgsl_sharedmem.h" #include "kgsl_device.h" #include "kgsl_trace.h" +#include "kgsl_sync.h" #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "kgsl." 
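For context, the fence path added by this patch is driven entirely from userspace through IOCTL_KGSL_TIMESTAMP_EVENT. The sketch below is illustrative only and is not part of the patch: it assumes the conventional /dev/kgsl-3d0 device node is already open, that a context id and GPU timestamp were obtained from earlier KGSL ioctls (not shown), and it uses the structures and constants introduced here (KGSL_TIMESTAMP_EVENT_FENCE, struct kgsl_timestamp_event_fence, the _IOWR form of IOCTL_KGSL_TIMESTAMP_EVENT). The helper name wait_for_gpu_timestamp is invented for the example; the returned fence_fd is an ordinary Android sync fence fd, so poll() (or handing the fd to another process) is enough to wait for the timestamp to expire.

/*
 * Hypothetical userspace sketch (not part of this patch): register a fence
 * sync point on an existing KGSL context and block until the GPU timestamp
 * expires.  Assumes <linux/msm_kgsl.h> with the definitions added here.
 */
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_kgsl.h>

static int wait_for_gpu_timestamp(int kgsl_fd, unsigned int context_id,
				  unsigned int timestamp)
{
	struct kgsl_timestamp_event_fence fence = { .fence_fd = -1 };
	struct kgsl_timestamp_event event;
	struct pollfd pfd;
	int ret;

	memset(&event, 0, sizeof(event));
	event.type = KGSL_TIMESTAMP_EVENT_FENCE;  /* value 2, added by this patch */
	event.context_id = context_id;
	event.timestamp = timestamp;
	event.priv = &fence;                      /* kernel writes fence_fd back here */
	event.len = sizeof(fence);

	ret = ioctl(kgsl_fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
	if (ret)
		return ret;

	/* The fd is a sync fence: it signals POLLIN once the timestamp expires. */
	pfd.fd = fence.fence_fd;
	pfd.events = POLLIN;
	ret = poll(&pfd, 1, -1);
	close(fence.fence_fd);
	return ret > 0 ? 0 : -1;
}

This mirrors what kgsl_add_fence_event() does on the kernel side further down in this patch: create a sync_pt on the context's timeline, wrap it in a fence, install it on an unused fd, and copy that fd back to userspace through the priv blob.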
@@ -59,9 +60,9 @@ static struct ion_client *kgsl_ion_client; * @returns - 0 on success or error code on failure */ -static int kgsl_add_event(struct kgsl_device *device, u32 ts, +int kgsl_add_event(struct kgsl_device *device, u32 ts, void (*cb)(struct kgsl_device *, void *, u32), void *priv, - struct kgsl_device_private *owner) + void *owner) { struct kgsl_event *event; struct list_head *n; @@ -105,6 +106,7 @@ static int kgsl_add_event(struct kgsl_device *device, u32 ts, queue_work(device->work_queue, &device->ts_expired_ws); return 0; } +EXPORT_SYMBOL(kgsl_add_event); /** * kgsl_cancel_events - Cancel all events for a process @@ -112,8 +114,8 @@ static int kgsl_add_event(struct kgsl_device *device, u32 ts, * @owner - driver instance that owns the events to cancel * */ -static void kgsl_cancel_events(struct kgsl_device *device, - struct kgsl_device_private *owner) +void kgsl_cancel_events(struct kgsl_device *device, + void *owner) { struct kgsl_event *event, *event_tmp; unsigned int cur = device->ftbl->readtimestamp(device, @@ -135,6 +137,7 @@ static void kgsl_cancel_events(struct kgsl_device *device, kfree(event); } } +EXPORT_SYMBOL(kgsl_cancel_events); static inline struct kgsl_mem_entry * kgsl_mem_entry_create(void) @@ -255,6 +258,12 @@ kgsl_create_context(struct kgsl_device_private *dev_priv) context->id = id; context->dev_priv = dev_priv; + if (kgsl_sync_timeline_create(context)) { + idr_remove(&dev_priv->device->context_idr, id); + kfree(context); + return NULL; + } + return context; } @@ -271,6 +280,7 @@ kgsl_destroy_context(struct kgsl_device_private *dev_priv, BUG_ON(context->devctxt); id = context->id; + kgsl_sync_timeline_destroy(context); kfree(context); idr_remove(&dev_priv->device->context_idr, id); @@ -1844,6 +1854,11 @@ static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv, param->timestamp, param->priv, param->len, dev_priv); break; + case KGSL_TIMESTAMP_EVENT_FENCE: + ret = kgsl_add_fence_event(dev_priv->device, + param->context_id, param->timestamp, param->priv, + param->len, dev_priv); + break; default: ret = -EINVAL; } @@ -1914,6 +1929,8 @@ static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP; else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD) cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP; + else if (cmd == IOCTL_KGSL_TIMESTAMP_EVENT_OLD) + cmd = IOCTL_KGSL_TIMESTAMP_EVENT; nr = _IOC_NR(cmd); diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 25c4827e..c2229cfc 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -242,5 +242,11 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry) { kref_put(&entry->refcount, kgsl_mem_entry_destroy); } +int kgsl_add_event(struct kgsl_device *device, u32 ts, + void (*cb)(struct kgsl_device *, void *, u32), void *priv, + void *owner); + +void kgsl_cancel_events(struct kgsl_device *device, + void *owner); #endif /* __KGSL_H */ diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index ba3c3290..d244bea3 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -23,6 +23,7 @@ #include "kgsl_pwrctrl.h" #include "kgsl_log.h" #include "kgsl_pwrscale.h" +#include #define KGSL_TIMEOUT_NONE 0 #define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF @@ -129,7 +130,7 @@ struct kgsl_event { void (*func)(struct kgsl_device *, void *, u32); void *priv; struct list_head list; - struct kgsl_device_private *owner; + void *owner; }; @@ -212,6 +213,12 @@ struct kgsl_context { * context was responsible for 
causing it */ unsigned int reset_status; + + /* + * Timeline used to create fences that can be signaled when a + * sync_pt timestamp expires. + */ + struct sync_timeline *timeline; }; struct kgsl_process_private { diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c new file mode 100644 index 00000000..afd4c48b --- /dev/null +++ b/drivers/gpu/msm/kgsl_sync.c @@ -0,0 +1,201 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "kgsl_sync.h" + +struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline, + unsigned int timestamp) +{ + struct sync_pt *pt; + pt = sync_pt_create(timeline, (int) sizeof(struct kgsl_sync_pt)); + if (pt) { + struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt; + kpt->timestamp = timestamp; + } + return pt; +} + +/* + * This should only be called on sync_pts which have been created but + * not added to a fence. + */ +void kgsl_sync_pt_destroy(struct sync_pt *pt) +{ + sync_pt_free(pt); +} + +static struct sync_pt *kgsl_sync_pt_dup(struct sync_pt *pt) +{ + struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt; + return kgsl_sync_pt_create(pt->parent, kpt->timestamp); +} + +static int kgsl_sync_pt_has_signaled(struct sync_pt *pt) +{ + struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt; + struct kgsl_sync_timeline *ktimeline = + (struct kgsl_sync_timeline *) pt->parent; + unsigned int ts = kpt->timestamp; + unsigned int last_ts = ktimeline->last_timestamp; + if (timestamp_cmp(last_ts, ts) >= 0) { + /* signaled */ + return 1; + } + return 0; +} + +struct kgsl_fence_event_priv { + struct kgsl_context *context; +}; + +/** + * kgsl_fence_event_cb - Event callback for a fence timestamp event + * @device - The KGSL device that expired the timestamp + * @priv - private data for the event + * @context_id - the context id that goes with the timestamp + * @timestamp - the timestamp that triggered the event + * + * Signal a fence following the expiration of a timestamp + */ + +static inline void kgsl_fence_event_cb(struct kgsl_device *device, + void *priv, u32 timestamp) +{ + struct kgsl_fence_event_priv *ev = priv; + kgsl_sync_timeline_signal(ev->context->timeline, timestamp); + kfree(ev); +} + +/** + * kgsl_add_fence_event - Create a new fence event + * @device - KGSL device to create the event on + * @timestamp - Timestamp to trigger the event + * @data - Return fence fd stored in struct kgsl_timestamp_event_fence + * @len - length of the fence event + * @owner - driver instance that owns this event + * @returns 0 on success or error code on error + * + * Create a fence and register an event to signal the fence when + * the timestamp expires + */ + +int kgsl_add_fence_event(struct kgsl_device *device, + u32 context_id, u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner) +{ + struct kgsl_fence_event_priv *event; + struct kgsl_timestamp_event_fence priv; + struct kgsl_context *context; + struct sync_pt *pt; + struct sync_fence *fence = NULL; + int ret = -EINVAL; + + if (len != sizeof(priv)) + 
return -EINVAL; + + context = kgsl_find_context(owner, context_id); + if (context == NULL) + return -EINVAL; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event == NULL) + return -ENOMEM; + event->context = context; + + pt = kgsl_sync_pt_create(context->timeline, timestamp); + if (pt == NULL) { + KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n"); + ret = -ENOMEM; + goto fail_pt; + } + + fence = sync_fence_create("kgsl-fence", pt); + if (fence == NULL) { + /* only destroy pt when not added to fence */ + kgsl_sync_pt_destroy(pt); + KGSL_DRV_ERR(device, "sync_fence_create failed\n"); + ret = -ENOMEM; + goto fail_fence; + } + + priv.fence_fd = get_unused_fd_flags(0); + if (priv.fence_fd < 0) { + KGSL_DRV_ERR(device, "invalid fence fd\n"); + ret = -EINVAL; + goto fail_fd; + } + sync_fence_install(fence, priv.fence_fd); + + if (copy_to_user(data, &priv, sizeof(priv))) { + ret = -EFAULT; + goto fail_copy_fd; + } + + ret = kgsl_add_event(device, timestamp, + kgsl_fence_event_cb, event, owner); + if (ret) + goto fail_event; + + return 0; + +fail_event: +fail_copy_fd: + /* clean up sync_fence_install */ + sync_fence_put(fence); + put_unused_fd(priv.fence_fd); +fail_fd: + /* clean up sync_fence_create */ + sync_fence_put(fence); +fail_fence: +fail_pt: + kfree(event); + return ret; +} + +static const struct sync_timeline_ops kgsl_sync_timeline_ops = { + .dup = kgsl_sync_pt_dup, + .has_signaled = kgsl_sync_pt_has_signaled, +}; + +int kgsl_sync_timeline_create(struct kgsl_context *context) +{ + struct kgsl_sync_timeline *ktimeline; + + context->timeline = sync_timeline_create(&kgsl_sync_timeline_ops, + (int) sizeof(struct kgsl_sync_timeline), "kgsl-timeline"); + if (context->timeline == NULL) + return -EINVAL; + + ktimeline = (struct kgsl_sync_timeline *) context->timeline; + ktimeline->last_timestamp = 0; + + return 0; +} + +void kgsl_sync_timeline_signal(struct sync_timeline *timeline, + unsigned int timestamp) +{ + struct kgsl_sync_timeline *ktimeline = + (struct kgsl_sync_timeline *) timeline; + ktimeline->last_timestamp = timestamp; + sync_timeline_signal(timeline); +} + +void kgsl_sync_timeline_destroy(struct kgsl_context *context) +{ + sync_timeline_destroy(context->timeline); +} diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h new file mode 100644 index 00000000..06b3ad0d --- /dev/null +++ b/drivers/gpu/msm/kgsl_sync.h @@ -0,0 +1,75 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __KGSL_SYNC_H +#define __KGSL_SYNC_H + +#include +#include "kgsl_device.h" + +struct kgsl_sync_timeline { + struct sync_timeline timeline; + unsigned int last_timestamp; +}; + +struct kgsl_sync_pt { + struct sync_pt pt; + unsigned int timestamp; +}; + +#if defined(CONFIG_SYNC) +struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline, + unsigned int timestamp); +void kgsl_sync_pt_destroy(struct sync_pt *pt); +int kgsl_add_fence_event(struct kgsl_device *device, + u32 context_id, u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner); +int kgsl_sync_timeline_create(struct kgsl_context *context); +void kgsl_sync_timeline_signal(struct sync_timeline *timeline, + unsigned int timestamp); +void kgsl_sync_timeline_destroy(struct kgsl_context *context); +#else +static inline struct sync_pt +*kgsl_sync_pt_create(struct sync_timeline *timeline, unsigned int timestamp) +{ + return NULL; +} + +static inline void kgsl_sync_pt_destroy(struct sync_pt *pt) +{ +} + +static inline int kgsl_add_fence_event(struct kgsl_device *device, + u32 context_id, u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner) +{ + return -EINVAL; +} + +static int kgsl_sync_timeline_create(struct kgsl_context *context) +{ + context->timeline = NULL; + return 0; +} + +static inline void +kgsl_sync_timeline_signal(struct sync_timeline *timeline, + unsigned int timestamp) +{ +} + +static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context) +{ +} +#endif + +#endif /* __KGSL_SYNC_H */ diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h index baef1cc4..0f22d4d7 100644 --- a/include/linux/msm_kgsl.h +++ b/include/linux/msm_kgsl.h @@ -436,7 +436,8 @@ struct kgsl_cff_syncmem { /* * A timestamp event allows the user space to register an action following an - * expired timestamp. + * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to + * _IOWR to support fences which need to return a fd for the priv parameter. */ struct kgsl_timestamp_event { @@ -447,7 +448,7 @@ struct kgsl_timestamp_event { size_t len; /* Size of the event specific blob */ }; -#define IOCTL_KGSL_TIMESTAMP_EVENT \ +#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \ _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event) /* A genlock timestamp event releases an existing lock on timestamp expire */ @@ -458,6 +459,17 @@ struct kgsl_timestamp_event_genlock { int handle; /* Handle of the genlock lock to release */ }; +/* A fence timestamp event releases an existing lock on timestamp expire */ + +#define KGSL_TIMESTAMP_EVENT_FENCE 2 + +struct kgsl_timestamp_event_fence { + int fence_fd; /* Fence to signal */ +}; + +#define IOCTL_KGSL_TIMESTAMP_EVENT \ + _IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event) + #ifdef __KERNEL__ #ifdef CONFIG_MSM_KGSL_DRM int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start, From 79408c69531dd993e0025a02849695bb79fbdfe1 Mon Sep 17 00:00:00 2001 From: Lexmazter Date: Fri, 1 Mar 2013 03:38:48 +0200 Subject: [PATCH 095/117] include/linux/sync.h - added --- include/linux/sync.h | 314 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 314 insertions(+) create mode 100644 include/linux/sync.h diff --git a/include/linux/sync.h b/include/linux/sync.h new file mode 100644 index 00000000..f057a4d4 --- /dev/null +++ b/include/linux/sync.h @@ -0,0 +1,314 @@ +/* + * include/linux/sync.h + * + * Copyright (C) 2012 Google, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNC_H +#define _LINUX_SYNC_H + +#include +#ifdef __KERNEL__ + +#include +#include +#include + +struct sync_timeline; +struct sync_pt; +struct sync_fence; + +/** + * struct sync_timeline_ops - sync object implementation ops + * @driver_name: name of the implentation + * @dup: duplicate a sync_pt + * @has_signaled: returns: + * 1 if pt has signaled + * 0 if pt has not signaled + * <0 on error + * @compare: returns: + * 1 if b will signal before a + * 0 if a and b will signal at the same time + * -1 if a will signabl before b + * @free_pt: called before sync_pt is freed + * @release_obj: called before sync_timeline is freed + */ +struct sync_timeline_ops { + const char *driver_name; + + /* required */ + struct sync_pt *(*dup)(struct sync_pt *pt); + + /* required */ + int (*has_signaled)(struct sync_pt *pt); + + /* required */ + int (*compare)(struct sync_pt *a, struct sync_pt *b); + + /* optional */ + void (*free_pt)(struct sync_pt *sync_pt); + + /* optional */ + void (*release_obj)(struct sync_timeline *sync_timeline); +}; + +/** + * struct sync_timeline - sync object + * @ops: ops that define the implementaiton of the sync_timeline + * @name: name of the sync_timeline. Useful for debugging + * @destoryed: set when sync_timeline is destroyed + * @child_list_head: list of children sync_pts for this sync_timeline + * @child_list_lock: lock protecting @child_list_head, destroyed, and + * sync_pt.status + * @active_list_head: list of active (unsignaled/errored) sync_pts + */ +struct sync_timeline { + const struct sync_timeline_ops *ops; + char name[32]; + + /* protected by child_list_lock */ + bool destroyed; + + struct list_head child_list_head; + spinlock_t child_list_lock; + + struct list_head active_list_head; + spinlock_t active_list_lock; +}; + +/** + * struct sync_pt - sync point + * @parent: sync_timeline to which this sync_pt belongs + * @child_list: membership in sync_timeline.child_list_head + * @active_list: membership in sync_timeline.active_list_head + * @fence: sync_fence to which the sync_pt belongs + * @pt_list: membership in sync_fence.pt_list_head + * @status: 1: signaled, 0:active, <0: error + */ +struct sync_pt { + struct sync_timeline *parent; + struct list_head child_list; + + struct list_head active_list; + + struct sync_fence *fence; + struct list_head pt_list; + + /* protected by parent->active_list_lock */ + int status; +}; + +/** + * struct sync_fence - sync fence + * @file: file representing this fence + * @name: name of sync_fence. Useful for debugging + * @pt_list_head: list of sync_pts in ths fence. 
immutable once fence + * is created + * @waiter_list_head: list of asynchronous waiters on this fence + * @waiter_list_lock: lock protecting @waiter_list_head and @status + * @status: 1: signaled, 0:active, <0: error + * + * @wq: wait queue for fence signaling + */ +struct sync_fence { + struct file *file; + char name[32]; + + /* this list is immutable once the fence is created */ + struct list_head pt_list_head; + + struct list_head waiter_list_head; + spinlock_t waiter_list_lock; /* also protects status */ + int status; + + wait_queue_head_t wq; +}; + +/** + * struct sync_fence_waiter - metadata for asynchronous waiter on a fence + * @waiter_list: membership in sync_fence.waiter_list_head + * @callback: function pointer to call when fence signals + * @callback_data: pointer to pass to @callback + */ +struct sync_fence_waiter { + struct list_head waiter_list; + + void (*callback)(struct sync_fence *fence, void *data); + void *callback_data; +}; + +/* + * API for sync_timeline implementers + */ + +/** + * sync_timeline_create() - creates a sync object + * @ops: specifies the implemention ops for the object + * @size: size to allocate for this obj + * @name: sync_timeline name + * + * Creates a new sync_timeline which will use the implemetation specified by + * @ops. @size bytes will be allocated allowing for implemntation specific + * data to be kept after the generic sync_timeline stuct. + */ +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name); + +/** + * sync_timeline_destory() - destorys a sync object + * @obj: sync_timeline to destroy + * + * A sync implemntation should call this when the @obj is going away + * (i.e. module unload.) @obj won't actually be freed until all its childern + * sync_pts are freed. + */ +void sync_timeline_destroy(struct sync_timeline *obj); + +/** + * sync_timeline_signal() - signal a status change on a sync_timeline + * @obj: sync_timeline to signal + * + * A sync implemntation should call this any time one of it's sync_pts + * has signaled or has an error condition. + */ +void sync_timeline_signal(struct sync_timeline *obj); + +/** + * sync_pt_create() - creates a sync pt + * @parent: sync_pt's parent sync_timeline + * @size: size to allocate for this pt + * + * Creates a new sync_pt as a chiled of @parent. @size bytes will be + * allocated allowing for implemntation specific data to be kept after + * the generic sync_timeline struct. + */ +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); + +/** + * sync_pt_free() - frees a sync pt + * @pt: sync_pt to free + * + * This should only be called on sync_pts which have been created but + * not added to a fence. + */ +void sync_pt_free(struct sync_pt *pt); + +/** + * sync_fence_create() - creates a sync fence + * @name: name of fence to create + * @pt: sync_pt to add to the fence + * + * Creates a fence containg @pt. Once this is called, the fence takes + * ownership of @pt. + */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt); + +/* + * API for sync_fence consumers + */ + +/** + * sync_fence_merge() - merge two fences + * @name: name of new fence + * @a: fence a + * @b: fence b + * + * Creates a new fence which contains copies of all the sync_pts in both + * @a and @b. @a and @b remain valid, independent fences. 
+ */ +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b); + +/** + * sync_fence_fdget() - get a fence from an fd + * @fd: fd referencing a fence + * + * Ensures @fd references a valid fence, increments the refcount of the backing + * file, and returns the fence. + */ +struct sync_fence *sync_fence_fdget(int fd); + +/** + * sync_fence_put() - puts a refernnce of a sync fence + * @fence: fence to put + * + * Puts a reference on @fence. If this is the last reference, the fence and + * all it's sync_pts will be freed + */ +void sync_fence_put(struct sync_fence *fence); + +/** + * sync_fence_install() - installs a fence into a file descriptor + * @fence: fence to instal + * @fd: file descriptor in which to install the fence + * + * Installs @fence into @fd. @fd's should be acquired through get_unused_fd(). + */ +void sync_fence_install(struct sync_fence *fence, int fd); + +/** + * sync_fence_wait_async() - registers and async wait on the fence + * @fence: fence to wait on + * @callback: callback + * @callback_data data to pass to the callback + * + * Returns 1 if @fence has already signaled. + * + * Registers a callback to be called when @fence signals or has an error + */ +int sync_fence_wait_async(struct sync_fence *fence, + void (*callback)(struct sync_fence *, void *data), + void *callback_data); + +/** + * sync_fence_wait() - wait on fence + * @fence: fence to wait on + * @tiemout: timeout in ms + * + * Wait for @fence to be signaled or have an error. Waits indefintly + * if @timeout = 0 + */ +int sync_fence_wait(struct sync_fence *fence, long timeout); + +/* useful for sync driver's debug print handlers */ +const char *sync_status_str(int status); + +#endif /* __KERNEL__ */ + +/** + * struct sync_merge_data - data passed to merge ioctl + * @fd2: file descriptor of second fence + * @name: name of new fence + * @fence: returns the fd of the new fence to userspace + */ +struct sync_merge_data { + __s32 fd2; /* fd of second fence */ + char name[32]; /* name of new fence */ + __s32 fence; /* fd on newly created fence */ +}; + +#define SYNC_IOC_MAGIC '>' + +/** + * DOC: SYNC_IOC_WAIT - wait for a fence to signal + * + * pass timeout in milliseconds. + */ +#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __u32) + +/** + * DOC: SYNC_IOC_MERGE - merge two fences + * + * Takes a struct sync_merge_data. Creates a new fence containing copies of + * the sync_pts in both the calling fd and sync_merge_data.fd2. 
Returns the + * new fence's fd in sync_merge_data.fence + */ +#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) + +#endif /* _LINUX_SYNC_H */ From ca7acf45347e0955d75940ff18accc7fafaf34e8 Mon Sep 17 00:00:00 2001 From: David Hays Date: Mon, 6 May 2013 23:14:07 -0500 Subject: [PATCH 096/117] vigor: enable sync Change-Id: I580a38884ef5f0fc685639737e6f5798322cf704 --- arch/arm/configs/vigor_aosp_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index bc5837fc..692d1c5d 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -1116,6 +1116,7 @@ CONFIG_EXTRA_FIRMWARE="" # CONFIG_SYS_HYPERVISOR is not set CONFIG_GENLOCK=y CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y # CONFIG_CONNECTOR is not set # CONFIG_MTD is not set # CONFIG_PARPORT is not set From 635b8f5bb568104602cffc15441179dd566a0fbd Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 14 Dec 2011 13:34:53 -0800 Subject: [PATCH 097/117] pmem: add support for reusable pmem Individual pmem devices can now be marked as reusable. The memory will be reused through fmem. Change-Id: Id71c2e0c42f093420c241771d7a2620ed2accfc4 Signed-off-by: Laura Abbott --- drivers/misc/pmem.c | 28 ++++++++++++++++++++-------- include/linux/android_pmem.h | 4 ++++ 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c index 2c7c2848..2732dced 100644 --- a/drivers/misc/pmem.c +++ b/drivers/misc/pmem.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -243,6 +244,10 @@ struct pmem_info { * map and unmap as needed */ int map_on_demand; + /* + * memory will be reused through fmem + */ + int reusable; }; #define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id) @@ -2874,19 +2879,26 @@ int pmem_setup(struct android_pmem_platform_data *pdata, pr_info("allocating %lu bytes at %p (%lx physical) for %s\n", pmem[id].size, pmem[id].vbase, pmem[id].base, pmem[id].name); - pmem[id].map_on_demand = pdata->map_on_demand; + pmem[id].reusable = pdata->reusable; + /* reusable pmem requires map on demand */ + pmem[id].map_on_demand = pdata->map_on_demand || pdata->reusable; if (pmem[id].map_on_demand) { - pmem_vma = get_vm_area(pmem[id].size, VM_IOREMAP); - if (!pmem_vma) { - pr_err("pmem: Failed to allocate virtual space for " + if (pmem[id].reusable) { + const struct fmem_data *fmem_info = fmem_get_info(); + pmem[id].area = fmem_info->area; + } else { + pmem_vma = get_vm_area(pmem[id].size, VM_IOREMAP); + if (!pmem_vma) { + pr_err("pmem: Failed to allocate virtual space for " "%s\n", pdata->name); - goto out_put_kobj; - } - pr_err("pmem: Reserving virtual address range %lx - %lx for" + goto out_put_kobj; + } + pr_err("pmem: Reserving virtual address range %lx - %lx for" " %s\n", (unsigned long) pmem_vma->addr, (unsigned long) pmem_vma->addr + pmem[id].size, pdata->name); - pmem[id].area = pmem_vma; + pmem[id].area = pmem_vma; + } } else pmem[id].area = NULL; diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h index 54763a79..532eef12 100644 --- a/include/linux/android_pmem.h +++ b/include/linux/android_pmem.h @@ -178,6 +178,10 @@ struct android_pmem_platform_data * indicates that this region should be mapped/unmaped as needed */ int map_on_demand; + /* + * indicates this pmem may be reused via fmem + */ + int reusable; }; int pmem_setup(struct android_pmem_platform_data *pdata, From 
5a08fb59246d51c1af19fa6edb36576a49d08ddb Mon Sep 17 00:00:00 2001 From: David Hays Date: Wed, 8 May 2013 17:23:50 -0500 Subject: [PATCH 098/117] Include: linux/fmem.h Change-Id: Iea427b3ffd2b97808d0b71ef0f53c89401adda8d --- include/linux/fmem.h | 62 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 include/linux/fmem.h diff --git a/include/linux/fmem.h b/include/linux/fmem.h new file mode 100644 index 00000000..e4fa82cb --- /dev/null +++ b/include/linux/fmem.h @@ -0,0 +1,62 @@ +/* + * + * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _FMEM_H_ +#define _FMEM_H_ + +#include + +struct fmem_platform_data { + unsigned long phys; + unsigned long size; + unsigned long reserved_size_low; + unsigned long reserved_size_high; + unsigned long align; +}; + +struct fmem_data { + unsigned long phys; + void *virt; + struct vm_struct *area; + unsigned long size; + unsigned long reserved_size_low; + unsigned long reserved_size_high; +}; + +enum fmem_state { + FMEM_UNINITIALIZED = 0, + FMEM_C_STATE, + FMEM_T_STATE, + FMEM_O_STATE, +}; + +#ifdef CONFIG_QCACHE +struct fmem_data *fmem_get_info(void); +int fmem_set_state(enum fmem_state); +void lock_fmem_state(void); +void unlock_fmem_state(void); +void *fmem_map_virtual_area(int cacheability); +void fmem_unmap_virtual_area(void); +#else +static inline struct fmem_data *fmem_get_info(void) { return NULL; } +static inline int fmem_set_state(enum fmem_state f) { return -ENODEV; } +static inline void lock_fmem_state(void) { return; } +static inline void unlock_fmem_state(void) { return; } +static inline void *fmem_map_virtual_area(int cacheability) { return NULL; } +static inline void fmem_unmap_virtual_area(void) { return; } +#endif + +int request_fmem_c_region(void *unused); +int release_fmem_c_region(void *unused); +#endif From 354765ee04c68c60cb68504122cc796fe39cb4e3 Mon Sep 17 00:00:00 2001 From: David Hays Date: Thu, 9 May 2013 22:28:07 -0500 Subject: [PATCH 099/117] msm_fb: hdmi: Resolution modes on HDMI Add a new header file that lists all supported HDMI resolution modes and the associated timing information. 
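Before the header itself, a brief illustration of how the new table is meant to be consumed: msm_hdmi_modes.h (added below) exposes struct msm_hdmi_mode_timing_info plus helpers that build a lookup table of supported modes. The sketch that follows is illustrative only and is not part of the patch; the LUT array name, the function name, and the choice of enabling CEA plus DVI modes are assumptions.

/*
 * Minimal sketch (not from the patch): build the timing LUT exported by
 * msm_hdmi_modes.h and query one entry. Assumes kernel context.
 */
#include <linux/kernel.h>
#include <video/msm_hdmi_modes.h>

static struct msm_hdmi_mode_timing_info example_lut[HDMI_VFRMT_MAX];

static void example_build_hdmi_lut(void)
{
	const struct msm_hdmi_mode_timing_info *t;

	/* mark every entry unsupported, then enable the CEA and DVI sets */
	MSM_HDMI_MODES_INIT_TIMINGS(example_lut);
	MSM_HDMI_MODES_SET_SUPP_TIMINGS(example_lut,
					MSM_HDMI_MODES_CEA | MSM_HDMI_MODES_DVI);

	t = &example_lut[HDMI_VFRMT_1920x1080p60_16_9];
	if (t->supported)
		pr_info("%s: pixel clock %u kHz, %s\n",
			msm_hdmi_mode_2string(t->video_format),
			t->pixel_freq,
			t->interlaced ? "interlaced" : "progressive");
}

Per the comments in the header, pixel_freq is stored in kHz (divide by 1000 for MHz) and refresh_rate in millihertz (divide by 1000 for Hz), which is why the print above reports the raw value as kHz.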
Change-Id: Icd0b7bb2926742730c030ea505b1218b610f4c10 --- include/video/Kbuild | 1 + include/video/msm_hdmi_modes.h | 332 +++++++++++++++++++++++++++++++++ 2 files changed, 333 insertions(+) create mode 100644 include/video/msm_hdmi_modes.h diff --git a/include/video/Kbuild b/include/video/Kbuild index ad3e622c..53e13cbb 100644 --- a/include/video/Kbuild +++ b/include/video/Kbuild @@ -1,3 +1,4 @@ header-y += edid.h header-y += sisfb.h header-y += uvesafb.h +header-y += msm_hdmi_modes.h diff --git a/include/video/msm_hdmi_modes.h b/include/video/msm_hdmi_modes.h new file mode 100644 index 00000000..b9d38926 --- /dev/null +++ b/include/video/msm_hdmi_modes.h @@ -0,0 +1,332 @@ +#ifndef __MSM_HDMI_MODES_H__ +#define __MSM_HDMI_MODES_H__ +#include + +struct msm_hdmi_mode_timing_info { + uint32_t video_format; + uint32_t active_h; + uint32_t front_porch_h; + uint32_t pulse_width_h; + uint32_t back_porch_h; + uint32_t active_low_h; + uint32_t active_v; + uint32_t front_porch_v; + uint32_t pulse_width_v; + uint32_t back_porch_v; + uint32_t active_low_v; + /* Must divide by 1000 to get the actual frequency in MHZ */ + uint32_t pixel_freq; + /* Must divide by 1000 to get the actual frequency in HZ */ + uint32_t refresh_rate; + uint32_t interlaced; + uint32_t supported; +}; + +#define MSM_HDMI_MODES_CEA 1 +#define MSM_HDMI_MODES_XTND 2 +#define MSM_HDMI_MODES_DVI 4 +#define MSM_HDMI_MODES_ALL 7 + +/* all video formats defined by CEA 861D */ +#define HDMI_VFRMT_UNKNOWN 0 +#define HDMI_VFRMT_640x480p60_4_3 1 +#define HDMI_VFRMT_720x480p60_4_3 2 +#define HDMI_VFRMT_720x480p60_16_9 3 +#define HDMI_VFRMT_1280x720p60_16_9 4 +#define HDMI_VFRMT_1920x1080i60_16_9 5 +#define HDMI_VFRMT_720x480i60_4_3 6 +#define HDMI_VFRMT_1440x480i60_4_3 HDMI_VFRMT_720x480i60_4_3 +#define HDMI_VFRMT_720x480i60_16_9 7 +#define HDMI_VFRMT_1440x480i60_16_9 HDMI_VFRMT_720x480i60_16_9 +#define HDMI_VFRMT_720x240p60_4_3 8 +#define HDMI_VFRMT_1440x240p60_4_3 HDMI_VFRMT_720x240p60_4_3 +#define HDMI_VFRMT_720x240p60_16_9 9 +#define HDMI_VFRMT_1440x240p60_16_9 HDMI_VFRMT_720x240p60_16_9 +#define HDMI_VFRMT_2880x480i60_4_3 10 +#define HDMI_VFRMT_2880x480i60_16_9 11 +#define HDMI_VFRMT_2880x240p60_4_3 12 +#define HDMI_VFRMT_2880x240p60_16_9 13 +#define HDMI_VFRMT_1440x480p60_4_3 14 +#define HDMI_VFRMT_1440x480p60_16_9 15 +#define HDMI_VFRMT_1920x1080p60_16_9 16 +#define HDMI_VFRMT_720x576p50_4_3 17 +#define HDMI_VFRMT_720x576p50_16_9 18 +#define HDMI_VFRMT_1280x720p50_16_9 19 +#define HDMI_VFRMT_1920x1080i50_16_9 20 +#define HDMI_VFRMT_720x576i50_4_3 21 +#define HDMI_VFRMT_1440x576i50_4_3 HDMI_VFRMT_720x576i50_4_3 +#define HDMI_VFRMT_720x576i50_16_9 22 +#define HDMI_VFRMT_1440x576i50_16_9 HDMI_VFRMT_720x576i50_16_9 +#define HDMI_VFRMT_720x288p50_4_3 23 +#define HDMI_VFRMT_1440x288p50_4_3 HDMI_VFRMT_720x288p50_4_3 +#define HDMI_VFRMT_720x288p50_16_9 24 +#define HDMI_VFRMT_1440x288p50_16_9 HDMI_VFRMT_720x288p50_16_9 +#define HDMI_VFRMT_2880x576i50_4_3 25 +#define HDMI_VFRMT_2880x576i50_16_9 26 +#define HDMI_VFRMT_2880x288p50_4_3 27 +#define HDMI_VFRMT_2880x288p50_16_9 28 +#define HDMI_VFRMT_1440x576p50_4_3 29 +#define HDMI_VFRMT_1440x576p50_16_9 30 +#define HDMI_VFRMT_1920x1080p50_16_9 31 +#define HDMI_VFRMT_1920x1080p24_16_9 32 +#define HDMI_VFRMT_1920x1080p25_16_9 33 +#define HDMI_VFRMT_1920x1080p30_16_9 34 +#define HDMI_VFRMT_2880x480p60_4_3 35 +#define HDMI_VFRMT_2880x480p60_16_9 36 +#define HDMI_VFRMT_2880x576p50_4_3 37 +#define HDMI_VFRMT_2880x576p50_16_9 38 +#define HDMI_VFRMT_1920x1250i50_16_9 39 +#define 
HDMI_VFRMT_1920x1080i100_16_9 40 +#define HDMI_VFRMT_1280x720p100_16_9 41 +#define HDMI_VFRMT_720x576p100_4_3 42 +#define HDMI_VFRMT_720x576p100_16_9 43 +#define HDMI_VFRMT_720x576i100_4_3 44 +#define HDMI_VFRMT_1440x576i100_4_3 HDMI_VFRMT_720x576i100_4_3 +#define HDMI_VFRMT_720x576i100_16_9 45 +#define HDMI_VFRMT_1440x576i100_16_9 HDMI_VFRMT_720x576i100_16_9 +#define HDMI_VFRMT_1920x1080i120_16_9 46 +#define HDMI_VFRMT_1280x720p120_16_9 47 +#define HDMI_VFRMT_720x480p120_4_3 48 +#define HDMI_VFRMT_720x480p120_16_9 49 +#define HDMI_VFRMT_720x480i120_4_3 50 +#define HDMI_VFRMT_1440x480i120_4_3 HDMI_VFRMT_720x480i120_4_3 +#define HDMI_VFRMT_720x480i120_16_9 51 +#define HDMI_VFRMT_1440x480i120_16_9 HDMI_VFRMT_720x480i120_16_9 +#define HDMI_VFRMT_720x576p200_4_3 52 +#define HDMI_VFRMT_720x576p200_16_9 53 +#define HDMI_VFRMT_720x576i200_4_3 54 +#define HDMI_VFRMT_1440x576i200_4_3 HDMI_VFRMT_720x576i200_4_3 +#define HDMI_VFRMT_720x576i200_16_9 55 +#define HDMI_VFRMT_1440x576i200_16_9 HDMI_VFRMT_720x576i200_16_9 +#define HDMI_VFRMT_720x480p240_4_3 56 +#define HDMI_VFRMT_720x480p240_16_9 57 +#define HDMI_VFRMT_720x480i240_4_3 58 +#define HDMI_VFRMT_1440x480i240_4_3 HDMI_VFRMT_720x480i240_4_3 +#define HDMI_VFRMT_720x480i240_16_9 59 +#define HDMI_VFRMT_1440x480i240_16_9 HDMI_VFRMT_720x480i240_16_9 +#define HDMI_VFRMT_1280x720p24_16_9 60 +#define HDMI_VFRMT_1280x720p25_16_9 61 +#define HDMI_VFRMT_1280x720p30_16_9 62 +#define HDMI_VFRMT_1920x1080p120_16_9 63 +#define HDMI_VFRMT_1920x1080p100_16_9 64 +/* Video Identification Codes from 65-127 are reserved for the future */ +#define HDMI_VFRMT_END 127 + +/* extended video formats */ +#define HDMI_VFRMT_3840x2160p30_16_9 (HDMI_VFRMT_END + 1) +#define HDMI_VFRMT_3840x2160p25_16_9 (HDMI_VFRMT_END + 2) +#define HDMI_VFRMT_3840x2160p24_16_9 (HDMI_VFRMT_END + 3) +#define HDMI_VFRMT_4096x2160p24_16_9 (HDMI_VFRMT_END + 4) +#define HDMI_EVFRMT_END HDMI_VFRMT_4096x2160p24_16_9 + +/* VESA DMT TIMINGS */ +#define HDMI_VFRMT_2560x1600p60_16_9 (HDMI_EVFRMT_END + 1) +#define HDMI_VFRMT_1280x1024p60_5_4 (HDMI_EVFRMT_END + 2) +#define VESA_DMT_VFRMT_END HDMI_VFRMT_1280x1024p60_5_4 +#define HDMI_VFRMT_MAX (VESA_DMT_VFRMT_END + 1) +#define HDMI_VFRMT_FORCE_32BIT 0x7FFFFFFF + +/* Timing information for supported modes */ +#define VFRMT_NOT_SUPPORTED(VFRMT) \ + {VFRMT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, false} + +#define HDMI_VFRMT_640x480p60_4_3_TIMING \ + {HDMI_VFRMT_640x480p60_4_3, 640, 16, 96, 48, true, \ + 480, 10, 2, 33, true, 25200, 60000, false, true} +#define HDMI_VFRMT_720x480p60_4_3_TIMING \ + {HDMI_VFRMT_720x480p60_4_3, 720, 16, 62, 60, true, \ + 480, 9, 6, 30, true, 27030, 60000, false, true} +#define HDMI_VFRMT_720x480p60_16_9_TIMING \ + {HDMI_VFRMT_720x480p60_16_9, 720, 16, 62, 60, true, \ + 480, 9, 6, 30, true, 27030, 60000, false, true} +#define HDMI_VFRMT_1280x720p60_16_9_TIMING \ + {HDMI_VFRMT_1280x720p60_16_9, 1280, 110, 40, 220, false, \ + 720, 5, 5, 20, false, 74250, 60000, false, true} +#define HDMI_VFRMT_1920x1080i60_16_9_TIMING \ + {HDMI_VFRMT_1920x1080i60_16_9, 1920, 88, 44, 148, false, \ + 540, 2, 5, 5, false, 74250, 60000, false, true} +#define HDMI_VFRMT_1440x480i60_4_3_TIMING \ + {HDMI_VFRMT_1440x480i60_4_3, 1440, 38, 124, 114, true, \ + 240, 4, 3, 15, true, 27000, 60000, true, true} +#define HDMI_VFRMT_1440x480i60_16_9_TIMING \ + {HDMI_VFRMT_1440x480i60_16_9, 1440, 38, 124, 114, true, \ + 240, 4, 3, 15, true, 27000, 60000, true, true} +#define HDMI_VFRMT_1920x1080p60_16_9_TIMING \ + {HDMI_VFRMT_1920x1080p60_16_9, 1920, 88, 44, 148, false, 
\ + 1080, 4, 5, 36, false, 148500, 60000, false, true} +#define HDMI_VFRMT_720x576p50_4_3_TIMING \ + {HDMI_VFRMT_720x576p50_4_3, 720, 12, 64, 68, true, \ + 576, 5, 5, 39, true, 27000, 50000, false, true} +#define HDMI_VFRMT_720x576p50_16_9_TIMING \ + {HDMI_VFRMT_720x576p50_16_9, 720, 12, 64, 68, true, \ + 576, 5, 5, 39, true, 27000, 50000, false, true} +#define HDMI_VFRMT_1280x720p50_16_9_TIMING \ + {HDMI_VFRMT_1280x720p50_16_9, 1280, 440, 40, 220, false, \ + 720, 5, 5, 20, false, 74250, 50000, false, true} +#define HDMI_VFRMT_1440x576i50_4_3_TIMING \ + {HDMI_VFRMT_1440x576i50_4_3, 1440, 24, 126, 138, true, \ + 288, 2, 3, 19, true, 27000, 50000, true, true} +#define HDMI_VFRMT_1440x576i50_16_9_TIMING \ + {HDMI_VFRMT_1440x576i50_16_9, 1440, 24, 126, 138, true, \ + 288, 2, 3, 19, true, 27000, 50000, true, true} +#define HDMI_VFRMT_1920x1080p50_16_9_TIMING \ + {HDMI_VFRMT_1920x1080p50_16_9, 1920, 528, 44, 148, false, \ + 1080, 4, 5, 36, false, 148500, 50000, false, true} +#define HDMI_VFRMT_1920x1080p24_16_9_TIMING \ + {HDMI_VFRMT_1920x1080p24_16_9, 1920, 638, 44, 148, false, \ + 1080, 4, 5, 36, false, 74250, 24000, false, true} +#define HDMI_VFRMT_1920x1080p25_16_9_TIMING \ + {HDMI_VFRMT_1920x1080p25_16_9, 1920, 528, 44, 148, false, \ + 1080, 4, 5, 36, false, 74250, 25000, false, true} +#define HDMI_VFRMT_1920x1080p30_16_9_TIMING \ + {HDMI_VFRMT_1920x1080p30_16_9, 1920, 88, 44, 148, false, \ + 1080, 4, 5, 36, false, 74250, 30000, false, true} +#define HDMI_VFRMT_1280x1024p60_5_4_TIMING \ + {HDMI_VFRMT_1280x1024p60_5_4, 1280, 48, 112, 248, false, \ + 1024, 1, 3, 38, false, 108000, 60000, false, true} +#define HDMI_VFRMT_2560x1600p60_16_9_TIMING \ + {HDMI_VFRMT_2560x1600p60_16_9, 2560, 48, 32, 80, false, \ + 1600, 3, 6, 37, false, 268500, 60000, false, true} +#define HDMI_VFRMT_3840x2160p30_16_9_TIMING \ + {HDMI_VFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false, \ + 2160, 8, 10, 72, false, 297000, 30000, false, true} +#define HDMI_VFRMT_3840x2160p25_16_9_TIMING \ + {HDMI_VFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false, \ + 2160, 8, 10, 72, false, 297000, 25000, false, true} +#define HDMI_VFRMT_3840x2160p24_16_9_TIMING \ + {HDMI_VFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false, \ + 2160, 8, 10, 72, false, 297000, 24000, false, true} +#define HDMI_VFRMT_4096x2160p24_16_9_TIMING \ + {HDMI_VFRMT_4096x2160p24_16_9, 4096, 1020, 88, 296, false, \ + 2160, 8, 10, 72, false, 297000, 24000, false, true} + +#define MSM_HDMI_MODES_SET_TIMING(LUT, MODE) do { \ + struct msm_hdmi_mode_timing_info mode = MODE##_TIMING; \ + LUT[MODE] = mode;\ + } while (0) + +static inline void MSM_HDMI_MODES_INIT_TIMINGS( + struct msm_hdmi_mode_timing_info *lut) +{ + int i; + + for (i = 0; i < HDMI_VFRMT_MAX; i++) { + struct msm_hdmi_mode_timing_info mode = VFRMT_NOT_SUPPORTED(i); + lut[i] = mode; + } +} + +static inline void MSM_HDMI_MODES_SET_SUPP_TIMINGS( + struct msm_hdmi_mode_timing_info *lut, int type) +{ + if (type & MSM_HDMI_MODES_CEA) { + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_640x480p60_4_3); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_720x480p60_4_3); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_720x480p60_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1280x720p60_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1920x1080i60_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1440x480i60_4_3); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1440x480i60_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1920x1080p60_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_720x576p50_4_3); + 
MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_720x576p50_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1280x720p50_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1440x576i50_4_3); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1440x576i50_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1920x1080p50_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1920x1080p24_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1920x1080p25_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1920x1080p30_16_9); + } + + if (type & MSM_HDMI_MODES_XTND) { + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_3840x2160p30_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_3840x2160p25_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_3840x2160p24_16_9); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_4096x2160p24_16_9); + } + + if (type & MSM_HDMI_MODES_DVI) { + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_1280x1024p60_5_4); + MSM_HDMI_MODES_SET_TIMING(lut, HDMI_VFRMT_2560x1600p60_16_9); + } +} + +static inline const char *msm_hdmi_mode_2string(uint32_t mode) +{ + switch (mode) { + case HDMI_VFRMT_UNKNOWN: return "Unknown"; + case HDMI_VFRMT_640x480p60_4_3: return "640x480 p60 4/3"; + case HDMI_VFRMT_720x480p60_4_3: return "720x480 p60 4/3"; + case HDMI_VFRMT_720x480p60_16_9: return "720x480 p60 16/9"; + case HDMI_VFRMT_1280x720p60_16_9: return "1280x 720 p60 16/9"; + case HDMI_VFRMT_1920x1080i60_16_9: return "1920x1080 i60 16/9"; + case HDMI_VFRMT_1440x480i60_4_3: return "1440x480 i60 4/3"; + case HDMI_VFRMT_1440x480i60_16_9: return "1440x480 i60 16/9"; + case HDMI_VFRMT_1440x240p60_4_3: return "1440x240 p60 4/3"; + case HDMI_VFRMT_1440x240p60_16_9: return "1440x240 p60 16/9"; + case HDMI_VFRMT_2880x480i60_4_3: return "2880x480 i60 4/3"; + case HDMI_VFRMT_2880x480i60_16_9: return "2880x480 i60 16/9"; + case HDMI_VFRMT_2880x240p60_4_3: return "2880x240 p60 4/3"; + case HDMI_VFRMT_2880x240p60_16_9: return "2880x240 p60 16/9"; + case HDMI_VFRMT_1440x480p60_4_3: return "1440x480 p60 4/3"; + case HDMI_VFRMT_1440x480p60_16_9: return "1440x480 p60 16/9"; + case HDMI_VFRMT_1920x1080p60_16_9: return "1920x1080 p60 16/9"; + case HDMI_VFRMT_720x576p50_4_3: return "720x576 p50 4/3"; + case HDMI_VFRMT_720x576p50_16_9: return "720x576 p50 16/9"; + case HDMI_VFRMT_1280x720p50_16_9: return "1280x720 p50 16/9"; + case HDMI_VFRMT_1920x1080i50_16_9: return "1920x1080 i50 16/9"; + case HDMI_VFRMT_1440x576i50_4_3: return "1440x576 i50 4/3"; + case HDMI_VFRMT_1440x576i50_16_9: return "1440x576 i50 16/9"; + case HDMI_VFRMT_1440x288p50_4_3: return "1440x288 p50 4/3"; + case HDMI_VFRMT_1440x288p50_16_9: return "1440x288 p50 16/9"; + case HDMI_VFRMT_2880x576i50_4_3: return "2880x576 i50 4/3"; + case HDMI_VFRMT_2880x576i50_16_9: return "2880x576 i50 16/9"; + case HDMI_VFRMT_2880x288p50_4_3: return "2880x288 p50 4/3"; + case HDMI_VFRMT_2880x288p50_16_9: return "2880x288 p50 16/9"; + case HDMI_VFRMT_1440x576p50_4_3: return "1440x576 p50 4/3"; + case HDMI_VFRMT_1440x576p50_16_9: return "1440x576 p50 16/9"; + case HDMI_VFRMT_1920x1080p50_16_9: return "1920x1080 p50 16/9"; + case HDMI_VFRMT_1920x1080p24_16_9: return "1920x1080 p24 16/9"; + case HDMI_VFRMT_1920x1080p25_16_9: return "1920x1080 p25 16/9"; + case HDMI_VFRMT_1920x1080p30_16_9: return "1920x1080 p30 16/9"; + case HDMI_VFRMT_2880x480p60_4_3: return "2880x480 p60 4/3"; + case HDMI_VFRMT_2880x480p60_16_9: return "2880x480 p60 16/9"; + case HDMI_VFRMT_2880x576p50_4_3: return "2880x576 p50 4/3"; + case HDMI_VFRMT_2880x576p50_16_9: return "2880x576 p50 16/9"; + case HDMI_VFRMT_1920x1250i50_16_9: 
return "1920x1250 i50 16/9"; + case HDMI_VFRMT_1920x1080i100_16_9: return "1920x1080 i100 16/9"; + case HDMI_VFRMT_1280x720p100_16_9: return "1280x720 p100 16/9"; + case HDMI_VFRMT_720x576p100_4_3: return "720x576 p100 4/3"; + case HDMI_VFRMT_720x576p100_16_9: return "720x576 p100 16/9"; + case HDMI_VFRMT_1440x576i100_4_3: return "1440x576 i100 4/3"; + case HDMI_VFRMT_1440x576i100_16_9: return "1440x576 i100 16/9"; + case HDMI_VFRMT_1920x1080i120_16_9: return "1920x1080 i120 16/9"; + case HDMI_VFRMT_1280x720p120_16_9: return "1280x720 p120 16/9"; + case HDMI_VFRMT_720x480p120_4_3: return "720x480 p120 4/3"; + case HDMI_VFRMT_720x480p120_16_9: return "720x480 p120 16/9"; + case HDMI_VFRMT_1440x480i120_4_3: return "1440x480 i120 4/3"; + case HDMI_VFRMT_1440x480i120_16_9: return "1440x480 i120 16/9"; + case HDMI_VFRMT_720x576p200_4_3: return "720x576 p200 4/3"; + case HDMI_VFRMT_720x576p200_16_9: return "720x576 p200 16/9"; + case HDMI_VFRMT_1440x576i200_4_3: return "1440x576 i200 4/3"; + case HDMI_VFRMT_1440x576i200_16_9: return "1440x576 i200 16/9"; + case HDMI_VFRMT_720x480p240_4_3: return "720x480 p240 4/3"; + case HDMI_VFRMT_720x480p240_16_9: return "720x480 p240 16/9"; + case HDMI_VFRMT_1440x480i240_4_3: return "1440x480 i240 4/3"; + case HDMI_VFRMT_1440x480i240_16_9: return "1440x480 i240 16/9"; + case HDMI_VFRMT_1280x720p24_16_9: return "1280x720 p24 16/9"; + case HDMI_VFRMT_1280x720p25_16_9: return "1280x720 p25 16/9"; + case HDMI_VFRMT_1280x720p30_16_9: return "1280x720 p30 16/9"; + case HDMI_VFRMT_1920x1080p120_16_9: return "1920x1080 p120 16/9"; + case HDMI_VFRMT_1920x1080p100_16_9: return "1920x1080 p100 16/9"; + case HDMI_VFRMT_3840x2160p30_16_9: return "3840x2160 p30 16/9"; + case HDMI_VFRMT_3840x2160p25_16_9: return "3840x2160 p25 16/9"; + case HDMI_VFRMT_3840x2160p24_16_9: return "3840x2160 p24 16/9"; + case HDMI_VFRMT_4096x2160p24_16_9: return "4096x2160 p24 16/9"; + case HDMI_VFRMT_2560x1600p60_16_9: return "2560x1600 p60 16/9"; + case HDMI_VFRMT_1280x1024p60_5_4: return "1280x1042 p60 5/4"; + default: return "???"; + } +} +#endif /* __MSM_HDMI_MODES_H__ */ From 7df8be791755906937ad401b14814f0d5fb3581c Mon Sep 17 00:00:00 2001 From: David Hays Date: Mon, 13 May 2013 13:10:18 -0500 Subject: [PATCH 100/117] Added USB OTG and USB Headphone support Change-Id: I511a9ea325e6c369432c998a4934447ce8a75eef --- arch/arm/mach-msm/board-vigor.c | 2 + arch/arm/mach-msm/devices-msm8x60.c | 3 +- drivers/usb/misc/ehset.c | 147 ++++++++++++++++++++++++++++ sound/usb/card.c | 13 +++ 4 files changed, 164 insertions(+), 1 deletion(-) create mode 100644 drivers/usb/misc/ehset.c diff --git a/arch/arm/mach-msm/board-vigor.c b/arch/arm/mach-msm/board-vigor.c index bae59260..aa6af978 100644 --- a/arch/arm/mach-msm/board-vigor.c +++ b/arch/arm/mach-msm/board-vigor.c @@ -1415,6 +1415,7 @@ static int vigor_phy_init_seq[] = { 0x06, 0x36, 0x0C, 0x31, 0x31, 0x32, 0x1, 0x0 static struct msm_otg_platform_data msm_otg_pdata = { .phy_init_seq = vigor_phy_init_seq, .mode = USB_PERIPHERAL, + .mode = USB_OTG, .otg_control = OTG_PMIC_CONTROL, .phy_type = CI_45NM_INTEGRATED_PHY, .vbus_power = msm_hsusb_vbus_power, @@ -7539,6 +7540,7 @@ static struct platform_device *vigor_devices[] __initdata = { #endif #if defined(CONFIG_USB_GADGET_MSM_72K) || defined(CONFIG_USB_EHCI_HCD) + &msm_device_hsusb_host, &msm_device_otg, #endif #ifdef CONFIG_BATTERY_MSM diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c index 46f7d668..9add2731 100644 --- a/arch/arm/mach-msm/devices-msm8x60.c +++ 
b/arch/arm/mach-msm/devices-msm8x60.c @@ -1904,7 +1904,7 @@ struct platform_device msm_device_gadget_peripheral = { .coherent_dma_mask = 0xffffffffULL, }, }; -#ifdef CONFIG_USB_EHCI_MSM_72K + static struct resource resources_hsusb_host[] = { { .start = 0x12500000, @@ -1929,6 +1929,7 @@ struct platform_device msm_device_hsusb_host = { }, }; +#ifdef CONFIG_USB_EHCI_MSM_72K static struct platform_device *msm_host_devices[] = { &msm_device_hsusb_host, }; diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c new file mode 100644 index 00000000..30879e01 --- /dev/null +++ b/drivers/usb/misc/ehset.c @@ -0,0 +1,147 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#include +#include +#include +#include +#include +#include +#include + +#define TEST_SE0_NAK_PID 0x0101 +#define TEST_J_PID 0x0102 +#define TEST_K_PID 0x0103 +#define TEST_PACKET_PID 0x0104 +#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106 +#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107 +#define TEST_SINGLE_STEP_SET_FEATURE 0x0108 + +static int ehset_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + int status = -1; + struct usb_device *dev = interface_to_usbdev(intf); + struct usb_device *rh_udev = dev->bus->root_hub; + struct usb_device *hub_udev = dev->parent; + int port1 = dev->portnum; + int test_mode = le16_to_cpu(dev->descriptor.idProduct); + + switch (test_mode) { + case TEST_SE0_NAK_PID: + status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), + USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, + (3 << 8) | port1, NULL, 0, 1000); + break; + case TEST_J_PID: + status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), + USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, + (1 << 8) | port1, NULL, 0, 1000); + break; + case TEST_K_PID: + status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), + USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, + (2 << 8) | port1, NULL, 0, 1000); + break; + case TEST_PACKET_PID: + status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), + USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, + (4 << 8) | port1, NULL, 0, 1000); + break; + case TEST_HS_HOST_PORT_SUSPEND_RESUME: + /* Test: wait for 15secs -> suspend -> 15secs delay -> resume */ + msleep(15 * 1000); + status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), + USB_REQ_SET_FEATURE, USB_RT_PORT, + USB_PORT_FEAT_SUSPEND, port1, NULL, 0, 1000); + if (status < 0) + break; + msleep(15 * 1000); + status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), + USB_REQ_CLEAR_FEATURE, USB_RT_PORT, + USB_PORT_FEAT_SUSPEND, port1, NULL, 0, 1000); + break; + case TEST_SINGLE_STEP_GET_DEV_DESC: + /* Test: wait for 15secs -> GetDescriptor request */ + msleep(15 * 1000); + { + struct usb_device_descriptor *buf; + buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, + USB_DT_DEVICE << 8, 0, + buf, USB_DT_DEVICE_SIZE, + USB_CTRL_GET_TIMEOUT); + kfree(buf); + } 
+ break; + case TEST_SINGLE_STEP_SET_FEATURE: + /* GetDescriptor's SETUP request -> 15secs delay -> IN & STATUS + * Issue request to ehci root hub driver with portnum = 1 + */ + status = usb_control_msg(rh_udev, usb_sndctrlpipe(rh_udev, 0), + USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, + (6 << 8) | 1, NULL, 0, 60 * 1000); + + break; + default: + pr_err("%s: undefined test mode ( %X )\n", __func__, test_mode); + return -EINVAL; + } + + return (status < 0) ? status : 0; +} + +static void ehset_disconnect(struct usb_interface *intf) +{ +} + +static struct usb_device_id ehset_id_table[] = { + { USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) }, + { USB_DEVICE(0x1a0a, TEST_J_PID) }, + { USB_DEVICE(0x1a0a, TEST_K_PID) }, + { USB_DEVICE(0x1a0a, TEST_PACKET_PID) }, + { USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) }, + { USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) }, + { USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, ehset_id_table); + +static struct usb_driver ehset_driver = { + .name = "usb_ehset_test", + .probe = ehset_probe, + .disconnect = ehset_disconnect, + .id_table = ehset_id_table, +}; + +static int __init ehset_init(void) +{ + return usb_register(&ehset_driver); +} + +static void __exit ehset_exit(void) +{ + usb_deregister(&ehset_driver); +} + +module_init(ehset_init); +module_exit(ehset_exit); + +MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/usb/card.c b/sound/usb/card.c index 8d0a3c14..26127c8c 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -85,6 +86,7 @@ static int nrpacks = 8; /* max. number of packets per urb */ static int async_unlink = 1; static int device_setup[SNDRV_CARDS]; /* device parameter for this card */ static int ignore_ctl_error; +struct switch_dev *usbaudiosdev; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the USB audio adapter."); @@ -418,6 +420,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, } snd_usb_audio_create_proc(chip); + switch_set_state(usbaudiosdev, 1); *rchip = chip; return 0; @@ -580,6 +583,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) } else { mutex_unlock(®ister_mutex); } + switch_set_state(usbaudiosdev, 0); } /* @@ -716,16 +720,25 @@ static struct usb_driver usb_audio_driver = { static int __init snd_usb_audio_init(void) { + int err; if (nrpacks < 1 || nrpacks > MAX_PACKS) { printk(KERN_WARNING "invalid nrpacks value.\n"); return -EINVAL; } + usbaudiosdev = kzalloc(sizeof(usbaudiosdev), GFP_KERNEL); + usbaudiosdev->name = "usb_audio"; + err = switch_dev_register(usbaudiosdev); + if (err) + pr_err("Usb-audio switch registration failed\n"); + else + pr_debug("usb hs_detected\n"); return usb_register(&usb_audio_driver); } static void __exit snd_usb_audio_cleanup(void) { usb_deregister(&usb_audio_driver); + kfree(usbaudiosdev); } module_init(snd_usb_audio_init); From bce395881ba55a05ba111a819f781b8aa7975552 Mon Sep 17 00:00:00 2001 From: Padmanabhan Komanduru Date: Fri, 16 Mar 2012 11:32:23 +0530 Subject: [PATCH 101/117] msm_fb: Clean up the fix for error in y-offset check in FB driver There is a failure observed in yres_virtual offset check and every third frame gets skipped without an error message. Due to this high fps is noticed in the test applications. Correct the yres_virtual offset check. 
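To make the intent of this fix and the two alignment patches that follow concrete: each of the mfd->fb_page flip buffers is padded by PAGE_SIZE minus ((line_length * yres) % PAGE_SIZE) bytes so that the next buffer starts on a 4 KB page boundary, and both the yres_virtual check and the buffer address math must account for that hole. The standalone sketch below restates the arithmetic; the function name and the 480x854 example geometry are illustrative assumptions, not taken from the patches.

#include <stdio.h>

#define PAGE_SIZE 4096u

/*
 * Illustrative restatement of the offset math used by these framebuffer
 * patches: buffer N starts after N full frames plus N pads, where the pad
 * rounds each frame up to a page boundary.
 */
static unsigned int example_fb_offset(unsigned int buf_idx,
				      unsigned int line_length,
				      unsigned int yres,
				      unsigned int xoffset, unsigned int bpp)
{
	unsigned int remainder = (line_length * yres) % PAGE_SIZE;
	unsigned int pad;

	if (!remainder)
		remainder = PAGE_SIZE;
	pad = PAGE_SIZE - remainder;	/* 0 when a frame already ends on a page */

	return xoffset * bpp + buf_idx * (yres * line_length + pad);
}

int main(void)
{
	/* e.g. a 480x854 panel at 32 bpp: 480 * 4 = 1920 bytes per line */
	unsigned int line_length = 1920, yres = 854, i, off;

	for (i = 0; i < 3; i++) {
		off = example_fb_offset(i, line_length, yres, 0, 4);
		printf("buffer %u: offset %u, page aligned: %s\n",
		       i, off, off % PAGE_SIZE ? "no" : "yes");
	}
	return 0;
}

With this padding in place the driver can no longer derive a buffer address from yoffset * line_length alone, which is exactly what the per-buffer offset branches (and the later calc_fb_offset() helper) in the following patches implement.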
CRs-fixed: 325134 Signed-off-by: Padmanabhan Komanduru (cherry picked from commit 2bd101d5bed1729b317114456360605e86289c15) Change-Id: I35af570df2d6d2609562203f30f3caa1506b9b1d Signed-off-by: Srivalli Oguri --- drivers/video/msm/msm_fb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 28607cc8..d9df551a 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -1405,7 +1405,8 @@ static int msm_fb_register(struct msm_fb_data_type *mfd) var->xres = panel_info->xres; var->yres = panel_info->yres; var->xres_virtual = ALIGN(panel_info->xres, 32); - var->yres_virtual = panel_info->yres * mfd->fb_page; + var->yres_virtual = panel_info->yres * mfd->fb_page + + ((PAGE_SIZE - remainder)/fix->line_length) * mfd->fb_page; var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */ /* * id field for fb app From 482663450e61533db6f31faeed51a157844726fe Mon Sep 17 00:00:00 2001 From: Padmanabhan Komanduru Date: Thu, 12 Apr 2012 11:33:14 +0530 Subject: [PATCH 102/117] msm_fb: Make buffers aligned to 4K page boundary This change makes the frame buffers aligned to the page boundary. Improves GPU performance since accessing the frame buffer from page boundary is faster. Change-Id: I17879437dd7cbb9a1758d82321c6e68dbd88b9e0 CRs-Fixed: 325134 Signed-off-by: Padmanabhan Komanduru --- drivers/video/msm/mdp4_overlay_dsi_video.c | 49 +++++++++++++++++-- drivers/video/msm/mdp4_overlay_lcdc.c | 49 +++++++++++++++++-- drivers/video/msm/mdp_dma.c | 27 ++++++++++- drivers/video/msm/mdp_dma_tv.c | 55 ++++++++++++++++++++-- drivers/video/msm/msm_fb.c | 25 ++++++++++ 5 files changed, 190 insertions(+), 15 deletions(-) diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c index 29643c06..1488c7ca 100644 --- a/drivers/video/msm/mdp4_overlay_dsi_video.c +++ b/drivers/video/msm/mdp4_overlay_dsi_video.c @@ -85,13 +85,16 @@ int mdp4_dsi_video_on(struct platform_device *pdev) int hsync_end_x; uint8 *buf; int bpp, ptype; + int yres, remainder; struct fb_info *fbi; struct fb_var_screeninfo *var; struct msm_fb_data_type *mfd; struct mdp4_overlay_pipe *pipe; + struct msm_panel_info *panel_info; int ret; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); + panel_info = &mfd->panel_info; if (!mfd) return -ENODEV; @@ -102,10 +105,28 @@ int mdp4_dsi_video_on(struct platform_device *pdev) fbi = mfd->fbi; var = &fbi->var; + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; + bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - buf += fbi->var.xoffset * bpp + - fbi->var.yoffset * fbi->fix.line_length; + if (fbi->var.yoffset < yres) { + buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + buf += fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } if (dsi_pipe == NULL) { ptype = mdp4_overlay_format2type(mfd->fb_imgType); @@ -464,16 +485,36 @@ void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd) struct fb_info *fbi = mfd->fbi; uint8 *buf; int bpp; + int yres, remainder; struct mdp4_overlay_pipe *pipe; + struct msm_panel_info *panel_info = &mfd->panel_info; if 
(!mfd->panel_power_on) return; + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; + /* no need to power on cmd block since it's dsi mode */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - buf += fbi->var.xoffset * bpp + - fbi->var.yoffset * fbi->fix.line_length; + if (fbi->var.yoffset < yres) { + buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + buf += fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } mutex_lock(&mfd->dma->ov_mutex); diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c index d291ca7f..61f099d0 100644 --- a/drivers/video/msm/mdp4_overlay_lcdc.c +++ b/drivers/video/msm/mdp4_overlay_lcdc.c @@ -92,8 +92,11 @@ int mdp_lcdc_on(struct platform_device *pdev) struct msm_fb_data_type *mfd; struct mdp4_overlay_pipe *pipe; int ret; + int yres, remainder; + struct msm_panel_info *panel_info; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); + panel_info = &mfd->panel_info; if (!mfd) return -ENODEV; @@ -104,6 +107,17 @@ int mdp_lcdc_on(struct platform_device *pdev) fbi = mfd->fbi; var = &fbi->var; + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; + /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); if (is_mdp4_hw_reset()) { @@ -113,8 +127,15 @@ int mdp_lcdc_on(struct platform_device *pdev) bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - buf += fbi->var.xoffset * bpp + - fbi->var.yoffset * fbi->fix.line_length; + if (fbi->var.yoffset < yres) { + buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + buf += fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } if (lcdc_pipe == NULL) { ptype = mdp4_overlay_format2type(mfd->fb_imgType); @@ -380,16 +401,36 @@ void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd) struct fb_info *fbi = mfd->fbi; uint8 *buf; int bpp; + int yres, remainder; struct mdp4_overlay_pipe *pipe; + struct msm_panel_info *panel_info = &mfd->panel_info; if (!mfd->panel_power_on) return; + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; + /* no need to power on cmd block since it's lcdc mode */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - buf += fbi->var.xoffset * bpp + - fbi->var.yoffset * fbi->fix.line_length; + if (fbi->var.yoffset < yres) { + buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + buf += fbi->var.xoffset * bpp + 2 * yres * + 
fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } mutex_lock(&mfd->dma->ov_mutex); diff --git a/drivers/video/msm/mdp_dma.c b/drivers/video/msm/mdp_dma.c index 9d83bf83..07f33aa7 100644 --- a/drivers/video/msm/mdp_dma.c +++ b/drivers/video/msm/mdp_dma.c @@ -464,14 +464,37 @@ void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty, boolean sync) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; + struct fb_info *fbi = mfd->fbi; + struct msm_panel_info *panel_info = &mfd->panel_info; MDPIBUF *iBuf; int bpp = info->var.bits_per_pixel / 8; + int yres, remainder; + + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; down(&mfd->sem); + iBuf = &mfd->ibuf; iBuf->buf = (uint8 *) info->fix.smem_start; - iBuf->buf += info->var.xoffset * bpp + - info->var.yoffset * info->fix.line_length; + + if (fbi->var.yoffset < yres) { + iBuf->buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + iBuf->buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + iBuf->buf += fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } iBuf->ibuf_width = info->var.xres_virtual; iBuf->bpp = bpp; diff --git a/drivers/video/msm/mdp_dma_tv.c b/drivers/video/msm/mdp_dma_tv.c index 1874c33f..efb06e3c 100644 --- a/drivers/video/msm/mdp_dma_tv.c +++ b/drivers/video/msm/mdp_dma_tv.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2009, 2012 Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,12 +41,15 @@ int mdp_dma3_on(struct platform_device *pdev) { struct msm_fb_data_type *mfd; + struct msm_panel_info *panel_info; struct fb_info *fbi; uint8 *buf; int bpp; int ret = 0; + int yres, remainder; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); + panel_info = &mfd->panel_info; if (!mfd) return -ENODEV; @@ -55,13 +58,33 @@ int mdp_dma3_on(struct platform_device *pdev) return -EINVAL; fbi = mfd->fbi; + + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; + /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - buf += fbi->var.xoffset * bpp + - fbi->var.yoffset * fbi->fix.line_length; + + if (fbi->var.yoffset < yres) { + buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + buf += fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } /* starting address[31..8] of Video frame buffer is CS0 */ MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3); @@ -114,15 +137,37 @@ void mdp_dma3_update(struct msm_fb_data_type *mfd) uint8 *buf; int bpp; unsigned long flag; + int yres, remainder; + struct msm_panel_info *panel_info = &mfd->panel_info; if (!mfd->panel_power_on) return; + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; + } + + if (!remainder) + remainder = PAGE_SIZE; + /* no need to power on cmd block since dma3 is running */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - buf += fbi->var.xoffset * bpp + - fbi->var.yoffset * fbi->fix.line_length; + + if (fbi->var.yoffset < yres) { + buf += fbi->var.xoffset * bpp; + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + buf += fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder; + } else { + buf += fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); + } + MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3); spin_lock_irqsave(&mdp_spin_lock, flag); diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index d9df551a..69ad26d9 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -1241,6 +1241,7 @@ static int msm_fb_register(struct msm_fb_data_type *mfd) struct fb_var_screeninfo *var; int *id; int fbram_offset; + int remainder, remainder_mode2; /* * fb info initialization @@ -1378,6 +1379,30 @@ static int msm_fb_register(struct msm_fb_data_type *mfd) fix->line_length = msm_fb_line_length(mfd->index, panel_info->xres, bpp); + + /* Make sure all buffers can be addressed on a page boundary by an x + * and y offset */ + + remainder = (fix->line_length * panel_info->yres) % PAGE_SIZE; + if (!remainder) + remainder = PAGE_SIZE; + remainder_mode2 = (fix->line_length * + panel_info->mode2_yres) % PAGE_SIZE; + if (!remainder_mode2) + remainder_mode2 = PAGE_SIZE; + + /* calculate smem_len based on max 
size of two supplied modes */ + fix->smem_len = MAX((msm_fb_line_length(mfd->index, panel_info->xres, + bpp) * + panel_info->yres + PAGE_SIZE - + remainder) * mfd->fb_page, + (msm_fb_line_length(mfd->index, + panel_info->mode2_xres, + bpp) * + panel_info->mode2_yres + PAGE_SIZE - + remainder_mode2) * mfd->fb_page); + + /* calculate smem_len based on max size of two supplied modes */ fix->smem_len = roundup(MAX(msm_fb_line_length(mfd->index, ALIGN(panel_info->xres, 32), From b435079fe0e9da796d049fc1a8324f5648c92a41 Mon Sep 17 00:00:00 2001 From: Padmanabhan Komanduru Date: Tue, 21 Feb 2012 12:00:12 +0530 Subject: [PATCH 103/117] msm_fb: Clean up of frame buffer 4KB alignment changes Remove redundant code from the 4 KB alignment patch and reduce the overhead of calculation of the hole size by using bitwise operators in its calculation. Change-Id: I1706484249bf8e01b250e91fa87e00f6021d9a72 CRs-fixed: 325134 Signed-off-by: Padmanabhan Komanduru --- drivers/video/msm/mdp4_overlay_dsi_video.c | 47 +--------------------- drivers/video/msm/mdp4_overlay_lcdc.c | 47 +--------------------- drivers/video/msm/mdp_dma.c | 23 +---------- drivers/video/msm/mdp_dma_tv.c | 47 +--------------------- drivers/video/msm/msm_fb.c | 35 +++++++++++++++- drivers/video/msm/msm_fb.h | 1 + 6 files changed, 41 insertions(+), 159 deletions(-) diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c index 1488c7ca..e6791d05 100644 --- a/drivers/video/msm/mdp4_overlay_dsi_video.c +++ b/drivers/video/msm/mdp4_overlay_dsi_video.c @@ -85,16 +85,13 @@ int mdp4_dsi_video_on(struct platform_device *pdev) int hsync_end_x; uint8 *buf; int bpp, ptype; - int yres, remainder; struct fb_info *fbi; struct fb_var_screeninfo *var; struct msm_fb_data_type *mfd; struct mdp4_overlay_pipe *pipe; - struct msm_panel_info *panel_info; int ret; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); - panel_info = &mfd->panel_info; if (!mfd) return -ENODEV; @@ -105,28 +102,9 @@ int mdp4_dsi_video_on(struct platform_device *pdev) fbi = mfd->fbi; var = &fbi->var; - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; - bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - if (fbi->var.yoffset < yres) { - buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + buf += calc_fb_offset(mfd, fbi, bpp); if (dsi_pipe == NULL) { ptype = mdp4_overlay_format2type(mfd->fb_imgType); @@ -485,36 +463,15 @@ void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd) struct fb_info *fbi = mfd->fbi; uint8 *buf; int bpp; - int yres, remainder; struct mdp4_overlay_pipe *pipe; - struct msm_panel_info *panel_info = &mfd->panel_info; if (!mfd->panel_power_on) return; - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; - /* no need to power on cmd block since it's dsi mode */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; 
- if (fbi->var.yoffset < yres) { - buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + buf += calc_fb_offset(mfd, fbi, bpp); mutex_lock(&mfd->dma->ov_mutex); diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c index 61f099d0..9adfc6a3 100644 --- a/drivers/video/msm/mdp4_overlay_lcdc.c +++ b/drivers/video/msm/mdp4_overlay_lcdc.c @@ -92,11 +92,8 @@ int mdp_lcdc_on(struct platform_device *pdev) struct msm_fb_data_type *mfd; struct mdp4_overlay_pipe *pipe; int ret; - int yres, remainder; - struct msm_panel_info *panel_info; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); - panel_info = &mfd->panel_info; if (!mfd) return -ENODEV; @@ -107,17 +104,6 @@ int mdp_lcdc_on(struct platform_device *pdev) fbi = mfd->fbi; var = &fbi->var; - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; - /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); if (is_mdp4_hw_reset()) { @@ -127,15 +113,7 @@ int mdp_lcdc_on(struct platform_device *pdev) bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - if (fbi->var.yoffset < yres) { - buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + buf += calc_fb_offset(mfd, fbi, bpp); if (lcdc_pipe == NULL) { ptype = mdp4_overlay_format2type(mfd->fb_imgType); @@ -401,36 +379,15 @@ void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd) struct fb_info *fbi = mfd->fbi; uint8 *buf; int bpp; - int yres, remainder; struct mdp4_overlay_pipe *pipe; - struct msm_panel_info *panel_info = &mfd->panel_info; if (!mfd->panel_power_on) return; - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; - /* no need to power on cmd block since it's lcdc mode */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - if (fbi->var.yoffset < yres) { - buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + buf += calc_fb_offset(mfd, fbi, bpp); mutex_lock(&mfd->dma->ov_mutex); diff --git a/drivers/video/msm/mdp_dma.c b/drivers/video/msm/mdp_dma.c index 07f33aa7..0cd73ded 100644 --- a/drivers/video/msm/mdp_dma.c +++ b/drivers/video/msm/mdp_dma.c @@ -465,36 +465,15 @@ void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty, { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; struct fb_info *fbi = mfd->fbi; - struct msm_panel_info *panel_info = &mfd->panel_info; MDPIBUF *iBuf; int bpp = 
info->var.bits_per_pixel / 8; - int yres, remainder; - - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; down(&mfd->sem); iBuf = &mfd->ibuf; iBuf->buf = (uint8 *) info->fix.smem_start; - if (fbi->var.yoffset < yres) { - iBuf->buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - iBuf->buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - iBuf->buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + iBuf->buf += calc_fb_offset(mfd, fbi, bpp); iBuf->ibuf_width = info->var.xres_virtual; iBuf->bpp = bpp; diff --git a/drivers/video/msm/mdp_dma_tv.c b/drivers/video/msm/mdp_dma_tv.c index efb06e3c..540ae4b4 100644 --- a/drivers/video/msm/mdp_dma_tv.c +++ b/drivers/video/msm/mdp_dma_tv.c @@ -41,15 +41,12 @@ int mdp_dma3_on(struct platform_device *pdev) { struct msm_fb_data_type *mfd; - struct msm_panel_info *panel_info; struct fb_info *fbi; uint8 *buf; int bpp; int ret = 0; - int yres, remainder; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); - panel_info = &mfd->panel_info; if (!mfd) return -ENODEV; @@ -59,32 +56,13 @@ int mdp_dma3_on(struct platform_device *pdev) fbi = mfd->fbi; - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; - /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - if (fbi->var.yoffset < yres) { - buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + buf += calc_fb_offset(mfd, fbi, bpp); /* starting address[31..8] of Video frame buffer is CS0 */ MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3); @@ -137,36 +115,15 @@ void mdp_dma3_update(struct msm_fb_data_type *mfd) uint8 *buf; int bpp; unsigned long flag; - int yres, remainder; - struct msm_panel_info *panel_info = &mfd->panel_info; if (!mfd->panel_power_on) return; - if (panel_info->mode2_yres != 0) { - yres = panel_info->mode2_yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } else { - yres = panel_info->yres; - remainder = (fbi->fix.line_length*yres)%PAGE_SIZE; - } - - if (!remainder) - remainder = PAGE_SIZE; - /* no need to power on cmd block since dma3 is running */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; - if (fbi->var.yoffset < yres) { - buf += fbi->var.xoffset * bpp; - } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { - buf += fbi->var.xoffset * bpp + yres * - fbi->fix.line_length + PAGE_SIZE - remainder; - } else { - buf += fbi->var.xoffset * bpp + 2 * yres * - fbi->fix.line_length + 2 * (PAGE_SIZE - remainder); - } + buf += calc_fb_offset(mfd, fbi, bpp); MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3); diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 69ad26d9..da226d07 100644 --- 
a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -1039,6 +1039,36 @@ static int msm_fb_blank_sub(int blank_mode, struct fb_info *info, return ret; } +int calc_fb_offset(struct msm_fb_data_type *mfd, struct fb_info *fbi, int bpp) +{ + struct msm_panel_info *panel_info = &mfd->panel_info; + int remainder, yres, offset; + + if (panel_info->mode2_yres != 0) { + yres = panel_info->mode2_yres; + remainder = (fbi->fix.line_length*yres) & (PAGE_SIZE - 1); + } else { + yres = panel_info->yres; + remainder = (fbi->fix.line_length*yres) & (PAGE_SIZE - 1); + } + + if (!remainder) + remainder = PAGE_SIZE; + + if (fbi->var.yoffset < yres) { + offset = (fbi->var.xoffset * bpp); + /* iBuf->buf += fbi->var.xoffset * bpp + 0 * + yres * fbi->fix.line_length; */ + } else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) { + offset = (fbi->var.xoffset * bpp + yres * + fbi->fix.line_length + PAGE_SIZE - remainder); + } else { + offset = (fbi->var.xoffset * bpp + 2 * yres * + fbi->fix.line_length + 2 * (PAGE_SIZE - remainder)); + } + return offset; +} + static void msm_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { @@ -1383,11 +1413,12 @@ static int msm_fb_register(struct msm_fb_data_type *mfd) /* Make sure all buffers can be addressed on a page boundary by an x * and y offset */ - remainder = (fix->line_length * panel_info->yres) % PAGE_SIZE; + remainder = (fix->line_length * panel_info->yres) & (PAGE_SIZE - 1); + /* PAGE_SIZE is a power of 2 */ if (!remainder) remainder = PAGE_SIZE; remainder_mode2 = (fix->line_length * - panel_info->mode2_yres) % PAGE_SIZE; + panel_info->mode2_yres) & (PAGE_SIZE - 1); if (!remainder_mode2) remainder_mode2 = PAGE_SIZE; diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h index 39c2647e..6d700e02 100644 --- a/drivers/video/msm/msm_fb.h +++ b/drivers/video/msm/msm_fb.h @@ -203,6 +203,7 @@ void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl); struct platform_device *msm_fb_add_device(struct platform_device *pdev); int msm_fb_detect_client(const char *name); +int calc_fb_offset(struct msm_fb_data_type *mfd, struct fb_info *fbi, int bpp); #ifdef CONFIG_FB_BACKLIGHT void msm_fb_config_backlight(struct msm_fb_data_type *mfd); From 7cf1c41fb56ddf81b9bd91f86714a458cf94a818 Mon Sep 17 00:00:00 2001 From: Adrian Salido-Moreno Date: Mon, 21 May 2012 17:32:07 -0700 Subject: [PATCH 104/117] msm_fb: display: Fix invalid x offset programming for YUV444 fmt Source x offset adjustment is not properly programmed for YUV444 format, it is always programmed as 0. Add support for correct x offset adjustment. 
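For packed single-plane formats the horizontal source offset is just the x position scaled by the pixel size, which is what the hunk below extends to the packed YUV444 cases. A minimal sketch of that arithmetic, with a hypothetical helper name (not part of the patch), assuming a 3-byte-per-pixel surface:

/*
 * Illustrative only: for an interleaved format, panning by src_x
 * pixels moves the start of each line by src_x * bytes_per_pixel.
 * For MDP_YCBCR_H1V1 / MDP_YCRCB_H1V1 (packed YUV444) that is
 * src_x * 3 bytes.
 */
static inline unsigned int packed_luma_offset(unsigned int src_x,
					      unsigned int bpp)
{
	return src_x * bpp;
}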
Change-Id: I73e87f961bda60670cc764c66be955e48ccf2734 Signed-off-by: Adrian Salido-Moreno Signed-off-by: Ravishangar Kalyanam --- drivers/video/msm/mdp4_overlay.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c index 73f25b8f..9e27bda8 100644 --- a/drivers/video/msm/mdp4_overlay.c +++ b/drivers/video/msm/mdp4_overlay.c @@ -500,12 +500,14 @@ static void mdp4_overlay_vg_get_src_offset(struct mdp4_overlay_pipe *pipe, case MDP_BGR_565: case MDP_XRGB_8888: case MDP_RGB_888: + case MDP_YCBCR_H1V1: + case MDP_YCRCB_H1V1: *luma_off = pipe->src_x * pipe->bpp; break; default: - pr_err("Source format %u not supported for x offset adjustment\n", - pipe->src_format); + pr_err("%s: fmt %u not supported for adjustment\n", + __func__, pipe->src_format); break; } } From 014245fd4bd6ef0834c058ae5b70704c7565b0ee Mon Sep 17 00:00:00 2001 From: Carl Vanderlip Date: Wed, 28 Mar 2012 10:17:20 -0700 Subject: [PATCH 105/117] video: msm: Load correct default values to QSEED at init The previous QSEED Table 1 values are not suitable values (would end up smoothing and sharpening content which can lead to artifacts). The values loaded by this patch are the suggested default values for QSEED since they do not smooth or sharpen most content (bi-cubic filter), but will slightly smooth low frequency components of the image (i.e. helps reduce artifacts that produce sharp edges on content that is supposed to be a constant color; often introduced by some video encoding algorithms). CRS-Fixed: 338131 Signed-off-by: Carl Vanderlip (cherry picked from commit d89c0e7712dd5cf13769b4d0facfecb93028538c) Change-Id: I3b65661c69402c23f4322366aa4f12e66d661b86 Signed-off-by: Gopal G Goberu --- drivers/video/msm/mdp4_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c index 148695db..2e9bf7b1 100644 --- a/drivers/video/msm/mdp4_util.c +++ b/drivers/video/msm/mdp4_util.c @@ -586,7 +586,7 @@ static uint32 vg_qseed_table0[] = { }; static uint32 vg_qseed_table1[] = { - 0x76543210, 0xfedcba98 + 0x00000000, 0x20000000, }; static uint32 vg_qseed_table2[] = { From 1e603dadc60420b9b449bee59c4868eab2fd13d3 Mon Sep 17 00:00:00 2001 From: Jeevan Shriram Date: Mon, 2 Apr 2012 10:20:48 +0530 Subject: [PATCH 106/117] msm_fb: display: Assign proper destination rectangles to mdp blit Add check for (90 + Flip Horizontal) and Flip Vertical cases to assign proper destination rectangle parameters to avoid corrupted frame buffer content. 
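The low three bits of the blit flags encode the flip/rotate orientation; the change below adds the 0x05 (90-degree rotation plus horizontal flip) and 0x02 (vertical flip) combinations to the cases that use the first destination-rectangle layout when a blit is split in two. A rough sketch of that selection, assuming the usual msm_mdp.h values MDP_FLIP_LR = 0x1, MDP_FLIP_UD = 0x2 and MDP_ROT_90 = 0x4 (helper name hypothetical):

static inline int split_blit_uses_first_layout(unsigned int flags)
{
	unsigned int orient = flags & 0x07;

	return orient == 0x00 ||	/* no rotation or flip          */
	       orient == 0x02 ||	/* flip vertical                */
	       orient == 0x05 ||	/* 90 degrees + flip horizontal */
	       orient == 0x07;		/* 90 degrees + both flips      */
}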
CRs-Fixed: 344807 Change-Id: I7e7b0b8d26f08f63a6f9c55eedd37f31cf0348d7 Signed-off-by: Jeevan Shriram (cherry picked from commit 07c82d6ca63c6be28ab897fdb3ccfc1a448c342f) --- drivers/video/msm/msm_fb.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index da226d07..11cdb8a2 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -2487,6 +2487,8 @@ int mdp_blit(struct fb_info *info, struct mdp_blit_req *req) /* blit first region */ if (((splitreq.flags & 0x07) == 0x07) || + ((splitreq.flags & 0x07) == 0x05) || + ((splitreq.flags & 0x07) == 0x02) || ((splitreq.flags & 0x07) == 0x0)) { if (splitreq.flags & MDP_ROT_90) { @@ -2567,6 +2569,8 @@ int mdp_blit(struct fb_info *info, struct mdp_blit_req *req) /* blit second region */ if (((splitreq.flags & 0x07) == 0x07) || + ((splitreq.flags & 0x07) == 0x05) || + ((splitreq.flags & 0x07) == 0x02) || ((splitreq.flags & 0x07) == 0x0)) { splitreq.src_rect.h = s_h_1; splitreq.src_rect.y = s_y_1; From 2440619e47aede7a271e69f099853c159e62e17b Mon Sep 17 00:00:00 2001 From: Padmanabhan Komanduru Date: Fri, 9 Mar 2012 20:16:21 +0530 Subject: [PATCH 107/117] msm_fb: Fix error in y-offset check logic in FB driver After 4 KB alignment changes, the frame from 3rd buffer doesn't get updated since during PUT_VSCREEN_INFO ioctl, the driver returns -1 to userspace due to error in logic because the hole size is not considered in calculating the logic for validity of the y-offset. Change fixes this error. CRs-fixed: 325134 Signed-off-by: Padmanabhan Komanduru (cherry picked from commit d37b52404236bf4cadf530a238cafc5d6499d32a) Change-Id: Ifccfa25a37fe0f8d86bf5b822d9affe7fa41203a Signed-off-by: Srivalli Oguri --- drivers/video/msm/msm_fb.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 11cdb8a2..26f2e7ee 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -1934,6 +1934,7 @@ static int msm_fb_pan_display(struct fb_var_screeninfo *var, static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; + int hole_offset; if (var->rotate != FB_ROTATE_UR) return -EINVAL; @@ -2028,7 +2029,20 @@ static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) if (var->xoffset > (var->xres_virtual - var->xres)) return -EINVAL; - if (var->yoffset > (var->yres_virtual - var->yres)) + if (!mfd->panel_info.mode2_yres) + hole_offset = (mfd->fbi->fix.line_length * + mfd->panel_info.yres) % PAGE_SIZE; + else + hole_offset = (mfd->fbi->fix.line_length * + mfd->panel_info.mode2_yres) % PAGE_SIZE; + + if (!hole_offset) { + hole_offset = PAGE_SIZE - hole_offset; + hole_offset = hole_offset/mfd->fbi->fix.line_length; + } + + if (var->yoffset > (var->yres_virtual - var->yres + (hole_offset * + (mfd->fb_page - 1)))) return -EINVAL; return 0; From 43f43542f0eca0d68dc829fc70b426cac970ded1 Mon Sep 17 00:00:00 2001 From: Padmanabhan Komanduru Date: Fri, 16 Mar 2012 11:32:23 +0530 Subject: [PATCH 108/117] msm_fb: Clean up the fix for error in y-offset check in FB driver There is a failure observed in yres_virtual offset check and every third frame gets skipped without an error message. Due to this high fps is noticed in the test applications. Correct the yres_virtual offset check. 
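With the hole-offset adjustment removed, the pan check goes back to requiring that the visible window still fit inside the virtual resolution. A minimal sketch of the restored test (helper name hypothetical; struct fb_var_screeninfo comes from <linux/fb.h>):

static inline int msm_fb_yoffset_ok(const struct fb_var_screeninfo *var)
{
	/* yoffset may advance only while yres visible lines still fit below it */
	return var->yoffset <= (var->yres_virtual - var->yres);
}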
CRs-fixed: 325134 Signed-off-by: Padmanabhan Komanduru (cherry picked from commit 2bd101d5bed1729b317114456360605e86289c15) Change-Id: I35af570df2d6d2609562203f30f3caa1506b9b1d Signed-off-by: Srivalli Oguri --- drivers/video/msm/msm_fb.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 26f2e7ee..11cdb8a2 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -1934,7 +1934,6 @@ static int msm_fb_pan_display(struct fb_var_screeninfo *var, static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; - int hole_offset; if (var->rotate != FB_ROTATE_UR) return -EINVAL; @@ -2029,20 +2028,7 @@ static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) if (var->xoffset > (var->xres_virtual - var->xres)) return -EINVAL; - if (!mfd->panel_info.mode2_yres) - hole_offset = (mfd->fbi->fix.line_length * - mfd->panel_info.yres) % PAGE_SIZE; - else - hole_offset = (mfd->fbi->fix.line_length * - mfd->panel_info.mode2_yres) % PAGE_SIZE; - - if (!hole_offset) { - hole_offset = PAGE_SIZE - hole_offset; - hole_offset = hole_offset/mfd->fbi->fix.line_length; - } - - if (var->yoffset > (var->yres_virtual - var->yres + (hole_offset * - (mfd->fb_page - 1)))) + if (var->yoffset > (var->yres_virtual - var->yres)) return -EINVAL; return 0; From 47ef0e2bbe8216d9fa421066f6e176f2ed852f8d Mon Sep 17 00:00:00 2001 From: David Hays Date: Sat, 25 May 2013 00:37:10 -0400 Subject: [PATCH 109/117] vigor: audio changes/enable back mic --- arch/arm/mach-msm/board-vigor-audio.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-msm/board-vigor-audio.c b/arch/arm/mach-msm/board-vigor-audio.c index 15fc1c36..b96a4d90 100644 --- a/arch/arm/mach-msm/board-vigor-audio.c +++ b/arch/arm/mach-msm/board-vigor-audio.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -326,7 +327,7 @@ int vigor_is_msm_i2s_slave(void) int vigor_support_aic3254(void) { - return 0; + return 1; } int vigor_support_adie(void) @@ -336,7 +337,7 @@ int vigor_support_adie(void) int vigor_support_back_mic(void) { - return 0; + return 1; } int vigor_is_msm_i2s_master(void) From 91c1dd21936cb28ff3151dfeed213ca14729a728 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 3 Nov 2011 16:40:32 -0700 Subject: [PATCH 110/117] usb: Add network bridge host driver for dun and rmnet This host driver will be used to communicate with modem devices with dial up network and RMNET interfaces. This driver works as a bridge to pass control and data packets between the modem and peripheral usb gadget driver. 
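The gadget side attaches to a bridge channel by filling in a struct bridge with its callbacks and then opening the control and data paths, using the API declared in usb_bridge.h below. A sketch under those declarations; the port context, callback bodies and channel choice (0 = DUN) are illustrative, not taken from this patch:

#include <mach/usb_bridge.h>

/* called by the data bridge for every packet received from the modem */
static int my_port_send_pkt(void *ctx, void *pkt, size_t actual)
{
	/* hand the sk_buff off to the gadget port; return -EBUSY to throttle */
	return 0;
}

/* called by the control bridge when the modem's serial state changes */
static void my_port_send_cbits(void *ctx, unsigned int cbits)
{
}

static struct bridge my_dun_bridge = {
	.ctx	= NULL,			/* gadget port private data */
	.ch_id	= 0,			/* bridge device 0: DUN     */
	.ops	= {
		.send_pkt	= my_port_send_pkt,
		.send_cbits	= my_port_send_cbits,
	},
};

static int my_port_connect(void)
{
	int ret;

	ret = ctrl_bridge_open(&my_dun_bridge);
	if (ret)
		return ret;

	ret = data_bridge_open(&my_dun_bridge);
	if (ret)
		ctrl_bridge_close(my_dun_bridge.ch_id);

	return ret;
}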
Driver currently supports modem devices (vendor ID 0x05c6) with PIDs 0x9001 Change-Id: Id85b552b39d061528a1c3c90a354d73580c9b631 Signed-off-by: Hemant Kumar Signed-off-by: Jack Pham --- arch/arm/mach-msm/include/mach/usb_bridge.h | 122 +++ drivers/usb/misc/mdm_ctrl_bridge.c | 729 ++++++++++++++++ drivers/usb/misc/mdm_data_bridge.c | 923 ++++++++++++++++++++ 3 files changed, 1774 insertions(+) create mode 100644 arch/arm/mach-msm/include/mach/usb_bridge.h create mode 100644 drivers/usb/misc/mdm_ctrl_bridge.c create mode 100644 drivers/usb/misc/mdm_data_bridge.c diff --git a/arch/arm/mach-msm/include/mach/usb_bridge.h b/arch/arm/mach-msm/include/mach/usb_bridge.h new file mode 100644 index 00000000..2b7e754b --- /dev/null +++ b/arch/arm/mach-msm/include/mach/usb_bridge.h @@ -0,0 +1,122 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#ifndef __LINUX_USB_BRIDGE_H__ +#define __LINUX_USB_BRIDGE_H__ + +#include +#include + +/* bridge device 0: DUN + * bridge device 1 : Tethered RMNET + */ +#define MAX_BRIDGE_DEVICES 2 + +/*PID 9001*/ +#define DUN_IFACE_NUM 2 +#define TETHERED_RMNET_IFACE_NUM 3 + +struct bridge_ops { + int (*send_pkt)(void *, void *, size_t actual); + void (*send_cbits)(void *, unsigned int); + + /* flow control */ + void (*unthrottle_tx)(void *); +}; + +#define TX_THROTTLED BIT(0) +#define RX_THROTTLED BIT(1) + +struct bridge { + /* context of the gadget port using bridge driver */ + void *ctx; + + /* bridge device array index mapped to the gadget port array index. 
+ * data bridge[ch_id] <-- bridge --> gadget port[ch_id] + */ + unsigned int ch_id; + + /* flow control bits */ + unsigned long flags; + + /* data/ctrl bridge callbacks */ + struct bridge_ops ops; +}; + +#if defined(CONFIG_USB_QCOM_MDM_BRIDGE) || \ + defined(CONFIG_USB_QCOM_MDM_BRIDGE_MODULE) + +/* Bridge APIs called by gadget driver */ +int ctrl_bridge_open(struct bridge *); +void ctrl_bridge_close(unsigned int); +int ctrl_bridge_write(unsigned int, char *, size_t); +int ctrl_bridge_set_cbits(unsigned int, unsigned int); +unsigned int ctrl_bridge_get_cbits_tohost(unsigned int); +int data_bridge_open(struct bridge *brdg); +void data_bridge_close(unsigned int); +int data_bridge_write(unsigned int , struct sk_buff *); +int data_bridge_unthrottle_rx(unsigned int); + +/* defined in control bridge */ +int ctrl_bridge_probe(struct usb_interface *, struct usb_host_endpoint *, int); +void ctrl_bridge_disconnect(unsigned int); +int ctrl_bridge_resume(unsigned int); +int ctrl_bridge_suspend(unsigned int); + +#else + +static inline int __maybe_unused ctrl_bridge_open(struct bridge *brdg) +{ + return -ENODEV; +} + +static inline void __maybe_unused ctrl_bridge_close(unsigned int id) { } + +static inline int __maybe_unused ctrl_bridge_write(unsigned int id, + char *data, size_t size) +{ + return -ENODEV; +} + +static inline int __maybe_unused ctrl_bridge_set_cbits(unsigned int id, + unsigned int cbits) +{ + return -ENODEV; +} + +static inline unsigned int __maybe_unused +ctrl_bridge_get_cbits_tohost(unsigned int id) +{ + return -ENODEV; +} + +static inline int __maybe_unused data_bridge_open(struct bridge *brdg) +{ + return -ENODEV; +} + +static inline void __maybe_unused data_bridge_close(unsigned int id) { } + +static inline int __maybe_unused data_bridge_write(unsigned int id, + struct sk_buff *skb) +{ + return -ENODEV; +} + +static inline int __maybe_unused data_bridge_unthrottle_rx(unsigned int id) +{ + return -ENODEV; +} + +#endif +#endif diff --git a/drivers/usb/misc/mdm_ctrl_bridge.c b/drivers/usb/misc/mdm_ctrl_bridge.c new file mode 100644 index 00000000..87adf2e9 --- /dev/null +++ b/drivers/usb/misc/mdm_ctrl_bridge.c @@ -0,0 +1,729 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char *ctrl_bridge_names[] = { + "dun_ctrl_hsic0", + "rmnet_ctrl_hsic0" +}; + +/* polling interval for Interrupt ep */ +#define HS_INTERVAL 7 +#define FS_LS_INTERVAL 3 + +#define ACM_CTRL_DTR (1 << 0) +#define DEFAULT_READ_URB_LENGTH 4096 + +struct ctrl_bridge { + + struct usb_device *udev; + struct usb_interface *intf; + + unsigned int int_pipe; + struct urb *inturb; + void *intbuf; + + struct urb *readurb; + void *readbuf; + + struct usb_anchor tx_submitted; + struct usb_ctrlrequest *in_ctlreq; + + struct bridge *brdg; + struct platform_device *pdev; + + /* input control lines (DSR, CTS, CD, RI) */ + unsigned int cbits_tohost; + + /* output control lines (DTR, RTS) */ + unsigned int cbits_tomdm; + + /* counters */ + unsigned int snd_encap_cmd; + unsigned int get_encap_res; + unsigned int resp_avail; + unsigned int set_ctrl_line_sts; + unsigned int notify_ser_state; + +}; + +static struct ctrl_bridge *__dev[MAX_BRIDGE_DEVICES]; + +/* counter used for indexing ctrl bridge devices */ +static int ch_id; + +unsigned int ctrl_bridge_get_cbits_tohost(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + return dev->cbits_tohost; +} +EXPORT_SYMBOL(ctrl_bridge_get_cbits_tohost); + +int ctrl_bridge_set_cbits(unsigned int id, unsigned int cbits) +{ + struct ctrl_bridge *dev; + struct bridge *brdg; + int retval; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + pr_debug("%s: dev[id] =%u cbits : %u\n", __func__, id, cbits); + + brdg = dev->brdg; + if (!brdg) + return -ENODEV; + + dev->cbits_tomdm = cbits; + + retval = ctrl_bridge_write(id, NULL, 0); + + /* if DTR is high, update latest modem info to host */ + if (brdg && (cbits & ACM_CTRL_DTR) && brdg->ops.send_cbits) + brdg->ops.send_cbits(brdg->ctx, dev->cbits_tohost); + + return retval; +} +EXPORT_SYMBOL(ctrl_bridge_set_cbits); + +static void resp_avail_cb(struct urb *urb) +{ + struct ctrl_bridge *dev = urb->context; + struct usb_device *udev; + int status = 0; + int resubmit_urb = 1; + struct bridge *brdg = dev->brdg; + + udev = interface_to_usbdev(dev->intf); + switch (urb->status) { + case 0: + /*success*/ + dev->get_encap_res++; + if (brdg && brdg->ops.send_pkt) + brdg->ops.send_pkt(brdg->ctx, urb->transfer_buffer, + urb->actual_length); + break; + + /*do not resubmit*/ + case -ESHUTDOWN: + case -ENOENT: + case -ECONNRESET: + /* unplug */ + case -EPROTO: + /*babble error*/ + resubmit_urb = 0; + /*resubmit*/ + case -EOVERFLOW: + default: + dev_dbg(&udev->dev, "%s: non zero urb status = %d\n", + __func__, urb->status); + } + + if (resubmit_urb) { + /*re- submit int urb to check response available*/ + status = usb_submit_urb(dev->inturb, GFP_ATOMIC); + if (status) + dev_err(&udev->dev, + "%s: Error re-submitting Int URB %d\n", + __func__, status); + } +} + +static void notification_available_cb(struct urb *urb) +{ + int status; + struct usb_cdc_notification *ctrl; + struct usb_device *udev; + struct ctrl_bridge *dev = urb->context; + struct bridge *brdg = dev->brdg; + unsigned int ctrl_bits; + unsigned char *data; + + udev = interface_to_usbdev(dev->intf); + + switch (urb->status) { + case 0: + /*success*/ + break; + case -ESHUTDOWN: + case -ENOENT: + case -ECONNRESET: + case -EPROTO: + /* unplug */ + return; + case -EPIPE: 
+ dev_err(&udev->dev, "%s: stall on int endpoint\n", __func__); + /* TBD : halt to be cleared in work */ + case -EOVERFLOW: + default: + pr_debug_ratelimited("%s: non zero urb status = %d\n", + __func__, urb->status); + goto resubmit_int_urb; + } + + ctrl = (struct usb_cdc_notification *)urb->transfer_buffer; + data = (unsigned char *)(ctrl + 1); + + switch (ctrl->bNotificationType) { + case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: + dev->resp_avail++; + usb_fill_control_urb(dev->readurb, udev, + usb_rcvctrlpipe(udev, 0), + (unsigned char *)dev->in_ctlreq, + dev->readbuf, + DEFAULT_READ_URB_LENGTH, + resp_avail_cb, dev); + + status = usb_submit_urb(dev->readurb, GFP_ATOMIC); + if (status) { + dev_err(&udev->dev, + "%s: Error submitting Read URB %d\n", + __func__, status); + goto resubmit_int_urb; + } + return; + case USB_CDC_NOTIFY_NETWORK_CONNECTION: + dev_dbg(&udev->dev, "%s network\n", ctrl->wValue ? + "connected to" : "disconnected from"); + break; + case USB_CDC_NOTIFY_SERIAL_STATE: + dev->notify_ser_state++; + ctrl_bits = get_unaligned_le16(data); + dev_dbg(&udev->dev, "serial state: %d\n", ctrl_bits); + dev->cbits_tohost = ctrl_bits; + if (brdg && brdg->ops.send_cbits) + brdg->ops.send_cbits(brdg->ctx, ctrl_bits); + break; + default: + dev_err(&udev->dev, "%s: unknown notification %d received:" + "index %d len %d data0 %d data1 %d", + __func__, ctrl->bNotificationType, ctrl->wIndex, + ctrl->wLength, data[0], data[1]); + } + +resubmit_int_urb: + status = usb_submit_urb(urb, GFP_ATOMIC); + if (status) + dev_err(&udev->dev, "%s: Error re-submitting Int URB %d\n", + __func__, status); +} + +int ctrl_bridge_start_read(struct ctrl_bridge *dev) +{ + int retval = 0; + struct usb_device *udev; + + udev = interface_to_usbdev(dev->intf); + + retval = usb_autopm_get_interface_async(dev->intf); + if (retval < 0) { + dev_err(&udev->dev, "%s resumption fail\n", __func__); + goto done_nopm; + } + + retval = usb_submit_urb(dev->inturb, GFP_KERNEL); + if (retval < 0) + dev_err(&udev->dev, "%s intr submit %d\n", __func__, retval); + + usb_autopm_put_interface_async(dev->intf); +done_nopm: + return retval; +} + +static int ctrl_bridge_stop_read(struct ctrl_bridge *dev) +{ + if (dev->readurb) { + dev_dbg(&dev->udev->dev, "killing rcv urb\n"); + usb_unlink_urb(dev->readurb); + } + + if (dev->inturb) { + dev_dbg(&dev->udev->dev, "killing int urb\n"); + usb_unlink_urb(dev->inturb); + } + + return 0; +} + +int ctrl_bridge_open(struct bridge *brdg) +{ + struct ctrl_bridge *dev; + + if (!brdg) { + err("bridge is null\n"); + return -EINVAL; + } + + if (brdg->ch_id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[brdg->ch_id]; + if (!dev) { + err("dev is null\n"); + return -ENODEV; + } + + dev->brdg = brdg; + dev->snd_encap_cmd = 0; + dev->get_encap_res = 0; + dev->resp_avail = 0; + dev->set_ctrl_line_sts = 0; + dev->notify_ser_state = 0; + + return ctrl_bridge_start_read(dev); +} +EXPORT_SYMBOL(ctrl_bridge_open); + +void ctrl_bridge_close(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return; + + dev = __dev[id]; + if (!dev && !dev->brdg) + return; + + dev_dbg(&dev->udev->dev, "%s:\n", __func__); + + ctrl_bridge_set_cbits(dev->brdg->ch_id, 0); + usb_unlink_anchored_urbs(&dev->tx_submitted); + ctrl_bridge_stop_read(dev); + + dev->brdg = NULL; +} +EXPORT_SYMBOL(ctrl_bridge_close); + +static void ctrl_write_callback(struct urb *urb) +{ + + if (urb->status) { + pr_debug("Write status/size %d/%d\n", + urb->status, urb->actual_length); + } + + kfree(urb->transfer_buffer); + 
kfree(urb->setup_packet); + usb_free_urb(urb); +} + +int ctrl_bridge_write(unsigned int id, char *data, size_t size) +{ + int result; + struct urb *writeurb; + struct usb_ctrlrequest *out_ctlreq; + struct usb_device *udev; + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) { + result = -EINVAL; + goto free_data; + } + + dev = __dev[id]; + + if (!dev) { + result = -ENODEV; + goto free_data; + } + + udev = interface_to_usbdev(dev->intf); + + dev_dbg(&udev->dev, "%s:[id]:%u: write (%d bytes)\n", + __func__, id, size); + + writeurb = usb_alloc_urb(0, GFP_ATOMIC); + if (!writeurb) { + dev_err(&udev->dev, "%s: error allocating read urb\n", + __func__); + result = -ENOMEM; + goto free_data; + } + + out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_ATOMIC); + if (!out_ctlreq) { + dev_err(&udev->dev, + "%s: error allocating setup packet buffer\n", + __func__); + result = -ENOMEM; + goto free_urb; + } + + /* CDC Send Encapsulated Request packet */ + out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | + USB_RECIP_INTERFACE); + if (!data && !size) { + out_ctlreq->bRequest = USB_CDC_REQ_SET_CONTROL_LINE_STATE; + out_ctlreq->wValue = dev->cbits_tomdm; + dev->set_ctrl_line_sts++; + } else { + out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; + out_ctlreq->wValue = 0; + dev->snd_encap_cmd++; + } + out_ctlreq->wIndex = + dev->intf->cur_altsetting->desc.bInterfaceNumber; + out_ctlreq->wLength = cpu_to_le16(size); + + usb_fill_control_urb(writeurb, udev, + usb_sndctrlpipe(udev, 0), + (unsigned char *)out_ctlreq, + (void *)data, size, + ctrl_write_callback, NULL); + + result = usb_autopm_get_interface_async(dev->intf); + if (result < 0) { + dev_err(&udev->dev, "%s: unable to resume interface: %d\n", + __func__, result); + + /* + * Revisit: if (result == -EPERM) + * bridge_suspend(dev->intf, PMSG_SUSPEND); + */ + + goto free_ctrlreq; + } + + usb_anchor_urb(writeurb, &dev->tx_submitted); + result = usb_submit_urb(writeurb, GFP_ATOMIC); + if (result < 0) { + dev_err(&udev->dev, "%s: submit URB error %d\n", + __func__, result); + usb_autopm_put_interface_async(dev->intf); + goto unanchor_urb; + } + + return size; + +unanchor_urb: + usb_unanchor_urb(writeurb); +free_ctrlreq: + kfree(out_ctlreq); +free_urb: + usb_free_urb(writeurb); +free_data: + kfree(data); + + return result; +} +EXPORT_SYMBOL(ctrl_bridge_write); + +int ctrl_bridge_suspend(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + usb_kill_anchored_urbs(&dev->tx_submitted); + + return ctrl_bridge_stop_read(dev); +} + +int ctrl_bridge_resume(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + return ctrl_bridge_start_read(dev); +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t ctrl_bridge_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ctrl_bridge *dev; + char *buf; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName#%s dev %p\n" + "snd encap cmd cnt: %u\n" + "get encap res cnt: %u\n" + "res available cnt: %u\n" + "set ctrlline sts cnt: %u\n" + "notify ser state cnt: %u\n" + "cbits_tomdm: %d\n" + "cbits_tohost: %d\n", + dev->pdev->name, dev, + 
dev->snd_encap_cmd, + dev->get_encap_res, + dev->resp_avail, + dev->set_ctrl_line_sts, + dev->notify_ser_state, + dev->cbits_tomdm, + dev->cbits_tohost); + + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t ctrl_bridge_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ctrl_bridge *dev; + int i; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + dev->snd_encap_cmd = 0; + dev->get_encap_res = 0; + dev->resp_avail = 0; + dev->set_ctrl_line_sts = 0; + dev->notify_ser_state = 0; + } + return count; +} + +const struct file_operations ctrl_stats_ops = { + .read = ctrl_bridge_read_stats, + .write = ctrl_bridge_reset_stats, +}; + +struct dentry *ctrl_dent; +struct dentry *ctrl_dfile; +static void ctrl_bridge_debugfs_init(void) +{ + ctrl_dent = debugfs_create_dir("ctrl_hsic_bridge", 0); + if (IS_ERR(ctrl_dent)) + return; + + ctrl_dfile = + debugfs_create_file("status", 0644, ctrl_dent, 0, + &ctrl_stats_ops); + if (!ctrl_dfile || IS_ERR(ctrl_dfile)) + debugfs_remove(ctrl_dent); +} + +static void ctrl_bridge_debugfs_exit(void) +{ + debugfs_remove(ctrl_dfile); + debugfs_remove(ctrl_dent); +} + +#else +static void ctrl_bridge_debugfs_init(void) { } +static void ctrl_bridge_debugfs_exit(void) { } +#endif + +int +ctrl_bridge_probe(struct usb_interface *ifc, struct usb_host_endpoint *int_in, + int id) +{ + struct ctrl_bridge *dev; + struct usb_device *udev; + struct usb_endpoint_descriptor *ep; + u16 wMaxPacketSize; + int retval = 0; + int interval; + + udev = interface_to_usbdev(ifc); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(&udev->dev, "%s: unable to allocate dev\n", + __func__); + return -ENOMEM; + } + dev->pdev = platform_device_alloc(ctrl_bridge_names[id], id); + if (!dev->pdev) { + dev_err(&dev->udev->dev, + "%s: unable to allocate platform device\n", __func__); + retval = -ENOMEM; + goto nomem; + } + + dev->udev = udev; + dev->int_pipe = usb_rcvintpipe(udev, + int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + dev->intf = ifc; + + init_usb_anchor(&dev->tx_submitted); + + /*use max pkt size from ep desc*/ + ep = &dev->intf->cur_altsetting->endpoint[0].desc; + + dev->inturb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->inturb) { + dev_err(&udev->dev, "%s: error allocating int urb\n", __func__); + retval = -ENOMEM; + goto pdev_del; + } + + wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize); + + dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL); + if (!dev->intbuf) { + dev_err(&udev->dev, "%s: error allocating int buffer\n", + __func__); + retval = -ENOMEM; + goto free_inturb; + } + + interval = + (udev->speed == USB_SPEED_HIGH) ? 
HS_INTERVAL : FS_LS_INTERVAL; + + usb_fill_int_urb(dev->inturb, udev, dev->int_pipe, + dev->intbuf, wMaxPacketSize, + notification_available_cb, dev, interval); + + dev->readurb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->readurb) { + dev_err(&udev->dev, "%s: error allocating read urb\n", + __func__); + retval = -ENOMEM; + goto free_intbuf; + } + + dev->readbuf = kmalloc(DEFAULT_READ_URB_LENGTH, GFP_KERNEL); + if (!dev->readbuf) { + dev_err(&udev->dev, "%s: error allocating read buffer\n", + __func__); + retval = -ENOMEM; + goto free_rurb; + } + + dev->in_ctlreq = kmalloc(sizeof(*dev->in_ctlreq), GFP_KERNEL); + if (!dev->in_ctlreq) { + dev_err(&udev->dev, + "%s:error allocating setup packet buffer\n", + __func__); + retval = -ENOMEM; + goto free_rbuf; + } + + dev->in_ctlreq->bRequestType = + (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); + dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; + dev->in_ctlreq->wValue = 0; + dev->in_ctlreq->wIndex = + dev->intf->cur_altsetting->desc.bInterfaceNumber; + dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH); + + __dev[id] = dev; + + platform_device_add(dev->pdev); + + ch_id++; + + return retval; + +free_rbuf: + kfree(dev->readbuf); +free_rurb: + usb_free_urb(dev->readurb); +free_intbuf: + kfree(dev->intbuf); +free_inturb: + usb_free_urb(dev->inturb); +pdev_del: + platform_device_del(dev->pdev); +nomem: + kfree(dev); + + return retval; +} + +void ctrl_bridge_disconnect(unsigned int id) +{ + struct ctrl_bridge *dev = __dev[id]; + + dev_dbg(&dev->udev->dev, "%s:\n", __func__); + + kfree(dev->in_ctlreq); + kfree(dev->readbuf); + kfree(dev->intbuf); + + usb_free_urb(dev->readurb); + usb_free_urb(dev->inturb); + + platform_device_del(dev->pdev); + __dev[id] = NULL; + ch_id--; + + kfree(dev); +} + +static int __init ctrl_bridge_init(void) +{ + ctrl_bridge_debugfs_init(); + + return 0; +} +module_init(ctrl_bridge_init); + +static void __exit ctrl_bridge_exit(void) +{ + ctrl_bridge_debugfs_exit(); +} +module_exit(ctrl_bridge_exit); + +MODULE_DESCRIPTION("Qualcomm modem control bridge driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/misc/mdm_data_bridge.c b/drivers/usb/misc/mdm_data_bridge.c new file mode 100644 index 00000000..c41fcfb0 --- /dev/null +++ b/drivers/usb/misc/mdm_data_bridge.c @@ -0,0 +1,923 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_RX_URBS 50 +#define RMNET_RX_BUFSIZE 2048 + +#define STOP_SUBMIT_URB_LIMIT 400 +#define FLOW_CTRL_EN_THRESHOLD 500 +#define FLOW_CTRL_DISABLE 300 +#define FLOW_CTRL_SUPPORT 1 + +static const char *data_bridge_names[] = { + "dun_data_hsic0", + "rmnet_data_hsic0" +}; + +static struct workqueue_struct *bridge_wq; + +static unsigned int fctrl_support = FLOW_CTRL_SUPPORT; +module_param(fctrl_support, uint, S_IRUGO | S_IWUSR); + +static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD; +module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR); + +static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE; +module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); + +unsigned int max_rx_urbs = MAX_RX_URBS; +module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR); + +unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT; +module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR); + +#define TX_HALT BIT(0) +#define RX_HALT BIT(1) +#define SUSPENDED BIT(2) + +struct data_bridge { + struct usb_interface *intf; + struct usb_device *udev; + unsigned int bulk_in; + unsigned int bulk_out; + + /* keep track of in-flight URBs */ + struct usb_anchor tx_active; + struct usb_anchor rx_active; + + /* keep track of outgoing URBs during suspend */ + struct usb_anchor delayed; + + struct list_head rx_idle; + struct sk_buff_head rx_done; + + struct workqueue_struct *wq; + struct work_struct process_rx_w; + + struct bridge *brdg; + + /* work queue function for handling halt conditions */ + struct work_struct kevent; + + unsigned long flags; + + struct platform_device *pdev; + + /* counters */ + atomic_t pending_txurbs; + unsigned int txurb_drp_cnt; + unsigned long to_host; + unsigned long to_modem; + unsigned int tx_throttled_cnt; + unsigned int tx_unthrottled_cnt; + unsigned int rx_throttled_cnt; + unsigned int rx_unthrottled_cnt; +}; + +static struct data_bridge *__dev[MAX_BRIDGE_DEVICES]; + +/* counter used for indexing data bridge devices */ +static int ch_id; + +static int submit_rx_urb(struct data_bridge *dev, struct urb *urb, + gfp_t flags); + +static inline bool rx_halted(struct data_bridge *dev) +{ + return test_bit(RX_HALT, &dev->flags); +} + +static inline bool rx_throttled(struct bridge *brdg) +{ + return test_bit(RX_THROTTLED, &brdg->flags); +} + +int data_bridge_unthrottle_rx(unsigned int id) +{ + struct data_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev && !dev->brdg) + return -ENODEV; + + dev->rx_unthrottled_cnt++; + queue_work(dev->wq, &dev->process_rx_w); + + return 0; +} +EXPORT_SYMBOL(data_bridge_unthrottle_rx); + +static void data_bridge_process_rx(struct work_struct *work) +{ + int retval; + unsigned long flags; + struct urb *rx_idle; + struct sk_buff *skb; + struct data_bridge *dev = + container_of(work, struct data_bridge, process_rx_w); + + struct bridge *brdg = dev->brdg; + + if (!brdg || !brdg->ops.send_pkt || rx_halted(dev)) + return; + + while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) { + dev->to_host++; + /* hand off sk_buff to client,they'll need to free it */ + retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len); + if (retval == -ENOTCONN || retval == -EINVAL) { + return; + } else if (retval == -EBUSY) { + dev->rx_throttled_cnt++; + break; + } + } + + spin_lock_irqsave(&dev->rx_done.lock, flags); + if (dev->rx_done.qlen > stop_submit_urb_limit && rx_throttled(brdg)) { + 
spin_unlock_irqrestore(&dev->rx_done.lock, flags); + return; + } + + while (!list_empty(&dev->rx_idle)) { + + rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list); + list_del(&rx_idle->urb_list); + spin_unlock_irqrestore(&dev->rx_done.lock, flags); + retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL); + spin_lock_irqsave(&dev->rx_done.lock, flags); + if (retval) + break; + } + spin_unlock_irqrestore(&dev->rx_done.lock, flags); +} + +static void data_bridge_read_cb(struct urb *urb) +{ + struct bridge *brdg; + struct sk_buff *skb = urb->context; + struct data_bridge *dev = *(struct data_bridge **)skb->cb; + bool queue = 0; + + brdg = dev->brdg; + + skb_put(skb, urb->actual_length); + + switch (urb->status) { + case 0: /* success */ + queue = 1; + spin_lock(&dev->rx_done.lock); + __skb_queue_tail(&dev->rx_done, skb); + spin_unlock(&dev->rx_done.lock); + break; + + /*do not resubmit*/ + case -EPIPE: + set_bit(RX_HALT, &dev->flags); + dev_err(&dev->udev->dev, "%s: epout halted\n", __func__); + schedule_work(&dev->kevent); + /* FALLTHROUGH */ + case -ESHUTDOWN: + case -ENOENT: /* suspended */ + case -ECONNRESET: /* unplug */ + case -EPROTO: + dev_kfree_skb_any(skb); + break; + + /*resubmit */ + case -EOVERFLOW: /*babble error*/ + default: + queue = 1; + dev_kfree_skb_any(skb); + pr_debug_ratelimited("%s: non zero urb status = %d\n", + __func__, urb->status); + break; + } + + spin_lock(&dev->rx_done.lock); + list_add_tail(&urb->urb_list, &dev->rx_idle); + spin_unlock(&dev->rx_done.lock); + + if (queue) + queue_work(dev->wq, &dev->process_rx_w); +} + +static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb, + gfp_t flags) +{ + struct sk_buff *skb; + int retval = -EINVAL; + + skb = alloc_skb(RMNET_RX_BUFSIZE, flags); + if (!skb) { + usb_free_urb(rx_urb); + return -ENOMEM; + } + + *((struct data_bridge **)skb->cb) = dev; + + usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in, + skb->data, RMNET_RX_BUFSIZE, + data_bridge_read_cb, skb); + + if (test_bit(SUSPENDED, &dev->flags)) + goto suspended; + + usb_anchor_urb(rx_urb, &dev->rx_active); + retval = usb_submit_urb(rx_urb, flags); + if (retval) + goto fail; + + return 0; +fail: + usb_unanchor_urb(rx_urb); +suspended: + dev_kfree_skb_any(skb); + usb_free_urb(rx_urb); + return retval; +} + +static int data_bridge_prepare_rx(struct data_bridge *dev) +{ + int i; + struct urb *rx_urb; + + for (i = 0; i < max_rx_urbs; i++) { + rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!rx_urb) + return -ENOMEM; + + list_add_tail(&rx_urb->urb_list, &dev->rx_idle); + } + return 0; +} + +int data_bridge_open(struct bridge *brdg) +{ + struct data_bridge *dev; + + if (!brdg) { + err("bridge is null\n"); + return -EINVAL; + } + + if (brdg->ch_id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[brdg->ch_id]; + if (!dev) { + err("dev is null\n"); + return -ENODEV; + } + + dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev); + + dev->brdg = brdg; + atomic_set(&dev->pending_txurbs, 0); + dev->to_host = 0; + dev->to_modem = 0; + dev->txurb_drp_cnt = 0; + dev->tx_throttled_cnt = 0; + dev->tx_unthrottled_cnt = 0; + dev->rx_throttled_cnt = 0; + dev->rx_unthrottled_cnt = 0; + + queue_work(dev->wq, &dev->process_rx_w); + + return 0; +} +EXPORT_SYMBOL(data_bridge_open); + +void data_bridge_close(unsigned int id) +{ + struct data_bridge *dev; + struct sk_buff *skb; + unsigned long flags; + + if (id >= MAX_BRIDGE_DEVICES) + return; + + dev = __dev[id]; + if (!dev && !dev->brdg) + return; + + dev_dbg(&dev->udev->dev, "%s:\n", __func__); + + 
usb_unlink_anchored_urbs(&dev->tx_active); + usb_unlink_anchored_urbs(&dev->rx_active); + usb_unlink_anchored_urbs(&dev->delayed); + + spin_lock_irqsave(&dev->rx_done.lock, flags); + while ((skb = __skb_dequeue(&dev->rx_done))) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&dev->rx_done.lock, flags); + + dev->brdg = NULL; +} +EXPORT_SYMBOL(data_bridge_close); + +static void defer_kevent(struct work_struct *work) +{ + int status; + struct data_bridge *dev = + container_of(work, struct data_bridge, kevent); + + if (!dev) + return; + + if (test_bit(TX_HALT, &dev->flags)) { + usb_unlink_anchored_urbs(&dev->tx_active); + + status = usb_autopm_get_interface(dev->intf); + if (status < 0) { + dev_err(&dev->udev->dev, + "can't acquire interface, status %d\n", status); + return; + } + + status = usb_clear_halt(dev->udev, dev->bulk_out); + usb_autopm_put_interface(dev->intf); + if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) + dev_err(&dev->udev->dev, + "can't clear tx halt, status %d\n", status); + else + clear_bit(TX_HALT, &dev->flags); + } + + if (test_bit(RX_HALT, &dev->flags)) { + usb_unlink_anchored_urbs(&dev->rx_active); + + status = usb_autopm_get_interface(dev->intf); + if (status < 0) { + dev_err(&dev->udev->dev, + "can't acquire interface, status %d\n", status); + return; + } + + status = usb_clear_halt(dev->udev, dev->bulk_in); + usb_autopm_put_interface(dev->intf); + if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) + dev_err(&dev->udev->dev, + "can't clear rx halt, status %d\n", status); + else { + clear_bit(RX_HALT, &dev->flags); + if (dev->brdg) + queue_work(dev->wq, &dev->process_rx_w); + } + } +} + +static void data_bridge_write_cb(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct data_bridge *dev = *(struct data_bridge **)skb->cb; + struct bridge *brdg = dev->brdg; + int pending; + + pr_debug("%s: dev:%p\n", __func__, dev); + + switch (urb->status) { + case 0: /*success*/ + break; + case -EPIPE: + set_bit(TX_HALT, &dev->flags); + dev_err(&dev->udev->dev, "%s: epout halted\n", __func__); + schedule_work(&dev->kevent); + /* FALLTHROUGH */ + case -ESHUTDOWN: + case -ENOENT: /* suspended */ + case -ECONNRESET: /* unplug */ + case -EOVERFLOW: /*babble error*/ + /* FALLTHROUGH */ + default: + pr_debug_ratelimited("%s: non zero urb status = %d\n", + __func__, urb->status); + } + + usb_free_urb(urb); + dev_kfree_skb_any(skb); + + pending = atomic_dec_return(&dev->pending_txurbs); + + /*flow ctrl*/ + if (brdg && fctrl_support && pending <= fctrl_dis_thld && + test_and_clear_bit(TX_THROTTLED, &brdg->flags)) { + pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n", + __func__, pending); + dev->tx_unthrottled_cnt++; + if (brdg->ops.unthrottle_tx) + brdg->ops.unthrottle_tx(brdg->ctx); + } + + usb_autopm_put_interface_async(dev->intf); +} + +int data_bridge_write(unsigned int id, struct sk_buff *skb) +{ + int result; + int size = skb->len; + int pending; + struct urb *txurb; + struct data_bridge *dev = __dev[id]; + struct bridge *brdg; + + if (!dev || !dev->brdg || !usb_get_intfdata(dev->intf)) + return -ENODEV; + + brdg = dev->brdg; + + dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len); + + result = usb_autopm_get_interface(dev->intf); + if (result < 0) { + dev_err(&dev->udev->dev, "%s: resume failure\n", __func__); + goto error; + } + + txurb = usb_alloc_urb(0, GFP_KERNEL); + if (!txurb) { + dev_err(&dev->udev->dev, "%s: error allocating read urb\n", + __func__); + result = -ENOMEM; + goto error; + } + + /* store dev 
pointer in skb */ + *((struct data_bridge **)skb->cb) = dev; + + usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out, + skb->data, skb->len, data_bridge_write_cb, skb); + + if (test_bit(SUSPENDED, &dev->flags)) { + usb_anchor_urb(txurb, &dev->delayed); + goto free_urb; + } + + pending = atomic_inc_return(&dev->pending_txurbs); + usb_anchor_urb(txurb, &dev->tx_active); + + result = usb_submit_urb(txurb, GFP_KERNEL); + if (result < 0) { + usb_unanchor_urb(txurb); + atomic_dec(&dev->pending_txurbs); + dev_err(&dev->udev->dev, "%s: submit URB error %d\n", + __func__, result); + goto free_urb; + } + + dev->to_modem++; + dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending); + + /* flow control: last urb submitted but return -EBUSY */ + if (fctrl_support && pending > fctrl_en_thld) { + set_bit(TX_THROTTLED, &brdg->flags); + dev->tx_throttled_cnt++; + pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n", + __func__, pending); + return -EBUSY; + } + + return size; + +free_urb: + usb_free_urb(txurb); +error: + dev->txurb_drp_cnt++; + usb_autopm_put_interface(dev->intf); + + return result; +} +EXPORT_SYMBOL(data_bridge_write); + +static int data_bridge_resume(struct data_bridge *dev) +{ + struct urb *urb; + int retval; + + while ((urb = usb_get_from_anchor(&dev->delayed))) { + usb_anchor_urb(urb, &dev->tx_active); + atomic_inc(&dev->pending_txurbs); + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval < 0) { + atomic_dec(&dev->pending_txurbs); + usb_unanchor_urb(urb); + + /* TODO: need to free urb data */ + usb_scuttle_anchored_urbs(&dev->delayed); + break; + } + dev->to_modem++; + dev->txurb_drp_cnt--; + } + + clear_bit(SUSPENDED, &dev->flags); + + if (dev->brdg) + queue_work(dev->wq, &dev->process_rx_w); + + return 0; +} + +static int bridge_resume(struct usb_interface *iface) +{ + int retval = 0; + int oldstate; + struct data_bridge *dev = usb_get_intfdata(iface); + struct bridge *brdg = dev->brdg; + + oldstate = iface->dev.power.power_state.event; + iface->dev.power.power_state.event = PM_EVENT_ON; + + retval = data_bridge_resume(dev); + if (!retval) { + if (oldstate & PM_EVENT_SUSPEND && brdg) + retval = ctrl_bridge_resume(brdg->ch_id); + } + return retval; +} + +static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message) +{ + if (atomic_read(&dev->pending_txurbs) && + (message.event & PM_EVENT_AUTO)) + return -EBUSY; + + set_bit(SUSPENDED, &dev->flags); + + usb_kill_anchored_urbs(&dev->tx_active); + usb_kill_anchored_urbs(&dev->rx_active); + + return 0; +} + +static int bridge_suspend(struct usb_interface *intf, pm_message_t message) +{ + int retval; + struct data_bridge *dev = usb_get_intfdata(intf); + struct bridge *brdg = dev->brdg; + + retval = data_bridge_suspend(dev, message); + if (!retval) { + if (message.event & PM_EVENT_SUSPEND) { + if (brdg) + retval = ctrl_bridge_suspend(brdg->ch_id); + intf->dev.power.power_state.event = message.event; + } + } else { + dev_dbg(&dev->udev->dev, "%s: device is busy,cannot suspend\n", + __func__); + } + return retval; +} + +static int data_bridge_probe(struct usb_interface *iface, + struct usb_host_endpoint *bulk_in, + struct usb_host_endpoint *bulk_out, int id) +{ + struct data_bridge *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + err("%s: unable to allocate dev\n", __func__); + return -ENOMEM; + } + + dev->pdev = platform_device_alloc(data_bridge_names[id], id); + if (!dev->pdev) { + err("%s: unable to allocate platform device\n", __func__); + kfree(dev); + return -ENOMEM; + } + + 
init_usb_anchor(&dev->tx_active); + init_usb_anchor(&dev->rx_active); + init_usb_anchor(&dev->delayed); + + INIT_LIST_HEAD(&dev->rx_idle); + skb_queue_head_init(&dev->rx_done); + + dev->wq = bridge_wq; + + dev->udev = interface_to_usbdev(iface); + dev->intf = iface; + + dev->bulk_in = usb_rcvbulkpipe(dev->udev, + bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + + dev->bulk_out = usb_sndbulkpipe(dev->udev, + bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + + usb_set_intfdata(iface, dev); + + INIT_WORK(&dev->kevent, defer_kevent); + INIT_WORK(&dev->process_rx_w, data_bridge_process_rx); + + __dev[id] = dev; + + /*allocate list of rx urbs*/ + data_bridge_prepare_rx(dev); + + platform_device_add(dev->pdev); + + return 0; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct data_bridge *dev; + char *buf; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName#%s dev %p\n" + "pending tx urbs: %u\n" + "tx urb drp cnt: %u\n" + "to host: %lu\n" + "to mdm: %lu\n" + "tx throttled cnt: %u\n" + "tx unthrottled cnt: %u\n" + "rx throttled cnt: %u\n" + "rx unthrottled cnt: %u\n" + "rx done skb qlen: %u\n" + "suspended: %d\n" + "TX_HALT: %d\n" + "RX_HALT: %d\n", + dev->pdev->name, dev, + atomic_read(&dev->pending_txurbs), + dev->txurb_drp_cnt, + dev->to_host, + dev->to_modem, + dev->tx_throttled_cnt, + dev->tx_unthrottled_cnt, + dev->rx_throttled_cnt, + dev->rx_unthrottled_cnt, + dev->rx_done.qlen, + test_bit(SUSPENDED, &dev->flags), + test_bit(TX_HALT, &dev->flags), + test_bit(RX_HALT, &dev->flags)); + + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t data_bridge_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct data_bridge *dev; + int i; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + dev->to_host = 0; + dev->to_modem = 0; + dev->txurb_drp_cnt = 0; + dev->tx_throttled_cnt = 0; + dev->tx_unthrottled_cnt = 0; + dev->rx_throttled_cnt = 0; + dev->rx_unthrottled_cnt = 0; + } + return count; +} + +const struct file_operations data_stats_ops = { + .read = data_bridge_read_stats, + .write = data_bridge_reset_stats, +}; + +struct dentry *data_dent; +struct dentry *data_dfile; +static void data_bridge_debugfs_init(void) +{ + data_dent = debugfs_create_dir("data_hsic_bridge", 0); + if (IS_ERR(data_dent)) + return; + + data_dfile = debugfs_create_file("status", 0644, data_dent, 0, + &data_stats_ops); + if (!data_dfile || IS_ERR(data_dfile)) + debugfs_remove(data_dent); +} + +static void data_bridge_debugfs_exit(void) +{ + debugfs_remove(data_dfile); + debugfs_remove(data_dent); +} + +#else +static void data_bridge_debugfs_init(void) { } +static void data_bridge_debugfs_exit(void) { } +#endif + +static int __devinit +bridge_probe(struct usb_interface *iface, const struct usb_device_id *id) +{ + struct usb_host_endpoint *endpoint = NULL; + struct usb_host_endpoint *bulk_in = NULL; + struct usb_host_endpoint *bulk_out = NULL; + struct usb_host_endpoint *int_in = NULL; + struct usb_device *udev; + int i; + int status = 0; + int numends; + int iface_num; + + iface_num = 
iface->cur_altsetting->desc.bInterfaceNumber; + + if (iface->num_altsetting != 1) { + err("%s invalid num_altsetting %u\n", + __func__, iface->num_altsetting); + return -EINVAL; + } + + udev = interface_to_usbdev(iface); + usb_get_dev(udev); + + if (iface_num != DUN_IFACE_NUM && iface_num != TETHERED_RMNET_IFACE_NUM) + return 0; + + numends = iface->cur_altsetting->desc.bNumEndpoints; + for (i = 0; i < numends; i++) { + endpoint = iface->cur_altsetting->endpoint + i; + if (!endpoint) { + dev_err(&udev->dev, "%s: invalid endpoint %u\n", + __func__, i); + status = -EINVAL; + goto out; + } + + if (usb_endpoint_is_bulk_in(&endpoint->desc)) + bulk_in = endpoint; + else if (usb_endpoint_is_bulk_out(&endpoint->desc)) + bulk_out = endpoint; + else if (usb_endpoint_is_int_in(&endpoint->desc)) + int_in = endpoint; + } + + if (!bulk_in || !bulk_out || !int_in) { + dev_err(&udev->dev, "%s: invalid endpoints\n", __func__); + status = -EINVAL; + goto out; + } + + status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id); + if (status < 0) { + dev_err(&udev->dev, "data_bridge_probe failed %d\n", status); + goto out; + } + + status = ctrl_bridge_probe(iface, int_in, ch_id); + if (status < 0) { + dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status); + goto free_data_bridge; + } + ch_id++; + + return 0; + +free_data_bridge: + platform_device_del(__dev[ch_id]->pdev); + usb_set_intfdata(iface, NULL); + kfree(__dev[ch_id]); + __dev[ch_id] = NULL; +out: + usb_put_dev(udev); + + return status; +} + +static void bridge_disconnect(struct usb_interface *intf) +{ + struct data_bridge *dev = usb_get_intfdata(intf); + struct list_head *head; + struct urb *rx_urb; + unsigned long flags; + int iface_num; + + if (!dev) { + err("%s: data device not found\n", __func__); + return; + } + + iface_num = intf->cur_altsetting->desc.bInterfaceNumber; + if (iface_num != DUN_IFACE_NUM && iface_num != TETHERED_RMNET_IFACE_NUM) + return; + + ch_id--; + ctrl_bridge_disconnect(ch_id); + platform_device_del(dev->pdev); + usb_set_intfdata(intf, NULL); + __dev[ch_id] = NULL; + + cancel_work_sync(&dev->process_rx_w); + cancel_work_sync(&dev->kevent); + + /*free rx urbs*/ + head = &dev->rx_idle; + spin_lock_irqsave(&dev->rx_done.lock, flags); + while (!list_empty(head)) { + rx_urb = list_entry(head->next, struct urb, urb_list); + list_del(&rx_urb->urb_list); + usb_free_urb(rx_urb); + } + spin_unlock_irqrestore(&dev->rx_done.lock, flags); + + usb_put_dev(dev->udev); + kfree(dev); +} + +static const struct usb_device_id bridge_ids[] = { + { USB_DEVICE(0x5c6, 0x9001) }, +}; + +MODULE_DEVICE_TABLE(usb, bridge_ids); + +static struct usb_driver bridge_driver = { + .name = "mdm_bridge", + .probe = bridge_probe, + .disconnect = bridge_disconnect, + .id_table = bridge_ids, + .suspend = bridge_suspend, + .resume = bridge_resume, + .supports_autosuspend = 1, +}; + +static int __init bridge_init(void) +{ + int ret; + + ret = usb_register(&bridge_driver); + if (ret) { + err("%s: unable to register mdm_bridge driver", __func__); + return ret; + } + + bridge_wq = create_singlethread_workqueue("mdm_bridge"); + if (!bridge_wq) { + usb_deregister(&bridge_driver); + pr_err("%s: Unable to create workqueue:bridge\n", __func__); + return -ENOMEM; + } + + data_bridge_debugfs_init(); + + return 0; +} + +static void __exit bridge_exit(void) +{ + data_bridge_debugfs_exit(); + destroy_workqueue(bridge_wq); + usb_deregister(&bridge_driver); +} + +module_init(bridge_init); +module_exit(bridge_exit); + +MODULE_DESCRIPTION("Qualcomm modem data bridge driver"); 
+MODULE_LICENSE("GPL v2");

From 963887be854167e003537b5ce2a829d319c9c7ff Mon Sep 17 00:00:00 2001
From: Shane Passmore
Date: Mon, 26 Nov 2012 19:20:27 -0600
Subject: [PATCH 111/117] USB: OTG: Take wakelock when VBUS present

Enabled by default, can disable with:
echo N > /sys/module/otg_wakelock/parameters/enabled

Change-Id: I34974624c52ae23490852b44c270d2f326cf6116
Signed-off-by: Todd Poynor

usb: otg: Temporarily grab wakelock on charger and disconnect events

Change-Id: If995d4af4adcb08e8369009483f2956ad9627267
Signed-off-by: Todd Poynor

add for OTG support by faux123
---
 drivers/usb/otg/otg-wakelock.c | 170 +++++++++++++++++++++++++++++++++
 1 file changed, 170 insertions(+)
 create mode 100644 drivers/usb/otg/otg-wakelock.c

diff --git a/drivers/usb/otg/otg-wakelock.c b/drivers/usb/otg/otg-wakelock.c
new file mode 100644
index 00000000..f4429b06
--- /dev/null
+++ b/drivers/usb/otg/otg-wakelock.c
@@ -0,0 +1,170 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define TEMPORARY_HOLD_TIME	2000
+
+static bool enabled = true;
+static struct otg_transceiver *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock, but since these 3 fields are associated with each other...
+ */
+
+struct otgwl_lock {
+	char name[40];
+	struct wake_lock wakelock;
+	bool held;
+};
+
+/*
+ * VBUS present lock. Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */ + +static struct otgwl_lock vbus_lock; + +static void otgwl_hold(struct otgwl_lock *lock) +{ + if (!lock->held) { + wake_lock(&lock->wakelock); + lock->held = true; + } +} + +static void otgwl_temporary_hold(struct otgwl_lock *lock) +{ + wake_lock_timeout(&lock->wakelock, + msecs_to_jiffies(TEMPORARY_HOLD_TIME)); + lock->held = false; +} + +static void otgwl_drop(struct otgwl_lock *lock) +{ + if (lock->held) { + wake_unlock(&lock->wakelock); + lock->held = false; + } +} + +static void otgwl_handle_event(unsigned long event) +{ + unsigned long irqflags; + + spin_lock_irqsave(&otgwl_spinlock, irqflags); + + if (!enabled) { + otgwl_drop(&vbus_lock); + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); + return; + } + + switch (event) { + case USB_EVENT_VBUS: + case USB_EVENT_ENUMERATED: + otgwl_hold(&vbus_lock); + break; + + case USB_EVENT_NONE: + case USB_EVENT_ID: + case USB_EVENT_CHARGER: + otgwl_temporary_hold(&vbus_lock); + break; + + default: + break; + } + + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); +} + +static int otgwl_otg_notifications(struct notifier_block *nb, + unsigned long event, void *unused) +{ + otgwl_handle_event(event); + return NOTIFY_OK; +} + +static int set_enabled(const char *val, const struct kernel_param *kp) +{ + int rv = param_set_bool(val, kp); + + if (rv) + return rv; + + if (otgwl_xceiv) + otgwl_handle_event(otgwl_xceiv->last_event); + + return 0; +} + +static struct kernel_param_ops enabled_param_ops = { + .set = set_enabled, + .get = param_get_bool, +}; + +module_param_cb(enabled, &enabled_param_ops, &enabled, 0644); +MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present"); + +static int __init otg_wakelock_init(void) +{ + int ret; + + otgwl_xceiv = otg_get_transceiver(); + + if (!otgwl_xceiv) { + pr_err("%s: No OTG transceiver found\n", __func__); + return -ENODEV; + } + + snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s", + dev_name(otgwl_xceiv->dev)); + wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND, + vbus_lock.name); + + otgwl_nb.notifier_call = otgwl_otg_notifications; + ret = otg_register_notifier(otgwl_xceiv, &otgwl_nb); + + if (ret) { + pr_err("%s: otg_register_notifier on transceiver %s" + " failed\n", __func__, + dev_name(otgwl_xceiv->dev)); + otgwl_xceiv = NULL; + wake_lock_destroy(&vbus_lock.wakelock); + return ret; + } + + otgwl_handle_event(otgwl_xceiv->last_event); + return ret; +} + +late_initcall(otg_wakelock_init); + From 6b27a15080e597f308a20bf24e778ceb05869227 Mon Sep 17 00:00:00 2001 From: David Hays Date: Sat, 25 May 2013 10:15:32 -0500 Subject: [PATCH 112/117] vigor: update defconfig Change-Id: Ie693c3feee0f8e19fc406c12e1e91af6a52ec731 --- arch/arm/configs/vigor_aosp_defconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 692d1c5d..c6f17b6a 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -1043,8 +1043,8 @@ CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=y CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=y -# CONFIG_BT_BNEP_MC_FILTER is not set -# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_HIDP=y # @@ -2161,7 +2161,7 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICE_CLASS=y # CONFIG_USB_DYNAMIC_MINORS is not set CONFIG_USB_SUSPEND=y -# CONFIG_USB_OTG is not set +CONFIG_USB_OTG=y # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set # CONFIG_USB_MON is not set @@ -2307,7 
+2307,7 @@ CONFIG_USB_GADGET_VERIZON_PRODUCT_ID=y # OTG and related infrastructure # CONFIG_USB_OTG_UTILS=y -# CONFIG_USB_OTG_WAKELOCK is not set +CONFIG_USB_OTG_WAKELOCK=y # CONFIG_USB_GPIO_VBUS is not set # CONFIG_USB_ULPI is not set # CONFIG_USB_MSM_OTG_72K is not set From b65ff1f49e1b9df5ca4df1db1a9364ce2ce3b9c1 Mon Sep 17 00:00:00 2001 From: David Hays Date: Sun, 26 May 2013 12:33:11 -0500 Subject: [PATCH 113/117] board-vigor: add missing MHL GPIOs --- arch/arm/mach-msm/board-vigor.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm/mach-msm/board-vigor.h b/arch/arm/mach-msm/board-vigor.h index 9d6445c6..09be34cb 100644 --- a/arch/arm/mach-msm/board-vigor.h +++ b/arch/arm/mach-msm/board-vigor.h @@ -104,6 +104,11 @@ /* MHL */ #define VIGOR_GPIO_MHL_RST_N (43) #define VIGOR_GPIO_MHL_INTR_N (58) +#define VIGOR_GPIO_MHL_WAKE_UP (62) +#define VIGOR_GPIO_MHL_RESET (70) +#define VIGOR_GPIO_MHL_INT (71) +#define VIGOR_GPIO_MHL_USB_SWITCH (99) +#define VIGOR_GPIO_MHL_USB_EN (139) #define VIGOR_GPIO_MHL_SCL (170) #define VIGOR_GPIO_MHL_SDA (171) #define VIGOR_GPIO_MHL_HPD (172) From 3320c3732e7b8ad5ee3e49ef96ac4781dceee5ac Mon Sep 17 00:00:00 2001 From: David Hays Date: Sun, 26 May 2013 14:34:41 -0500 Subject: [PATCH 114/117] Update board-vigor.c --- arch/arm/mach-msm/board-vigor.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm/mach-msm/board-vigor.c b/arch/arm/mach-msm/board-vigor.c index aa6af978..c939b16c 100644 --- a/arch/arm/mach-msm/board-vigor.c +++ b/arch/arm/mach-msm/board-vigor.c @@ -7539,10 +7539,9 @@ static struct platform_device *vigor_devices[] __initdata = { &asoc_mvs_dai1, #endif -#if defined(CONFIG_USB_GADGET_MSM_72K) || defined(CONFIG_USB_EHCI_HCD) &msm_device_hsusb_host, &msm_device_otg, -#endif + #ifdef CONFIG_BATTERY_MSM &msm_batt_device, #endif From 1e375c6cc0d40b53501cb71e9b950cdee235202f Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Thu, 22 Nov 2012 02:07:00 -0800 Subject: [PATCH 115/117] USB: gadget: use CAF android gadget at M8260AAABQNLZA30170 Conflicts: drivers/char/diag/diagchar_core.c drivers/char/diag/diagfwd.c drivers/usb/gadget/Kconfig drivers/usb/gadget/android.c drivers/usb/gadget/ci13xxx_udc.c drivers/usb/gadget/composite.c drivers/usb/gadget/f_adb.c drivers/usb/gadget/f_mass_storage.c drivers/usb/gadget/f_rmnet_sdio.c drivers/usb/gadget/f_rmnet_smd_sdio.c drivers/usb/gadget/f_serial.c drivers/usb/gadget/u_ether.c Change-Id: I67b3945aa74ba09fd8a99962c718de5713b44f5d --- arch/arm/mach-msm/include/mach/usb_bam.h | 40 + .../mach-msm/include/mach/usb_gadget_xport.h | 89 + drivers/char/diag/Kconfig | 19 +- drivers/char/diag/Makefile | 1 + drivers/char/diag/diagchar.h | 94 +- drivers/char/diag/diagchar_core.c | 744 +---- drivers/char/diag/diagfwd.c | 1017 ++++-- drivers/char/diag/diagfwd.h | 25 +- drivers/char/diag/diagfwd_cntl.c | 152 +- drivers/char/diag/diagfwd_cntl.h | 55 +- drivers/char/diag/diagfwd_hsic.c | 530 ++++ drivers/char/diag/diagfwd_hsic.h | 23 + drivers/char/diag/diagfwd_sdio.c | 210 +- drivers/char/diag/diagfwd_sdio.h | 2 +- drivers/usb/gadget/Kconfig | 252 +- drivers/usb/gadget/android.c | 1247 ++------ drivers/usb/gadget/ci13xxx_msm.c | 34 +- drivers/usb/gadget/ci13xxx_udc.c | 186 +- drivers/usb/gadget/ci13xxx_udc.h | 15 + drivers/usb/gadget/composite.c | 80 - drivers/usb/gadget/f_accessory.c | 4 +- drivers/usb/gadget/f_acm.c | 191 +- drivers/usb/gadget/f_adb.c | 69 +- drivers/usb/gadget/f_ccid.c | 1014 ++++++ drivers/usb/gadget/f_ccid.h | 83 + drivers/usb/gadget/f_diag.c | 271 +- 
drivers/usb/gadget/f_diag.h | 24 + drivers/usb/gadget/f_mass_storage.c | 216 +- drivers/usb/gadget/f_mtp.c | 30 +- drivers/usb/gadget/f_rmnet.c | 1057 +++++++ drivers/usb/gadget/f_rmnet.h | 19 + drivers/usb/gadget/f_rmnet_sdio.c | 1535 +++++++++ drivers/usb/gadget/f_rmnet_smd.c | 1368 ++++++++ drivers/usb/gadget/f_rmnet_smd_sdio.c | 87 +- drivers/usb/gadget/f_rndis.c | 50 +- drivers/usb/gadget/f_serial.c | 242 +- drivers/usb/gadget/msm72k_udc.c | 2793 +++++++++++++++++ drivers/usb/gadget/printer.c | 2 +- drivers/usb/gadget/qcom_maemo.c | 304 ++ drivers/usb/gadget/rndis.c | 19 +- drivers/usb/gadget/storage_common.c | 7 +- drivers/usb/gadget/u_bam.c | 488 ++- drivers/usb/gadget/u_ctrl_hsic.c | 617 ++++ drivers/usb/gadget/u_data_hsic.c | 962 ++++++ drivers/usb/gadget/u_ether.c | 179 +- drivers/usb/gadget/u_ether.h | 3 + drivers/usb/gadget/u_rmnet.h | 19 +- drivers/usb/gadget/u_rmnet_ctrl_smd.c | 114 +- drivers/usb/gadget/u_sdio.c | 23 +- drivers/usb/gadget/u_serial.c | 35 +- drivers/usb/gadget/u_serial.h | 2 +- drivers/usb/gadget/u_smd.c | 80 +- include/linux/diagchar.h | 142 +- include/linux/usb/android.h | 24 + include/linux/usb/ccid_desc.h | 112 + include/linux/usb/composite.h | 8 - include/linux/usb/gadget.h | 4 +- 57 files changed, 13265 insertions(+), 3747 deletions(-) create mode 100644 arch/arm/mach-msm/include/mach/usb_bam.h create mode 100644 arch/arm/mach-msm/include/mach/usb_gadget_xport.h create mode 100644 drivers/char/diag/diagfwd_hsic.c create mode 100644 drivers/char/diag/diagfwd_hsic.h create mode 100644 drivers/usb/gadget/f_ccid.c create mode 100644 drivers/usb/gadget/f_ccid.h create mode 100644 drivers/usb/gadget/f_diag.h create mode 100644 drivers/usb/gadget/f_rmnet.c create mode 100644 drivers/usb/gadget/f_rmnet.h create mode 100644 drivers/usb/gadget/f_rmnet_sdio.c create mode 100644 drivers/usb/gadget/f_rmnet_smd.c create mode 100644 drivers/usb/gadget/msm72k_udc.c create mode 100644 drivers/usb/gadget/qcom_maemo.c create mode 100644 drivers/usb/gadget/u_ctrl_hsic.c create mode 100644 drivers/usb/gadget/u_data_hsic.c create mode 100644 include/linux/usb/android.h create mode 100644 include/linux/usb/ccid_desc.h diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h new file mode 100644 index 00000000..4caa71bb --- /dev/null +++ b/arch/arm/mach-msm/include/mach/usb_bam.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _USB_BAM_H_ +#define _USB_BAM_H_ + +/** + * Connect USB-to-Periperal SPS connection. + * + * This function returns the allocated pipes number. + * + * @idx - Connection index. 
+ * + * @src_pipe_idx - allocated pipe index - USB as a + * source (output) + * + * @dst_pipe_idx - allocated pipe index - USB as a + * destination (output) + * + * @return 0 on success, negative value on error + * + */ +#ifdef CONFIG_USB_BAM +int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx); +#else +int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx) +{ + return -ENODEV; +} +#endif +#endif /* _USB_BAM_H_ */ diff --git a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h new file mode 100644 index 00000000..d8a3e60f --- /dev/null +++ b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_GADGET_XPORT_H__ +#define __LINUX_USB_GADGET_XPORT_H__ + +enum transport_type { + USB_GADGET_XPORT_UNDEF, + USB_GADGET_XPORT_TTY, + USB_GADGET_XPORT_SDIO, + USB_GADGET_XPORT_SMD, + USB_GADGET_XPORT_BAM, + USB_GADGET_XPORT_BAM2BAM, + USB_GADGET_XPORT_HSIC, + USB_GADGET_XPORT_NONE, +}; + +#define XPORT_STR_LEN 10 + +static char *xport_to_str(enum transport_type t) +{ + switch (t) { + case USB_GADGET_XPORT_TTY: + return "TTY"; + case USB_GADGET_XPORT_SDIO: + return "SDIO"; + case USB_GADGET_XPORT_SMD: + return "SMD"; + case USB_GADGET_XPORT_BAM: + return "BAM"; + case USB_GADGET_XPORT_BAM2BAM: + return "BAM2BAM"; + case USB_GADGET_XPORT_HSIC: + return "HSIC"; + case USB_GADGET_XPORT_NONE: + return "NONE"; + default: + return "UNDEFINED"; + } +} + +static enum transport_type str_to_xport(const char *name) +{ + if (!strncasecmp("TTY", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_TTY; + if (!strncasecmp("SDIO", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_SDIO; + if (!strncasecmp("SMD", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_SMD; + if (!strncasecmp("BAM", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_BAM; + if (!strncasecmp("BAM2BAM", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_BAM2BAM; + if (!strncasecmp("HSIC", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_HSIC; + if (!strncasecmp("", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_NONE; + + return USB_GADGET_XPORT_UNDEF; +} + +enum gadget_type { + USB_GADGET_SERIAL, + USB_GADGET_RMNET, +}; + +#define NUM_RMNET_HSIC_PORTS 1 +#define NUM_DUN_HSIC_PORTS 1 +#define NUM_PORTS (NUM_RMNET_HSIC_PORTS \ + + NUM_DUN_HSIC_PORTS) + +int ghsic_ctrl_connect(void *, int); +void ghsic_ctrl_disconnect(void *, int); +int ghsic_ctrl_setup(unsigned int, enum gadget_type); +int ghsic_data_connect(void *, int); +void ghsic_data_disconnect(void *, int); +int ghsic_data_setup(unsigned int, enum gadget_type); + +#endif diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig index bb8ee2e3..53df29b7 100644 --- a/drivers/char/diag/Kconfig +++ b/drivers/char/diag/Kconfig @@ -18,15 +18,6 @@ config DIAG_OVER_USB default y help This feature helps segregate code required for DIAG traffic to go over USB. 
- -config MODEM_DIAG_MASTER - bool "Set if Modem is to be the master on DIAG" - depends on ARCH_MSM - default n - help - Diag master: Android just forwards the Diag packet to modem. Modem will remove HDLC by itself. - Diag slave: Android kernel should remove HDLC from Diag packet before send to modem. - In the latest modem codebase, it was n by default. endmenu menu "SDIO support for DIAG" @@ -38,3 +29,13 @@ config DIAG_SDIO_PIPE help SDIO Transport Layer for DIAG Router endmenu + +menu "HSIC support for DIAG" + +config DIAG_HSIC_PIPE + depends on USB_QCOM_DIAG_BRIDGE + default y + bool "Enable 9K DIAG traffic over HSIC" + help + HSIC Transport Layer for DIAG Router +endmenu diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile index 52ab2b94..c62b7fdd 100644 --- a/drivers/char/diag/Makefile +++ b/drivers/char/diag/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_DIAG_CHAR) := diagchar.o obj-$(CONFIG_DIAG_SDIO_PIPE) += diagfwd_sdio.o +obj-$(CONFIG_DIAG_HSIC_PIPE) += diagfwd_hsic.o diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagmem.o diagfwd_cntl.o diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index f96c724b..7025ea8e 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,14 +17,13 @@ #include #include #include -#include #include #include #include -#include #include /* Size of the USB buffers used for read and write*/ #define USB_MAX_OUT_BUF 4096 +#define APPS_BUF_SIZE 2000 #define IN_BUF_SIZE 16384 #define MAX_IN_BUF_SIZE 32768 #define MAX_SYNC_OBJ_NAME_SIZE 32 @@ -41,20 +40,24 @@ #define APPS_DATA 3 #define SDIO_DATA 4 #define WCNSS_DATA 5 +#define HSIC_DATA 6 #define MODEM_PROC 0 #define APPS_PROC 1 #define QDSP_PROC 2 #define WCNSS_PROC 3 -#define MSG_MASK_SIZE 8000 +#define MSG_MASK_SIZE 9500 #define LOG_MASK_SIZE 8000 #define EVENT_MASK_SIZE 1000 #define USER_SPACE_DATA 8000 #define PKT_SIZE 4096 -#define MAX_EQUIP_ID 12 +#define MAX_EQUIP_ID 15 +#define DIAG_CTRL_MSG_LOG_MASK 9 +#define DIAG_CTRL_MSG_EVENT_MASK 10 +#define DIAG_CTRL_MSG_F3_MASK 11 /* Maximum number of pkt reg supported at initialization*/ -extern unsigned int diag_max_registration; -extern unsigned int diag_threshold_registration; +extern unsigned int diag_max_reg; +extern unsigned int diag_threshold_reg; #define APPEND_DEBUG(ch) \ do { \ @@ -130,11 +133,7 @@ struct diagchar_dev { int *data_ready; int num_clients; struct diag_write_device *buf_tbl; - spinlock_t diagchar_lock; -#ifdef CONFIG_DIAG_SDIO_PIPE - struct cdev *cdev_mdm; - int num_mdmclients; -#endif + /* Memory pool parameters */ unsigned int itemsize; unsigned int poolsize; @@ -152,7 +151,11 @@ struct diagchar_dev { int count_hdlc_pool; int count_write_struct_pool; int used; - + /* Buffers for masks */ + struct mutex diag_cntl_mutex; + struct diag_ctrl_event_mask *event_mask; + struct diag_ctrl_log_mask *log_mask; + struct diag_ctrl_msg_mask *msg_mask; /* State for diag forwarding */ unsigned char *buf_in_1; unsigned char *buf_in_2; @@ -162,13 +165,13 @@ struct diagchar_dev { unsigned char *buf_in_qdsp_cntl; unsigned char *buf_in_wcnss; unsigned char *buf_in_wcnss_cntl; - struct mutex diagcharmdm_mutex; - wait_queue_head_t mdmwait_q; - struct diag_client_map *mdmclient_map; - int 
*mdmdata_ready; unsigned char *usb_buf_out; unsigned char *apps_rsp_buf; unsigned char *user_space_data; + /* buffer for updating mask to peripherals */ + unsigned char *buf_msg_mask_update; + unsigned char *buf_log_mask_update; + unsigned char *buf_event_mask_update; smd_channel_t *ch; smd_channel_t *ch_cntl; smd_channel_t *chqdsp; @@ -191,7 +194,6 @@ struct diagchar_dev { struct work_struct diag_read_work; #endif struct workqueue_struct *diag_wq; - struct wake_lock wake_lock; struct work_struct diag_drain_work; struct work_struct diag_read_smd_work; struct work_struct diag_read_smd_cntl_work; @@ -199,6 +201,10 @@ struct diagchar_dev { struct work_struct diag_read_smd_qdsp_cntl_work; struct work_struct diag_read_smd_wcnss_work; struct work_struct diag_read_smd_wcnss_cntl_work; + struct workqueue_struct *diag_cntl_wq; + struct work_struct diag_modem_mask_update_work; + struct work_struct diag_qdsp_mask_update_work; + struct work_struct diag_wcnss_mask_update_work; uint8_t *msg_masks; uint8_t *log_masks; int log_masks_length; @@ -214,45 +220,43 @@ struct diagchar_dev { struct diag_request *write_ptr_qdsp_2; struct diag_request *write_ptr_wcnss; int logging_mode; + int mask_check; int logging_process_id; -#if DIAG_XPST - unsigned char nohdlc; - unsigned char in_busy_dmrounter; - struct mutex smd_lock; - unsigned char init_done; - unsigned char is2ARM11; -#endif #ifdef CONFIG_DIAG_SDIO_PIPE - unsigned char *buf_in_sdio_1; - unsigned char *buf_in_sdio_2; + unsigned char *buf_in_sdio; unsigned char *usb_buf_mdm_out; struct sdio_channel *sdio_ch; int read_len_mdm; - int in_busy_sdio_1; - int in_busy_sdio_2; + int in_busy_sdio; struct usb_diag_ch *mdm_ch; struct work_struct diag_read_mdm_work; struct workqueue_struct *diag_sdio_wq; struct work_struct diag_read_sdio_work; - struct work_struct diag_remove_sdio_work; + struct work_struct diag_close_sdio_work; struct diag_request *usb_read_mdm_ptr; - struct diag_request *write_ptr_mdm_1; - struct diag_request *write_ptr_mdm_2; + struct diag_request *write_ptr_mdm; +#endif +#ifdef CONFIG_DIAG_HSIC_PIPE + unsigned char *buf_in_hsic; + unsigned char *usb_buf_mdm_out; + int hsic_initialized; + int hsic_ch; + int hsic_device_enabled; + int hsic_device_opened; + int read_len_mdm; + int in_busy_hsic_read_on_mdm; + int in_busy_hsic_write_on_mdm; + int in_busy_hsic_write; + int in_busy_hsic_read; + int usb_mdm_connected; + struct usb_diag_ch *mdm_ch; + struct workqueue_struct *diag_hsic_wq; + struct work_struct diag_read_mdm_work; + struct work_struct diag_read_hsic_work; + struct diag_request *usb_read_mdm_ptr; + struct diag_request *write_ptr_mdm; #endif - u64 diag_smd_count; /* from smd */ - u64 diag_qdsp_count; /* from qdsp */ - void (*enable_sd_log)(unsigned int enable); - int qxdm2sd_drop; }; -#define EPST_FUN 1 -#define HPST_FUN 0 - -#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_MSM7X27A) -#define SMDDIAG_NAME "DIAG" -#else -#define SMDDIAG_NAME "SMD_DIAG" -#endif extern struct diagchar_dev *driver; -extern int is_wcnss_used; #endif diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 0126f28d..bbed3299 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -30,15 +30,11 @@ #include "diagfwd_cntl.h" #ifdef CONFIG_DIAG_SDIO_PIPE #include "diagfwd_sdio.h" -static unsigned char *buf_9k; #endif -#include - -#ifdef CONFIG_USB_ANDROID_MDM9K_DIAG -int diag_support_mdm9k = 1; -#else -int diag_support_mdm9k = 0; +#ifdef CONFIG_DIAG_HSIC_PIPE +#include "diagfwd_hsic.h" #endif +#include MODULE_DESCRIPTION("Diag Char Driver"); MODULE_LICENSE("GPL v2"); @@ -52,7 +48,7 @@ struct diagchar_priv { }; /* The following variables can be specified by module options */ /* for copy buffer */ -static unsigned int itemsize = 2048; /*Size of item in the mempool */ +static unsigned int itemsize = 4096; /*Size of item in the mempool */ static unsigned int poolsize = 10; /*Number of items in the mempool */ /* for hdlc buffer */ static unsigned int itemsize_hdlc = 8192; /*Size of item in the mempool */ @@ -64,8 +60,8 @@ static unsigned int poolsize_write_struct = 8; /* Num of items in the mempool */ static unsigned int max_clients = 15; static unsigned int threshold_client_limit = 30; /* This is the maximum number of pkt registrations supported at initialization*/ -unsigned int diag_max_registration = 500; -unsigned int diag_threshold_registration = 650; +unsigned int diag_max_reg = 600; +unsigned int diag_threshold_reg = 750; /* Timer variables */ static struct timer_list drain_timer; @@ -75,96 +71,6 @@ module_param(itemsize, uint, 0); module_param(poolsize, uint, 0); module_param(max_clients, uint, 0); -static unsigned s, entries_once = 50; -static ssize_t show_diag_registration(struct device *dev, - struct device_attribute *attr, char *buf) -{ - uint16_t i, p = 0, e; - e = s + entries_once; - e = (e > diag_max_registration)?diag_max_registration:e; - - p += sprintf(buf+p, "Registration(%d) #%d -> #%d\n", - diag_max_registration, s, e - 1); - - for (i = s; i < e ; i++) { - p += sprintf(buf+p, "#%03d cmd_code: 0x%02x, subsys_id: 0x%02x ", i, - driver->table[i].cmd_code, driver->table[i].subsys_id); - if (driver->table[i].client_id == APPS_PROC) - p += sprintf(buf+p, "APPS_PROC(%d)\n", - driver->table[i].process_id); - else if (driver->table[i].client_id == MODEM_PROC) - p += sprintf(buf+p, "MODEM_PROC\n"); - else if (driver->table[i].client_id == QDSP_PROC) - p += sprintf(buf+p, "QDSP_PROC\n"); - else if (driver->table[i].client_id == WCNSS_PROC) - p += sprintf(buf+p, "WCNSS_PROC\n"); - else - p += sprintf(buf+p, "UNKNOWN SOURCE\n"); - } - - return p; - -} - -static ssize_t store_registration_index(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long u; - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) - return ret; - if (u > diag_max_registration) - return 0; - s = u; - return count; -} - -unsigned diag7k_debug_mask = DIAGLOG_MODE_NONE; -unsigned diag9k_debug_mask = DIAGLOG_MODE_NONE; -static ssize_t show_diag_debug_mask(struct device *dev, - struct device_attribute *attr, char *buf) -{ - uint16_t p = 0; - - p += sprintf(buf+p, "diag7k_debug_mask: %d\n" - "diag9k_debug_mask: %d\n", - diag7k_debug_mask, diag9k_debug_mask); - - return p; -} - -static ssize_t store_diag7k_debug_mask(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long u; - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) - return ret; - diag7k_debug_mask = u; - return count; -} - -static ssize_t 
store_diag9k_debug_mask(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long u; - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) - return ret; - diag9k_debug_mask = u; - return count; -} - -static DEVICE_ATTR(diag_reg_table, 0664, - show_diag_registration, store_registration_index); -static DEVICE_ATTR(diag7k_debug_mask, 0664, - show_diag_debug_mask, store_diag7k_debug_mask); -static DEVICE_ATTR(diag9k_debug_mask, 0664, - show_diag_debug_mask, store_diag9k_debug_mask); - /* delayed_rsp_id 0 represents no delay in the response. Any other number means that the diag packet has a delayed response. */ static uint16_t delayed_rsp_id = 1; @@ -177,9 +83,8 @@ static uint16_t delayed_rsp_id = 1; #define COPY_USER_SPACE_OR_EXIT(buf, data, length) \ do { \ - if (count < ret+length) \ - goto exit; \ - if (copy_to_user(buf, (void *)&data, length)) { \ + if ((count < ret+length) || (copy_to_user(buf, \ + (void *)&data, length))) { \ ret = -EFAULT; \ goto exit; \ } \ @@ -248,8 +153,6 @@ static int diagchar_open(struct inode *inode, struct file *file) { int i = 0; void *temp; - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); if (driver) { mutex_lock(&driver->diagchar_mutex); @@ -281,15 +184,16 @@ static int diagchar_open(struct inode *inode, struct file *file) } else { mutex_unlock(&driver->diagchar_mutex); pr_alert("Max client limit for DIAG reached\n"); - DIAG_INFO("Cannot open handle %s" + pr_info("Cannot open handle %s" " %d", current->comm, current->tgid); for (i = 0; i < driver->num_clients; i++) - DIAG_WARNING("%d) %s PID=%d", i, driver-> + pr_debug("%d) %s PID=%d", i, driver-> client_map[i].name, driver->client_map[i].pid); return -ENOMEM; } } + driver->data_ready[i] = 0x0; driver->data_ready[i] |= MSG_MASKS_TYPE; driver->data_ready[i] |= EVENT_MASKS_TYPE; driver->data_ready[i] |= LOG_MASKS_TYPE; @@ -319,19 +223,19 @@ static int diagchar_close(struct inode *inode, struct file *file) return -ENOMEM; } - if (driver) { #ifdef CONFIG_DIAG_OVER_USB - /* If the SD logging process exits, change logging to USB mode */ - if (driver->logging_process_id == current->tgid) { - driver->logging_mode = USB_MODE; - diagfwd_connect(); - } + /* If the SD logging process exits, change logging to USB mode */ + if (driver->logging_process_id == current->tgid) { + driver->logging_mode = USB_MODE; + diagfwd_connect(); + } #endif /* DIAG over USB */ - /* Delete the pkt response table entry for the exiting process */ - for (i = 0; i < diag_max_registration; i++) - if (driver->table[i].process_id == current->tgid) - driver->table[i].process_id = 0; + /* Delete the pkt response table entry for the exiting process */ + for (i = 0; i < diag_max_reg; i++) + if (driver->table[i].process_id == current->tgid) + driver->table[i].process_id = 0; + if (driver) { mutex_lock(&driver->diagchar_mutex); driver->ref_count--; /* On Client exit, try to destroy all 3 pools */ @@ -357,7 +261,7 @@ void diag_clear_reg(int proc_num) { int i; - for (i = 0; i < diag_max_registration; i++) { + for (i = 0; i < diag_max_reg; i++) { if (driver->table[i].client_id == proc_num) { driver->table[i].process_id = 0; } @@ -389,14 +293,11 @@ long diagchar_ioctl(struct file *filp, int success = -1; void *temp_buf; - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - if (iocmd == DIAG_IOCTL_COMMAND_REG) { struct bindpkt_params_per_process *pkt_params = (struct 
bindpkt_params_per_process *) ioarg; mutex_lock(&driver->diagchar_mutex); - for (i = 0; i < diag_max_registration; i++) { + for (i = 0; i < diag_max_reg; i++) { if (driver->table[i].process_id == 0) { diag_add_reg(i, pkt_params->params, &success, &count_entries); @@ -408,19 +309,20 @@ long diagchar_ioctl(struct file *filp, } } } - if (i < diag_threshold_registration) { + if (i < diag_threshold_reg) { /* Increase table size by amount required */ - diag_max_registration += pkt_params->count - + diag_max_reg += pkt_params->count - count_entries; /* Make sure size doesnt go beyond threshold */ - if (diag_max_registration > diag_threshold_registration) - diag_max_registration = - diag_threshold_registration; + if (diag_max_reg > diag_threshold_reg) { + diag_max_reg = diag_threshold_reg; + pr_info("diag: best case memory allocation\n"); + } temp_buf = krealloc(driver->table, - diag_max_registration*sizeof(struct + diag_max_reg*sizeof(struct diag_master_table), GFP_KERNEL); if (!temp_buf) { - diag_max_registration -= pkt_params->count - + diag_max_reg -= pkt_params->count - count_entries; pr_alert("diag: Insufficient memory for reg."); mutex_unlock(&driver->diagchar_mutex); @@ -428,7 +330,7 @@ long diagchar_ioctl(struct file *filp, } else { driver->table = temp_buf; } - for (j = i; j < diag_max_registration; j++) { + for (j = i; j < diag_max_reg; j++) { diag_add_reg(j, pkt_params->params, &success, &count_entries); if (pkt_params->count > count_entries) { @@ -438,6 +340,7 @@ long diagchar_ioctl(struct file *filp, return success; } } + mutex_unlock(&driver->diagchar_mutex); } else { mutex_unlock(&driver->diagchar_mutex); pr_err("Max size reached, Pkt Registration failed for" @@ -469,8 +372,12 @@ long diagchar_ioctl(struct file *filp, mutex_lock(&driver->diagchar_mutex); temp = driver->logging_mode; driver->logging_mode = (int)ioarg; - if (driver->logging_mode == UART_MODE) + if (driver->logging_mode == MEMORY_DEVICE_MODE) + driver->mask_check = 1; + if (driver->logging_mode == UART_MODE) { + driver->mask_check = 0; driver->logging_mode = MEMORY_DEVICE_MODE; + } driver->logging_process_id = current->tgid; mutex_unlock(&driver->diagchar_mutex); if (temp == MEMORY_DEVICE_MODE && driver->logging_mode @@ -480,6 +387,9 @@ long diagchar_ioctl(struct file *filp, driver->in_busy_qdsp_1 = 1; driver->in_busy_qdsp_2 = 1; driver->in_busy_wcnss = 1; +#ifdef CONFIG_DIAG_SDIO_PIPE + driver->in_busy_sdio = 1; +#endif } else if (temp == NO_LOGGING_MODE && driver->logging_mode == MEMORY_DEVICE_MODE) { driver->in_busy_1 = 0; @@ -497,6 +407,13 @@ long diagchar_ioctl(struct file *filp, if (driver->ch_wcnss) queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); +#ifdef CONFIG_DIAG_SDIO_PIPE + driver->in_busy_sdio = 0; + /* Poll SDIO channel to check for data */ + if (driver->sdio_ch) + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); +#endif } #ifdef CONFIG_DIAG_OVER_USB else if (temp == USB_MODE && driver->logging_mode @@ -507,9 +424,7 @@ long diagchar_ioctl(struct file *filp, diagfwd_connect(); else if (temp == USB_MODE && driver->logging_mode == MEMORY_DEVICE_MODE) { - DIAG_INFO("diag: USB disconnected\n"); diagfwd_disconnect(); - DIAG_INFO("sdlogging enable\n"); driver->in_busy_1 = 0; driver->in_busy_2 = 0; driver->in_busy_qdsp_2 = 0; @@ -525,11 +440,16 @@ long diagchar_ioctl(struct file *filp, if (driver->ch_wcnss) queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); - } else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode - == USB_MODE) { - DIAG_INFO("sdlogging 
disable\n"); - diagfwd_connect(); - } +#ifdef CONFIG_DIAG_SDIO_PIPE + driver->in_busy_sdio = 0; + /* Poll SDIO channel to check for data */ + if (driver->sdio_ch) + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); +#endif + } else if (temp == MEMORY_DEVICE_MODE && + driver->logging_mode == USB_MODE) + diagfwd_connect(); #endif /* DIAG over USB */ success = 1; } @@ -542,29 +462,17 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, { int index = -1, i = 0, ret = 0; int num_data = 0, data_type; - -#ifdef SDQXDM_DEBUG - struct timeval t; -#endif - for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == current->tgid) index = i; if (index == -1) { - DIAG_ERR("%s:%s(parent:%s): tgid=%d " - "Client PID not found in table\n", __func__, - current->comm, current->parent->comm, current->tgid); - for (i = 0; i < driver->num_clients; i++) - DIAG_ERR("\t#%d: %d\n", i, driver->client_map[i].pid); + pr_err("diag: Client PID not found in table"); return -EINVAL; } wait_event_interruptible(driver->wait_q, driver->data_ready[index]); - if (diag7k_debug_mask) - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); mutex_lock(&driver->diagchar_mutex); if ((driver->data_ready[index] & USER_SPACE_LOG_TYPE) && (driver-> @@ -672,6 +580,20 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, driver->write_ptr_wcnss->length); driver->in_busy_wcnss = 0; } +#ifdef CONFIG_DIAG_SDIO_PIPE + /* copy 9K data over SDIO */ + if (driver->in_busy_sdio == 1) { + num_data++; + /*Copy the length of data being passed*/ + COPY_USER_SPACE_OR_EXIT(buf+ret, + (driver->write_ptr_mdm->length), 4); + /*Copy the actual data being passed*/ + COPY_USER_SPACE_OR_EXIT(buf+ret, + *(driver->buf_in_sdio), + driver->write_ptr_mdm->length); + driver->in_busy_sdio = 0; + } +#endif /* copy number of data fields */ COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4); ret -= 4; @@ -685,90 +607,17 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, if (driver->ch_wcnss) queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); +#ifdef CONFIG_DIAG_SDIO_PIPE + if (driver->sdio_ch) + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); +#endif APPEND_DEBUG('n'); goto exit; } else if (driver->data_ready[index] & USER_SPACE_LOG_TYPE) { /* In case, the thread wakes up and the logging mode is not memory device any more, the condition needs to be cleared */ driver->data_ready[index] ^= USER_SPACE_LOG_TYPE; - } else if (driver->data_ready[index] & USERMODE_DIAGFWD) { - data_type = USERMODE_DIAGFWD; - driver->data_ready[index] ^= USERMODE_DIAGFWD; - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); - -#ifdef SDQXDM_DEBUG - do_gettimeofday(&t); - - if (driver->in_busy_1 && t.tv_sec > driver->write_ptr_1->second + 2) - pr_info("[diag-dbg] late pkt now: %ld.%04ld pkt: %d\n", - t.tv_sec, t.tv_usec/1000, driver->write_ptr_1->second); - if (driver->in_busy_2 && t.tv_sec > driver->write_ptr_2->second + 2) - pr_info("[diag-dbg] late pkt now: %ld.%04ld pkt: %d\n", - t.tv_sec, t.tv_usec/1000, driver->write_ptr_2->second); -#endif - for (i = 0; i < driver->poolsize_write_struct; i++) { - if (driver->buf_tbl[i].length > 0) { -#ifdef SDQXDM_DEBUG - if (diag7k_debug_mask) - printk(KERN_INFO "\n WRITING the buf address " - "and length is %x , %d\n", (unsigned int) - (driver->buf_tbl[i].buf), - driver->buf_tbl[i].length); -#endif - if (copy_to_user(buf+ret, (void *)driver-> - buf_tbl[i].buf, 
driver->buf_tbl[i].length)) - break; - - ret += driver->buf_tbl[i].length; - - diagmem_free(driver, (unsigned char *) - (driver->buf_tbl[i].buf), POOL_TYPE_HDLC); - driver->buf_tbl[i].length = 0; - driver->buf_tbl[i].buf = 0; - } - } - - /* copy modem data */ - if (driver->in_busy_1 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - *(driver->buf_in_1), - driver->write_ptr_1->length); - driver->in_busy_1 = 0; - } - if (driver->in_busy_2 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - *(driver->buf_in_2), - driver->write_ptr_2->length); - driver->in_busy_2 = 0; - } - - /* copy q6 data */ - if (driver->in_busy_qdsp_1 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_qdsp_1), - driver->write_ptr_qdsp_1->length); - driver->in_busy_qdsp_1 = 0; - } - if (driver->in_busy_qdsp_2 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_qdsp_2), driver-> - write_ptr_qdsp_2->length); - driver->in_busy_qdsp_2 = 0; - } - if (driver->ch) - queue_work(driver->diag_wq, - &(driver->diag_read_smd_work)); - if (driver->chqdsp) - queue_work(driver->diag_wq, - &(driver->diag_read_smd_qdsp_work)); - if (diag7k_debug_mask) - pr_info("%s() return %d byte\n", __func__, ret); - - goto exit; } if (driver->data_ready[index] & DEINIT_TYPE) { @@ -820,9 +669,6 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, } exit: - if (ret) - wake_lock_timeout(&driver->wake_lock, HZ / 2); - mutex_unlock(&driver->diagchar_mutex); return ret; } @@ -854,8 +700,8 @@ static int diagchar_write(struct file *file, const char __user *buf, err = copy_from_user(driver->user_space_data, buf + 4, payload_size); /* Check masks for On-Device logging */ - if (pkt_type == USER_SPACE_LOG_TYPE) { - if (!mask_request_validate((unsigned char *)buf)) { + if (driver->mask_check) { + if (!mask_request_validate(driver->user_space_data)) { pr_alert("diag: mask request Invalid\n"); return -EFAULT; } @@ -864,17 +710,32 @@ static int diagchar_write(struct file *file, const char __user *buf, #ifdef DIAG_DEBUG pr_debug("diag: user space data %d\n", payload_size); for (i = 0; i < payload_size; i++) - printk(KERN_DEBUG "\t %x", *(((unsigned char *)buf)+i)); + pr_debug("\t %x", *((driver->user_space_data)+i)); +#endif +#ifdef CONFIG_DIAG_SDIO_PIPE + /* send masks to 9k too */ + if (driver->sdio_ch) { + wait_event_interruptible(driver->wait_q, + (sdio_write_avail(driver->sdio_ch) >= + payload_size)); + if (driver->sdio_ch && (payload_size > 0)) { + sdio_write(driver->sdio_ch, (void *) + (driver->user_space_data), payload_size); + } + } #endif + /* send masks to modem now */ diag_process_hdlc((void *)(driver->user_space_data), payload_size); return 0; - } else if (pkt_type == USERMODE_DIAGFWD) { - if (diag7k_debug_mask) - pr_info("%s#%d recv %d bytes\n", __func__, __LINE__, payload_size); - buf += 4; - diag_process_hdlc((void *)buf, payload_size); - return count; + } + + if (payload_size > itemsize) { + pr_err("diag: Dropping packet, packet payload size crosses" + "4KB limit. 
Current payload size %d\n", + payload_size); + driver->dropped_count++; + return -EBADMSG; } buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY); @@ -927,8 +788,6 @@ static int diagchar_write(struct file *file, const char __user *buf, goto fail_free_hdlc; } buf_hdlc = NULL; - if (diag7k_debug_mask) - printk(KERN_INFO "\n size written is %d\n", driver->used); driver->used = 0; buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE, POOL_TYPE_HDLC); @@ -957,8 +816,6 @@ static int diagchar_write(struct file *file, const char __user *buf, goto fail_free_hdlc; } buf_hdlc = NULL; - if (diag7k_debug_mask) - printk(KERN_INFO "\n size written is %d\n", driver->used); driver->used = 0; buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE, POOL_TYPE_HDLC); @@ -984,8 +841,6 @@ static int diagchar_write(struct file *file, const char __user *buf, goto fail_free_hdlc; } buf_hdlc = NULL; - if (diag7k_debug_mask) - printk(KERN_INFO "\n size written is %d\n", driver->used); driver->used = 0; } @@ -1015,11 +870,11 @@ int mask_request_validate(unsigned char mask_buf[]) uint8_t subsys_id; uint16_t ss_cmd; - packet_id = mask_buf[4]; + packet_id = mask_buf[0]; if (packet_id == 0x4B) { - subsys_id = mask_buf[5]; - ss_cmd = *(uint16_t *)(mask_buf + 6); + subsys_id = mask_buf[1]; + ss_cmd = *(uint16_t *)(mask_buf + 2); /* Packets with SSID which are allowed */ switch (subsys_id) { case 0x04: /* DIAG_SUBSYS_WCDMA */ @@ -1083,315 +938,10 @@ static const struct file_operations diagcharfops = { .release = diagchar_close }; -#ifdef CONFIG_DIAG_SDIO_PIPE -static int diagcharmdm_open(struct inode *inode, struct file *file) -{ - int i = 0; - - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - if (driver) { - mutex_lock(&driver->diagcharmdm_mutex); - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == 0) - break; - - if (i < driver->num_mdmclients) { - driver->mdmclient_map[i].pid = current->tgid; - strncpy(driver->mdmclient_map[i].name, current->comm, 20); - driver->mdmclient_map[i].name[19] = '\0'; - } else { - mutex_unlock(&driver->diagcharmdm_mutex); - DIAG_INFO("%s:reach max client count\n", __func__); - for (i = 0; i < driver->num_clients; i++) - DIAG_WARNING("%d) %s PID=%d", i, driver-> - mdmclient_map[i].name, - driver->mdmclient_map[i].pid); - return -ENOMEM; - } - - driver->mdmdata_ready[i] |= MSG_MASKS_TYPE; - driver->mdmdata_ready[i] |= EVENT_MASKS_TYPE; - driver->mdmdata_ready[i] |= LOG_MASKS_TYPE; - - if (driver->ref_count == 0) - diagmem_init(driver); - driver->ref_count++; - - mutex_unlock(&driver->diagcharmdm_mutex); - return 0; - } - - return -ENOMEM; - -} - -static int diagcharmdm_close(struct inode *inode, struct file *file) -{ - - int i = 0; - - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - if (driver) { - mutex_lock(&driver->diagcharmdm_mutex); - - driver->ref_count--; - /* On Client exit, try to destroy all 3 pools */ - diagmem_exit(driver, POOL_TYPE_COPY); - diagmem_exit(driver, POOL_TYPE_HDLC); - diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT); - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == current->tgid) { - driver->mdmclient_map[i].pid = 0; - break; - } - - if (i < driver->num_mdmclients) - DIAG_INFO("%s:#%d(%d) %s close\n", __func__, - i, current->tgid, current->comm); - else - DIAG_WARNING("%s: nothing close\n", __func__); - mutex_unlock(&driver->diagcharmdm_mutex); - return 0; - } - - return 
-ENOMEM; -} - -static long diagcharmdm_ioctl(struct file *filp, - unsigned int iocmd, unsigned long ioarg) -{ - int success = -1; - - if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) { - mutex_lock(&driver->diagcharmdm_mutex); - driver->logging_mode = (int)ioarg; - driver->logging_process_id = current->tgid; - mutex_unlock(&driver->diagcharmdm_mutex); - if (driver->logging_mode == MEMORY_DEVICE_MODE) { - DIAG_INFO("diagcharmdm_ioctl enable\n"); - diagfwd_disconnect(); - driver->qxdm2sd_drop = 0; - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; - buf_9k = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); - if (driver->sdio_ch) - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - - } else if (driver->logging_mode == USB_MODE) { - DIAG_INFO("diagcharmdm_ioctl disable\n"); - diagfwd_connect(); - driver->qxdm2sd_drop = 1; - - kfree(buf_9k); - } - success = 1; - } - return success; -} - -static int diagcharmdm_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - - int index = -1, i = 0, ret = 0; - int num_data = 0, data_type; - - if (diag9k_debug_mask) - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == current->tgid) - index = i; - - if (index == -1) { - DIAG_ERR("%s:%s(parent:%s): tgid=%d " - "Client PID not found in table\n", __func__, - current->comm, current->parent->comm, current->tgid); - for (i = 0; i < driver->num_mdmclients; i++) - DIAG_ERR("\t#%d: %d\n", i, driver->mdmclient_map[i].pid); - return -EINVAL; - } - - wait_event_interruptible(driver->mdmwait_q, - driver->mdmdata_ready[index]); - - mutex_lock(&driver->diagcharmdm_mutex); - - if ((driver->mdmdata_ready[index] & USER_SPACE_LOG_TYPE) && (driver-> - logging_mode == MEMORY_DEVICE_MODE)) { - /*Copy the type of data being passed*/ - data_type = driver->data_ready[index] & USER_SPACE_LOG_TYPE; - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); - /* place holder for number of data field */ - ret += 4; - - if (driver->in_busy_sdio_1 == 1) { - - num_data++; - /*Copy the length of data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - (driver->write_ptr_mdm_1->length), 4); - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_1), driver-> - write_ptr_mdm_1->length); - driver->in_busy_sdio_1 = 0; - } - if (driver->in_busy_sdio_2 == 1) { - - num_data++; - /*Copy the length of data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - (driver->write_ptr_mdm_2->length), 4); - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_2), driver-> - write_ptr_mdm_2->length); - driver->in_busy_sdio_2 = 0; - } - - /* copy number of data fields */ - COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4); - ret -= 4; - - driver->mdmdata_ready[index] ^= USER_SPACE_LOG_TYPE; - - if (driver->sdio_ch) - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - - goto exit; - } else if (driver->mdmdata_ready[index] & USER_SPACE_LOG_TYPE) { - /* In case, the thread wakes up and the logging mode is - not memory device any more, the condition needs to be cleared */ - driver->mdmdata_ready[index] ^= USER_SPACE_LOG_TYPE; - } else if (driver->mdmdata_ready[index] & USERMODE_DIAGFWD) { - data_type = USERMODE_DIAGFWD; - driver->mdmdata_ready[index] ^= USERMODE_DIAGFWD; - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); - - if (driver->in_busy_sdio_1 == 1) { - /*Copy the actual data being passed*/ - 
COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_1), driver-> - write_ptr_mdm_1->length); - driver->in_busy_sdio_1 = 0; - } - if (driver->in_busy_sdio_2 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_2), driver-> - write_ptr_mdm_2->length); - driver->in_busy_sdio_2 = 0; - } - if (driver->sdio_ch) - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - goto exit; - } - - if (driver->mdmdata_ready[index] & DEINIT_TYPE) { - - driver->mdmdata_ready[index] ^= DEINIT_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & MSG_MASKS_TYPE) { - - driver->mdmdata_ready[index] ^= MSG_MASKS_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & EVENT_MASKS_TYPE) { - - driver->mdmdata_ready[index] ^= EVENT_MASKS_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & LOG_MASKS_TYPE) { - - driver->mdmdata_ready[index] ^= LOG_MASKS_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & PKT_TYPE) { - - driver->mdmdata_ready[index] ^= PKT_TYPE; - goto exit; - } -exit: - mutex_unlock(&driver->diagcharmdm_mutex); - - return ret; -} - -static int diagcharmdm_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - - int err, pkt_type; - int payload_size; - - if (diag9k_debug_mask) - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - -#ifdef CONFIG_DIAG_OVER_USB - if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) || - (driver->logging_mode == NO_LOGGING_MODE)) { - /*Drop the diag payload */ - return -EIO; - } -#endif /* DIAG over USB */ - - /* Get the packet type F3/log/event/Pkt response */ - err = copy_from_user((&pkt_type), buf, 4); - /*First 4 bytes indicate the type of payload - ignore these */ - payload_size = count - 4; - if (pkt_type == USER_SPACE_LOG_TYPE) { - if (diag9k_debug_mask) - DIAGFWD_INFO("writing mask file\n"); - if (!mask_request_validate((unsigned char *)buf)) { - DIAG_ERR("mask request Invalid ..cannot send to modem \n"); - return -EFAULT; - } - buf = buf + 4; - if (driver->sdio_ch) { - memcpy(buf_9k, buf, payload_size); - sdio_write(driver->sdio_ch, buf_9k, payload_size); - } - return count; - } else if (pkt_type == USERMODE_DIAGFWD) { - buf += 4; - if (driver->sdio_ch) { - memcpy(buf_9k, buf, payload_size); - sdio_write(driver->sdio_ch, buf_9k, payload_size); - } - return count; - } - return 0; -} - -static const struct file_operations diagcharmdmfops = { - .owner = THIS_MODULE, - .read = diagcharmdm_read, - .write = diagcharmdm_write, - .unlocked_ioctl = diagcharmdm_ioctl, - .open = diagcharmdm_open, - .release = diagcharmdm_close -}; -#endif - static int diagchar_setup_cdev(dev_t devno) { + int err; - struct device *diagdev; cdev_init(driver->cdev, &diagcharfops); @@ -1412,43 +962,15 @@ static int diagchar_setup_cdev(dev_t devno) return -1; } - diagdev = device_create(driver->diagchar_class, NULL, devno, + device_create(driver->diagchar_class, NULL, devno, (void *)driver, "diag"); - - err = device_create_file(diagdev, &dev_attr_diag_reg_table); - if (err) - DIAG_INFO("dev_attr_diag_reg_table registration failed !\n\n"); - err = device_create_file(diagdev, &dev_attr_diag7k_debug_mask); - if (err) - DIAG_INFO("dev_attr_diag7k_debug_mask registration failed !\n\n"); - err = device_create_file(diagdev, &dev_attr_diag9k_debug_mask); - if (err) - DIAG_INFO("dev_attr_diag9k_debug_mask registration failed !\n\n"); - -#ifdef CONFIG_DIAG_SDIO_PIPE - cdev_init(driver->cdev_mdm, 
&diagcharmdmfops); - - driver->cdev_mdm->owner = THIS_MODULE; - driver->cdev_mdm->ops = &diagcharmdmfops; - - err = cdev_add(driver->cdev_mdm, devno+1, 1); - - if (err) { - DIAG_ERR("diagchar cdev mdm registration failed !\n\n"); - return -1; - } - - device_create(driver->diagchar_class, NULL, devno+1, (void *)driver, "diag_mdm"); -#endif return 0; } static int diagchar_cleanup(void) { - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); if (driver) { if (driver->cdev) { /* TODO - Check if device exists before deleting */ @@ -1459,7 +981,6 @@ static int diagchar_cleanup(void) } if (!IS_ERR(driver->diagchar_class)) class_destroy(driver->diagchar_class); - wake_lock_destroy(&driver->wake_lock); kfree(driver); } return 0; @@ -1468,7 +989,7 @@ static int diagchar_cleanup(void) #ifdef CONFIG_DIAG_SDIO_PIPE void diag_sdio_fn(int type) { - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { if (type == INIT) diagfwd_sdio_init(); else if (type == EXIT) @@ -1479,12 +1000,24 @@ void diag_sdio_fn(int type) inline void diag_sdio_fn(int type) {} #endif +#ifdef CONFIG_DIAG_HSIC_PIPE +void diag_hsic_fn(int type) +{ + if (type == INIT) + diagfwd_hsic_init(); + else if (type == EXIT) + diagfwd_hsic_exit(); +} +#else +inline void diag_hsic_fn(int type) {} +#endif + static int __init diagchar_init(void) { dev_t dev; int error; - DIAG_INFO("diagfwd initializing ..\n"); + pr_debug("diagfwd initializing ..\n"); driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL); if (driver) { @@ -1500,10 +1033,9 @@ static int __init diagchar_init(void) driver->poolsize_write_struct = poolsize_write_struct; driver->num_clients = max_clients; driver->logging_mode = USB_MODE; + driver->mask_check = 0; mutex_init(&driver->diagchar_mutex); init_waitqueue_head(&driver->wait_q); - wake_lock_init(&driver->wake_lock, WAKE_LOCK_SUSPEND, "diagchar"); - INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn); INIT_WORK(&(driver->diag_read_smd_work), diag_read_smd_work_fn); INIT_WORK(&(driver->diag_read_smd_cntl_work), @@ -1516,25 +1048,12 @@ static int __init diagchar_init(void) diag_read_smd_wcnss_work_fn); INIT_WORK(&(driver->diag_read_smd_wcnss_cntl_work), diag_read_smd_wcnss_cntl_work_fn); -#ifdef CONFIG_DIAG_SDIO_PIPE - driver->num_mdmclients = 1; - init_waitqueue_head(&driver->mdmwait_q); - spin_lock_init(&driver->diagchar_lock); - mutex_init(&driver->diagcharmdm_mutex); - - driver->num = 2; -#else - driver->num = 1; -#endif diagfwd_init(); - if (chk_config_get_id() == AO8960_TOOLS_ID) { - diagfwd_cntl_init(); - DIAGFWD_INFO("CNTL channel was enabled in the platform\n"); - } else - DIAGFWD_INFO("CNTL channel was not enabled in the platform\n"); - + diagfwd_cntl_init(); diag_sdio_fn(INIT); + diag_hsic_fn(INIT); pr_debug("diagchar initializing ..\n"); + driver->num = 1; driver->name = ((void *)driver) + sizeof(struct diagchar_dev); strlcpy(driver->name, "diag", 4); @@ -1549,10 +1068,6 @@ static int __init diagchar_init(void) goto fail; } driver->cdev = cdev_alloc(); - -#ifdef CONFIG_DIAG_SDIO_PIPE - driver->cdev_mdm = cdev_alloc(); -#endif error = diagchar_setup_cdev(dev); if (error) goto fail; @@ -1561,7 +1076,7 @@ static int __init diagchar_init(void) goto fail; } - DIAG_INFO("diagchar initialized\n"); + pr_info("diagchar initialized now"); return 0; fail: @@ -1569,6 +1084,7 @@ static int __init diagchar_init(void) diagfwd_exit(); diagfwd_cntl_exit(); diag_sdio_fn(EXIT); + diag_hsic_fn(EXIT); return -1; } @@ -1579,9 
+1095,9 @@ static void __exit diagchar_exit(void) ensure no memory leaks */ diagmem_exit(driver, POOL_TYPE_ALL); diagfwd_exit(); - if (chk_config_get_id() == AO8960_TOOLS_ID) - diagfwd_cntl_exit(); + diagfwd_cntl_exit(); diag_sdio_fn(EXIT); + diag_hsic_fn(EXIT); diagchar_cleanup(); printk(KERN_INFO "done diagchar exit\n"); } diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index dd0f0314..e11afa8d 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -9,7 +9,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - #include #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #ifdef CONFIG_DIAG_OVER_USB #include #endif @@ -36,22 +36,36 @@ #ifdef CONFIG_DIAG_SDIO_PIPE #include "diagfwd_sdio.h" #endif -#define MODE_CMD 41 -#define RESET_ID 2 +#define MODE_CMD 41 +#define RESET_ID 2 +#define ALL_EQUIP_ID 100 +#define ALL_SSID -1 +#define MAX_SSID_PER_RANGE 100 -int is_wcnss_used; int diag_debug_buf_idx; unsigned char diag_debug_buf[1024]; static unsigned int buf_tbl_size = 8; /*Number of entries in table of buffers */ -int sdio_diag_initialized; -int smd_diag_initialized; -#if DIAG_XPST -static int diag_smd_function_mode; -#endif struct diag_master_table entry; -smd_channel_t *ch_temp; +smd_channel_t *ch_temp, *chqdsp_temp, *ch_wcnss_temp; +int diag_event_num_bytes; +int diag_event_config; struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; struct diag_hdlc_dest_type enc = { NULL, NULL, 0 }; +struct mask_info { + int equip_id; + int num_items; + int index; +}; + +#define CREATE_MSG_MASK_TBL_ROW(XX) \ +do { \ + *(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX; \ + msg_mask_tbl_ptr += 4; \ + *(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX ## _LAST; \ + msg_mask_tbl_ptr += 4; \ + /* increment by MAX_SSID_PER_RANGE cells */ \ + msg_mask_tbl_ptr += MAX_SSID_PER_RANGE * sizeof(int); \ +} while (0) #define ENCODE_RSP_AND_SEND(buf_length) \ do { \ @@ -61,7 +75,7 @@ do { \ send.terminate = 1; \ if (!driver->in_busy_1) { \ enc.dest = driver->buf_in_1; \ - enc.dest_last = (void *)(driver->buf_in_1 + 499); \ + enc.dest_last = (void *)(driver->buf_in_1 + APPS_BUF_SIZE - 1);\ diag_hdlc_encode(&send, &enc); \ driver->write_ptr_1->buf = driver->buf_in_1; \ driver->write_ptr_1->length = (int)(enc.dest - \ @@ -69,40 +83,79 @@ do { \ driver->in_busy_1 = 1; \ diag_device_write(driver->buf_in_1, MODEM_DATA, \ driver->write_ptr_1); \ - memset(driver->apps_rsp_buf, '\0', 500); \ + memset(driver->apps_rsp_buf, '\0', APPS_BUF_SIZE); \ } \ } while (0) #define CHK_OVERFLOW(bufStart, start, end, length) \ ((bufStart <= start) && (end - start >= length)) ? 
1 : 0 -int chk_config_get_id() +int chk_config_get_id(void) { + /* For all Fusion targets, Modem will always be present */ + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) + return 0; + switch (socinfo_get_id()) { case APQ8060_MACHINE_ID: case MSM8660_MACHINE_ID: return APQ8060_TOOLS_ID; case AO8960_MACHINE_ID: + case MSM8260A_MACHINE_ID: return AO8960_TOOLS_ID; + case APQ8064_MACHINE_ID: + return APQ8064_TOOLS_ID; + case MSM8930_MACHINE_ID: + return MSM8930_TOOLS_ID; + case MSM8974_MACHINE_ID: + return MSM8974_TOOLS_ID; default: return 0; } } +/* + * This will return TRUE for targets which support apps only mode and hence SSR. + * This applies to 8960 and newer targets. + */ +int chk_apps_only(void) +{ + switch (socinfo_get_id()) { + case AO8960_MACHINE_ID: + case APQ8064_MACHINE_ID: + case MSM8930_MACHINE_ID: + case MSM8630_MACHINE_ID: + case MSM8230_MACHINE_ID: + case APQ8030_MACHINE_ID: + case MSM8627_MACHINE_ID: + case MSM8227_MACHINE_ID: + case MSM8974_MACHINE_ID: + case MDM9615_MACHINE_ID: + case MSM8260A_MACHINE_ID: + return 1; + default: + return 0; + } +} + +/* + * This will return TRUE for targets which support apps as master. + * Thus, SW DLOAD and Mode Reset are supported on apps processor. + * This applies to 8960 and newer targets. + */ +int chk_apps_master(void) +{ + if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615()) + return 1; + else + return 0; +} + void __diag_smd_send_req(void) { void *buf = NULL; int *in_busy_ptr = NULL; struct diag_request *write_ptr_modem = NULL; -#if DIAG_XPST - int type; -#endif - -#ifdef SDQXDM_DEBUG - static struct timeval t0 = {0, 0}, t1; - static int full, empty; - long diff; -#endif if (!driver->in_busy_1) { buf = driver->buf_in_1; @@ -135,68 +188,12 @@ void __diag_smd_send_req(void) APPEND_DEBUG('i'); smd_read(driver->ch, buf, r); APPEND_DEBUG('j'); - if (diag7k_debug_mask) { - switch (diag7k_debug_mask) { - case DIAGLOG_MODE_HEAD: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem(first 16 bytes)", 16, 1, DUMP_PREFIX_ADDRESS, buf, 16, 1); - break; - case DIAGLOG_MODE_FULL: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem(first 16 bytes)", 16, 1, DUMP_PREFIX_ADDRESS, buf, 16, 1); - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem(last 16 bytes) ", 16, 1, DUMP_PREFIX_ADDRESS, buf+r-16, 16, 1); - break; - default: - #if 0 - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem ", 16, 1, DUMP_PREFIX_ADDRESS, buf, r, 1); - #endif - break; - } - } - -#if DIAG_XPST - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 0); - return; - } -#endif - -#ifdef SDQXDM_DEBUG - if (full) { - pr_err("[diag-dbg] buffer become available %d %d, read %d\n", - driver->in_busy_1, driver->in_busy_2, r); - full = 0; - } - do_gettimeofday(&t1); - diff = (t1.tv_sec-t0.tv_sec)*1000 + (t1.tv_usec-t0.tv_usec)/1000; - if (diff > 1000) { - pr_err("[diag-dbg] Over time (%ld) %ld.%04ld -> %ld.%04ld empty = %d\n", - diff, (long)t0.tv_sec, t0.tv_usec/1000, - (long)t1.tv_sec, t1.tv_usec/1000, empty); - } - write_ptr_modem->second = t1.tv_sec; - t0 = t1; - empty = 0; -#endif write_ptr_modem->length = r; *in_busy_ptr = 1; diag_device_write(buf, MODEM_DATA, write_ptr_modem); } } -#ifdef SDQXDM_DEBUG - else - empty++; -#endif - } else { -#ifdef SDQXDM_DEBUG - if (!full && driver->ch) - pr_info("[diag-dbg] Buffer full, %d bytes pending.\n", smd_read_avail(driver->ch)); - full = 1; -#endif } } @@ -214,35 +211,18 @@ int diag_device_write(void *buf, int proc_num, struct diag_request 
*write_ptr) #ifdef DIAG_DEBUG pr_debug("diag: ENQUEUE buf ptr" " and length is %x , %d\n", - (unsigned int)(driver->buf_tbl[i].buf), driver->buf_tbl[i].length); + (unsigned int)(driver->buf_ + tbl[i].buf), driver->buf_tbl[i].length); #endif break; } } -#ifdef CONFIG_DIAG_SDIO_PIPE - if (proc_num == SDIO_DATA) { - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == - driver->logging_process_id) - break; - - if (i < driver->num_mdmclients) { - driver->mdmdata_ready[i] |= USERMODE_DIAGFWD; - wake_up_interruptible(&driver->mdmwait_q); - - return err; - } else - return -EINVAL; - } -#endif for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == driver->logging_process_id) break; if (i < driver->num_clients) { - wake_lock_timeout(&driver->wake_lock, HZ / 2); - driver->data_ready[i] |= USERMODE_DIAGFWD; + driver->data_ready[i] |= USER_SPACE_LOG_TYPE; wake_up_interruptible(&driver->wait_q); } else return -EINVAL; @@ -262,6 +242,13 @@ int diag_device_write(void *buf, int proc_num, struct diag_request *write_ptr) queue_work(driver->diag_wq, &(driver-> diag_read_smd_wcnss_work)); } +#ifdef CONFIG_DIAG_SDIO_PIPE + else if (proc_num == SDIO_DATA) { + driver->in_busy_sdio = 0; + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); + } +#endif err = -1; } #ifdef CONFIG_DIAG_OVER_USB @@ -296,11 +283,23 @@ int diag_device_write(void *buf, int proc_num, struct diag_request *write_ptr) } #ifdef CONFIG_DIAG_SDIO_PIPE else if (proc_num == SDIO_DATA) { - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || + machine_is_msm8x60_fusn_ffa()) { + write_ptr->buf = buf; + err = usb_diag_write(driver->mdm_ch, write_ptr); + } else + pr_err("diag: Incorrect sdio data " + "while USB write\n"); + } +#endif +#ifdef CONFIG_DIAG_HSIC_PIPE + else if (proc_num == HSIC_DATA) { + if (driver->hsic_device_enabled) { write_ptr->buf = buf; err = usb_diag_write(driver->mdm_ch, write_ptr); } else - pr_err("diag: Incorrect data while USB write"); + pr_err("diag: Incorrect hsic data " + "while USB write\n"); } #endif APPEND_DEBUG('d'); @@ -314,9 +313,6 @@ void __diag_smd_wcnss_send_req(void) void *buf = driver->buf_in_wcnss; int *in_busy_wcnss_ptr = &(driver->in_busy_wcnss); struct diag_request *write_ptr_wcnss = driver->write_ptr_wcnss; -#if DIAG_XPST - int type; -#endif if ((!driver->in_busy_wcnss) && driver->ch_wcnss && buf) { int r = smd_read_avail(driver->ch_wcnss); @@ -336,13 +332,6 @@ void __diag_smd_wcnss_send_req(void) APPEND_DEBUG('i'); smd_read(driver->ch_wcnss, buf, r); APPEND_DEBUG('j'); -#if DIAG_XPST - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 0); - return; - } -#endif write_ptr_wcnss->length = r; *in_busy_wcnss_ptr = 1; diag_device_write(buf, WCNSS_DATA, @@ -357,9 +346,7 @@ void __diag_smd_qdsp_send_req(void) void *buf = NULL; int *in_busy_qdsp_ptr = NULL; struct diag_request *write_ptr_qdsp = NULL; -#if DIAG_XPST - int type; -#endif + if (!driver->in_busy_qdsp_1) { buf = driver->buf_in_qdsp_1; write_ptr_qdsp = driver->write_ptr_qdsp_1; @@ -391,13 +378,6 @@ void __diag_smd_qdsp_send_req(void) APPEND_DEBUG('i'); smd_read(driver->chqdsp, buf, r); APPEND_DEBUG('j'); -#if DIAG_XPST - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 0); - return; - } -#endif write_ptr_qdsp->length = r; *in_busy_qdsp_ptr = 1; diag_device_write(buf, QDSP_DATA, @@ -415,7 +395,7 @@ static void diag_print_mask_table(void) int last; uint8_t *ptr = driver->msg_masks; int i = 0; - + pr_info("diag: F3 
message mask table\n"); while (*(uint32_t *)(ptr + 4)) { first = *(uint32_t *)ptr; ptr += 4; @@ -424,12 +404,63 @@ static void diag_print_mask_table(void) printk(KERN_INFO "SSID %d - %d\n", first, last); for (i = 0 ; i <= last - first ; i++) printk(KERN_INFO "MASK:%x\n", *((uint32_t *)ptr + i)); - ptr += ((last - first) + 1)*4; + ptr += MAX_SSID_PER_RANGE*4; } #endif } +void diag_create_msg_mask_table(void) +{ + uint8_t *msg_mask_tbl_ptr = driver->msg_masks; + + CREATE_MSG_MASK_TBL_ROW(0); + CREATE_MSG_MASK_TBL_ROW(1); + CREATE_MSG_MASK_TBL_ROW(2); + CREATE_MSG_MASK_TBL_ROW(3); + CREATE_MSG_MASK_TBL_ROW(4); + CREATE_MSG_MASK_TBL_ROW(5); + CREATE_MSG_MASK_TBL_ROW(6); + CREATE_MSG_MASK_TBL_ROW(7); + CREATE_MSG_MASK_TBL_ROW(8); + CREATE_MSG_MASK_TBL_ROW(9); + CREATE_MSG_MASK_TBL_ROW(10); + CREATE_MSG_MASK_TBL_ROW(11); + CREATE_MSG_MASK_TBL_ROW(12); + CREATE_MSG_MASK_TBL_ROW(13); + CREATE_MSG_MASK_TBL_ROW(14); + CREATE_MSG_MASK_TBL_ROW(15); + CREATE_MSG_MASK_TBL_ROW(16); + CREATE_MSG_MASK_TBL_ROW(17); + CREATE_MSG_MASK_TBL_ROW(18); + CREATE_MSG_MASK_TBL_ROW(19); + CREATE_MSG_MASK_TBL_ROW(20); + CREATE_MSG_MASK_TBL_ROW(21); + CREATE_MSG_MASK_TBL_ROW(22); +} + +static void diag_set_msg_mask(int rt_mask) +{ + int first_ssid, last_ssid, i; + uint8_t *parse_ptr, *ptr = driver->msg_masks; + + mutex_lock(&driver->diagchar_mutex); + while (*(uint32_t *)(ptr + 4)) { + first_ssid = *(uint32_t *)ptr; + ptr += 4; + last_ssid = *(uint32_t *)ptr; + ptr += 4; + parse_ptr = ptr; + pr_debug("diag: updating range %d %d\n", first_ssid, last_ssid); + for (i = 0; i < last_ssid - first_ssid + 1; i++) { + *(int *)parse_ptr = rt_mask; + parse_ptr += 4; + } + ptr += MAX_SSID_PER_RANGE * 4; + } + mutex_unlock(&driver->diagchar_mutex); +} + static void diag_update_msg_mask(int start, int end , uint8_t *buf) { int found = 0; @@ -440,8 +471,8 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) uint8_t *ptr_buffer_end = &(*(driver->msg_masks)) + MSG_MASK_SIZE; mutex_lock(&driver->diagchar_mutex); - /* First SSID can be zero : So check that last is non-zero */ + /* First SSID can be zero : So check that last is non-zero */ while (*(uint32_t *)(ptr + 4)) { first = *(uint32_t *)ptr; ptr += 4; @@ -452,9 +483,11 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) if (end <= last) if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end, - (((end - start)+1)*4))) + (((end - start)+1)*4))) { + pr_debug("diag: update ssid start %d," + " end %d\n", start, end); memcpy(ptr, buf , ((end - start)+1)*4); - else + } else printk(KERN_CRIT "Not enough" " buffer space for" " MSG_MASK\n"); @@ -465,7 +498,7 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) found = 1; break; } else { - ptr += ((last - first) + 1)*4; + ptr += MAX_SSID_PER_RANGE*4; } } /* Entry was not found - add new table */ @@ -476,6 +509,8 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) ptr += 4; memcpy(ptr, &(end), 4); ptr += 4; + pr_debug("diag: adding NEW ssid start %d, end %d\n", + start, end); memcpy(ptr, buf , ((end - start) + 1)*4); } else printk(KERN_CRIT " Not enough buffer" @@ -486,7 +521,19 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) } -static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bits) +void diag_toggle_event_mask(int toggle) +{ + uint8_t *ptr = driver->event_masks; + + mutex_lock(&driver->diagchar_mutex); + if (toggle) + memset(ptr, 0xFF, EVENT_MASK_SIZE); + else + memset(ptr, 0, EVENT_MASK_SIZE); + 
mutex_unlock(&driver->diagchar_mutex); +} + +static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bytes) { uint8_t *ptr = driver->event_masks; uint8_t *temp = buf + 2; @@ -496,27 +543,41 @@ static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bits) memset(ptr, 0 , EVENT_MASK_SIZE); else if (CHK_OVERFLOW(ptr, ptr, - ptr+EVENT_MASK_SIZE, - num_bits/8 + 1)) - memcpy(ptr, temp , num_bits/8 + 1); + ptr+EVENT_MASK_SIZE, num_bytes)) + memcpy(ptr, temp , num_bytes); else printk(KERN_CRIT "Not enough buffer space " "for EVENT_MASK\n"); mutex_unlock(&driver->diagchar_mutex); } +static void diag_disable_log_mask(void) +{ + int i = 0; + struct mask_info *parse_ptr = (struct mask_info *)(driver->log_masks); + + pr_debug("diag: disable log masks\n"); + mutex_lock(&driver->diagchar_mutex); + for (i = 0; i < MAX_EQUIP_ID; i++) { + pr_debug("diag: equip id %d\n", parse_ptr->equip_id); + if (!(parse_ptr->equip_id)) /* Reached a null entry */ + break; + memset(driver->log_masks + parse_ptr->index, 0, + (parse_ptr->num_items + 7)/8); + parse_ptr++; + } + mutex_unlock(&driver->diagchar_mutex); +} + static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items) { uint8_t *temp = buf; - struct mask_info { - int equip_id; - int index; - }; int i = 0; unsigned char *ptr_data; - int offset = 8*MAX_EQUIP_ID; - struct mask_info *ptr = (struct mask_info *)driver->log_masks; + int offset = (sizeof(struct mask_info))*MAX_EQUIP_ID; + struct mask_info *ptr = (struct mask_info *)(driver->log_masks); + pr_debug("diag: received equip id = %d\n", equip_id); mutex_lock(&driver->diagchar_mutex); /* Check if we already know index of this equipment ID */ for (i = 0; i < MAX_EQUIP_ID; i++) { @@ -525,8 +586,9 @@ static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items) break; } if ((ptr->equip_id == 0) && (ptr->index == 0)) { - /*Reached a null entry */ + /* Reached a null entry */ ptr->equip_id = equip_id; + ptr->num_items = num_items; ptr->index = driver->log_masks_length; offset = driver->log_masks_length; driver->log_masks_length += ((num_items+7)/8); @@ -539,7 +601,7 @@ static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items) + LOG_MASK_SIZE, (num_items+7)/8)) memcpy(ptr_data, temp , (num_items+7)/8); else - printk(KERN_CRIT " Not enough buffer space for LOG_MASK\n"); + pr_err("diag: Not enough buffer space for LOG_MASK\n"); mutex_unlock(&driver->diagchar_mutex); } @@ -592,9 +654,10 @@ void diag_send_data(struct diag_master_table entry, unsigned char *buf, } else { if (len > 0) { if (entry.client_id == MODEM_PROC && driver->ch) { - if (cpu_is_msm8960() && + if (chk_apps_master() && (int)(*(char *)buf) == MODE_CMD) - if ((int)(*(char *)(buf+1)) == RESET_ID) + if ((int)(*(char *)(buf+1)) == + RESET_ID) return; smd_write(driver->ch, buf, len); } else if (entry.client_id == QDSP_PROC && @@ -610,11 +673,186 @@ void diag_send_data(struct diag_master_table entry, unsigned char *buf, } } +void diag_modem_mask_update_fn(struct work_struct *work) +{ + diag_send_msg_mask_update(driver->ch_cntl, ALL_SSID, + ALL_SSID, MODEM_PROC); + diag_send_log_mask_update(driver->ch_cntl, ALL_EQUIP_ID); + diag_send_event_mask_update(driver->ch_cntl, diag_event_num_bytes); +} + +void diag_qdsp_mask_update_fn(struct work_struct *work) +{ + diag_send_msg_mask_update(driver->chqdsp_cntl, ALL_SSID, + ALL_SSID, QDSP_PROC); + diag_send_log_mask_update(driver->chqdsp_cntl, ALL_EQUIP_ID); + diag_send_event_mask_update(driver->chqdsp_cntl, diag_event_num_bytes); +} + +void 
diag_wcnss_mask_update_fn(struct work_struct *work) +{ + diag_send_msg_mask_update(driver->ch_wcnss_cntl, ALL_SSID, + ALL_SSID, WCNSS_PROC); + diag_send_log_mask_update(driver->ch_wcnss_cntl, ALL_EQUIP_ID); + diag_send_event_mask_update(driver->ch_wcnss_cntl, + diag_event_num_bytes); +} + +void diag_send_log_mask_update(smd_channel_t *ch, int equip_id) +{ + void *buf = driver->buf_log_mask_update; + int header_size = sizeof(struct diag_ctrl_log_mask); + struct mask_info *ptr = (struct mask_info *)driver->log_masks; + int i, size, wr_size = -ENOMEM, retry_count = 0, timer; + + mutex_lock(&driver->diag_cntl_mutex); + for (i = 0; i < MAX_EQUIP_ID; i++) { + size = (ptr->num_items+7)/8; + /* reached null entry */ + if ((ptr->equip_id == 0) && (ptr->index == 0)) + break; + driver->log_mask->cmd_type = DIAG_CTRL_MSG_LOG_MASK; + driver->log_mask->num_items = ptr->num_items; + driver->log_mask->data_len = 11 + size; + driver->log_mask->stream_id = 1; /* 2, if dual stream */ + driver->log_mask->status = 3; /* status for valid mask */ + driver->log_mask->equip_id = ptr->equip_id; + driver->log_mask->log_mask_size = size; + /* send only desired update, NOT ALL */ + if (equip_id == ALL_EQUIP_ID || equip_id == + driver->log_mask->equip_id) { + memcpy(buf, driver->log_mask, header_size); + memcpy(buf+header_size, driver->log_masks+ptr->index, + size); + if (ch) { + while (retry_count < 3) { + wr_size = smd_write(ch, buf, + header_size + size); + if (wr_size == -ENOMEM) { + retry_count++; + for (timer = 0; timer < 5; + timer++) + udelay(2000); + } else + break; + } + if (wr_size != header_size + size) + pr_err("diag: log mask update failed" + " %d, tried %d", wr_size, header_size + size); + else + pr_debug("diag: updated log equip ID %d" + ",len %d\n", driver->log_mask->equip_id, + driver->log_mask->log_mask_size); + } else + pr_err("diag: ch not valid for log update\n"); + } + ptr++; + } + mutex_unlock(&driver->diag_cntl_mutex); +} + +void diag_send_event_mask_update(smd_channel_t *ch, int num_bytes) +{ + void *buf = driver->buf_event_mask_update; + int header_size = sizeof(struct diag_ctrl_event_mask); + int wr_size = -ENOMEM, retry_count = 0, timer; + + mutex_lock(&driver->diag_cntl_mutex); + if (num_bytes == 0) { + pr_debug("diag: event mask not set yet, so no update\n"); + mutex_unlock(&driver->diag_cntl_mutex); + return; + } + /* send event mask update */ + driver->event_mask->cmd_type = DIAG_CTRL_MSG_EVENT_MASK; + driver->event_mask->data_len = 7 + num_bytes; + driver->event_mask->stream_id = 1; /* 2, if dual stream */ + driver->event_mask->status = 3; /* status for valid mask */ + driver->event_mask->event_config = diag_event_config; /* event config */ + driver->event_mask->event_mask_size = num_bytes; + memcpy(buf, driver->event_mask, header_size); + memcpy(buf+header_size, driver->event_masks, num_bytes); + if (ch) { + while (retry_count < 3) { + wr_size = smd_write(ch, buf, header_size + num_bytes); + if (wr_size == -ENOMEM) { + retry_count++; + for (timer = 0; timer < 5; timer++) + udelay(2000); + } else + break; + } + if (wr_size != header_size + num_bytes) + pr_err("diag: error writing event mask %d, tried %d\n", + wr_size, header_size + num_bytes); + } else + pr_err("diag: ch not valid for event update\n"); + mutex_unlock(&driver->diag_cntl_mutex); +} + +void diag_send_msg_mask_update(smd_channel_t *ch, int updated_ssid_first, + int updated_ssid_last, int proc) +{ + void *buf = driver->buf_msg_mask_update; + int first, last, size = -ENOMEM, retry_count = 0, timer; + int header_size = 
sizeof(struct diag_ctrl_msg_mask); + uint8_t *ptr = driver->msg_masks; + + mutex_lock(&driver->diag_cntl_mutex); + while (*(uint32_t *)(ptr + 4)) { + first = *(uint32_t *)ptr; + ptr += 4; + last = *(uint32_t *)ptr; + ptr += 4; + if ((updated_ssid_first >= first && updated_ssid_last <= last) + || (updated_ssid_first == ALL_SSID)) { + /* send f3 mask update */ + driver->msg_mask->cmd_type = DIAG_CTRL_MSG_F3_MASK; + driver->msg_mask->msg_mask_size = last - first + 1; + driver->msg_mask->data_len = 11 + + 4 * (driver->msg_mask->msg_mask_size); + driver->msg_mask->stream_id = 1; /* 2, if dual stream */ + driver->msg_mask->status = 3; /* status valid mask */ + driver->msg_mask->msg_mode = 0; /* Legcay mode */ + driver->msg_mask->ssid_first = first; + driver->msg_mask->ssid_last = last; + memcpy(buf, driver->msg_mask, header_size); + memcpy(buf+header_size, ptr, + 4 * (driver->msg_mask->msg_mask_size)); + if (ch) { + while (retry_count < 3) { + size = smd_write(ch, buf, header_size + + 4*(driver->msg_mask->msg_mask_size)); + if (size == -ENOMEM) { + retry_count++; + for (timer = 0; timer < 5; + timer++) + udelay(2000); + } else + break; + } + if (size != header_size + + 4*(driver->msg_mask->msg_mask_size)) + pr_err("diag: proc %d, msg mask update " + "fail %d, tried %d\n", proc, size, + header_size + 4*(driver->msg_mask->msg_mask_size)); + else + pr_debug("diag: sending mask update for" + "ssid first %d, last %d on PROC %d\n", first, last, proc); + } else + pr_err("diag: proc %d, ch invalid msg mask" + "update\n", proc); + } + ptr += MAX_SSID_PER_RANGE*4; + } + mutex_unlock(&driver->diag_cntl_mutex); +} + static int diag_process_apps_pkt(unsigned char *buf, int len) { uint16_t subsys_cmd_code; int subsys_id, ssid_first, ssid_last, ssid_range; - int packet_type = 1, i, cmd_code; + int packet_type = 1, i, cmd_code, rt_mask; unsigned char *temp = buf; int data_type; #if defined(CONFIG_DIAG_OVER_USB) @@ -622,6 +860,167 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) unsigned char *ptr; #endif + /* Set log masks */ + if (*buf == 0x73 && *(int *)(buf+4) == 3) { + buf += 8; + /* Read Equip ID and pass as first param below*/ + diag_update_log_mask(*(int *)buf, buf+8, *(int *)(buf+4)); + diag_update_userspace_clients(LOG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x73; + *(int *)(driver->apps_rsp_buf + 4) = 0x3; /* op. 
ID */ + *(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success */ + payload_length = 8 + ((*(int *)(buf + 4)) + 7)/8; + for (i = 0; i < payload_length; i++) + *(int *)(driver->apps_rsp_buf+12+i) = *(buf+i); + if (driver->ch_cntl) + diag_send_log_mask_update(driver->ch_cntl, + *(int *)buf); + if (driver->chqdsp_cntl) + diag_send_log_mask_update(driver->chqdsp_cntl, + *(int *)buf); + if (driver->ch_wcnss_cntl) + diag_send_log_mask_update(driver->ch_wcnss_cntl, + *(int *)buf); + ENCODE_RSP_AND_SEND(12 + payload_length - 1); + return 0; + } else + buf = temp; +#endif + } /* Disable log masks */ + else if (*buf == 0x73 && *(int *)(buf+4) == 0) { + buf += 8; + /* Disable mask for each log code */ + diag_disable_log_mask(); + diag_update_userspace_clients(LOG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x73; + driver->apps_rsp_buf[1] = 0x0; + driver->apps_rsp_buf[2] = 0x0; + driver->apps_rsp_buf[3] = 0x0; + *(int *)(driver->apps_rsp_buf + 4) = 0x0; + if (driver->ch_cntl) + diag_send_log_mask_update(driver->ch_cntl, + ALL_EQUIP_ID); + if (driver->chqdsp_cntl) + diag_send_log_mask_update(driver->chqdsp_cntl, + ALL_EQUIP_ID); + if (driver->ch_wcnss_cntl) + diag_send_log_mask_update(driver->ch_wcnss_cntl, + ALL_EQUIP_ID); + ENCODE_RSP_AND_SEND(7); + return 0; + } else + buf = temp; +#endif + } /* Set runtime message mask */ + else if ((*buf == 0x7d) && (*(buf+1) == 0x4)) { + ssid_first = *(uint16_t *)(buf + 2); + ssid_last = *(uint16_t *)(buf + 4); + ssid_range = 4 * (ssid_last - ssid_first + 1); + pr_debug("diag: received mask update for ssid_first = %d," + " ssid_last = %d", ssid_first, ssid_last); + diag_update_msg_mask(ssid_first, ssid_last , buf + 8); + diag_update_userspace_clients(MSG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + for (i = 0; i < 8 + ssid_range; i++) + *(driver->apps_rsp_buf + i) = *(buf+i); + *(driver->apps_rsp_buf + 6) = 0x1; + if (driver->ch_cntl) + diag_send_msg_mask_update(driver->ch_cntl, + ssid_first, ssid_last, MODEM_PROC); + if (driver->chqdsp_cntl) + diag_send_msg_mask_update(driver->chqdsp_cntl, + ssid_first, ssid_last, QDSP_PROC); + if (driver->ch_wcnss_cntl) + diag_send_msg_mask_update(driver->ch_wcnss_cntl, + ssid_first, ssid_last, WCNSS_PROC); + ENCODE_RSP_AND_SEND(8 + ssid_range - 1); + return 0; + } else + buf = temp; +#endif + } /* Set ALL runtime message mask */ + else if ((*buf == 0x7d) && (*(buf+1) == 0x5)) { + rt_mask = *(int *)(buf + 4); + diag_set_msg_mask(rt_mask); + diag_update_userspace_clients(MSG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x7d; /* cmd_code */ + driver->apps_rsp_buf[1] = 0x5; /* set subcommand */ + driver->apps_rsp_buf[2] = 1; /* success */ + driver->apps_rsp_buf[3] = 0; /* rsvd */ + *(int *)(driver->apps_rsp_buf + 4) = rt_mask; + /* send msg mask update to peripheral */ + if (driver->ch_cntl) + diag_send_msg_mask_update(driver->ch_cntl, + ALL_SSID, ALL_SSID, MODEM_PROC); + if (driver->chqdsp_cntl) + diag_send_msg_mask_update(driver->chqdsp_cntl, + ALL_SSID, ALL_SSID, QDSP_PROC); + if (driver->ch_wcnss_cntl) + diag_send_msg_mask_update(driver->ch_wcnss_cntl, + ALL_SSID, ALL_SSID, WCNSS_PROC); + ENCODE_RSP_AND_SEND(7); + return 0; + } else + buf = temp; +#endif + } else if (*buf == 0x82) { /* event mask change */ + buf += 4; + diag_event_num_bytes = (*(uint16_t *)buf)/8+1; + diag_update_event_mask(buf, 1, (*(uint16_t *)buf)/8+1); + diag_update_userspace_clients(EVENT_MASKS_TYPE); +#if 
defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x82; + driver->apps_rsp_buf[1] = 0x0; + *(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0; + *(uint16_t *)(driver->apps_rsp_buf + 4) = + EVENT_LAST_ID + 1; + memcpy(driver->apps_rsp_buf+6, driver->event_masks, + EVENT_LAST_ID/8+1); + if (driver->ch_cntl) + diag_send_event_mask_update(driver->ch_cntl, + diag_event_num_bytes); + if (driver->chqdsp_cntl) + diag_send_event_mask_update(driver->chqdsp_cntl, + diag_event_num_bytes); + if (driver->ch_wcnss_cntl) + diag_send_event_mask_update( + driver->ch_wcnss_cntl, diag_event_num_bytes); + ENCODE_RSP_AND_SEND(6 + EVENT_LAST_ID/8); + return 0; + } else + buf = temp; +#endif + } else if (*buf == 0x60) { + diag_event_config = *(buf+1); + diag_toggle_event_mask(*(buf+1)); + diag_update_userspace_clients(EVENT_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x60; + driver->apps_rsp_buf[1] = 0x0; + driver->apps_rsp_buf[2] = 0x0; + if (driver->ch_cntl) + diag_send_event_mask_update(driver->ch_cntl, + diag_event_num_bytes); + if (driver->chqdsp_cntl) + diag_send_event_mask_update(driver->chqdsp_cntl, + diag_event_num_bytes); + if (driver->ch_wcnss_cntl) + diag_send_event_mask_update( + driver->ch_wcnss_cntl, diag_event_num_bytes); + ENCODE_RSP_AND_SEND(2); + return 0; + } +#endif + } /* Check for registered clients and forward packet to apropriate proc */ cmd_code = (int)(*(char *)buf); temp++; @@ -631,13 +1030,13 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) temp += 2; data_type = APPS_DATA; /* Dont send any command other than mode reset */ - if (cpu_is_msm8960() && cmd_code == MODE_CMD) { + if (chk_apps_master() && cmd_code == MODE_CMD) { if (subsys_id != RESET_ID) data_type = MODEM_DATA; } pr_debug("diag: %d %d %d", cmd_code, subsys_id, subsys_cmd_code); - for (i = 0; i < diag_max_registration; i++) { + for (i = 0; i < diag_max_reg; i++) { entry = driver->table[i]; if (entry.process_id != NO_PROCESS) { if (entry.cmd_code == cmd_code && entry.subsys_id == @@ -671,71 +1070,9 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) } } } - /* set event mask */ - if (*buf == 0x82) { - buf += 4; - diag_update_event_mask(buf, 1, *(uint16_t *)buf); - diag_update_userspace_clients(EVENT_MASKS_TYPE); - } - /* event mask change */ - else if ((*buf == 0x60) && (*(buf+1) == 0x0)) { - diag_update_event_mask(buf+1, 0, 0); - diag_update_userspace_clients(EVENT_MASKS_TYPE); -#if defined(CONFIG_DIAG_OVER_USB) - /* Check for Apps Only 8960 */ - if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) { - /* echo response back for apps only DIAG */ - driver->apps_rsp_buf[0] = 0x60; - driver->apps_rsp_buf[1] = 0x0; - driver->apps_rsp_buf[2] = 0x0; - ENCODE_RSP_AND_SEND(2); - return 0; - } -#endif - } - /* Set log masks */ - else if (*buf == 0x73 && *(int *)(buf+4) == 3) { - buf += 8; - /* Read Equip ID and pass as first param below*/ - diag_update_log_mask(*(int *)buf, buf+8, *(int *)(buf+4)); - diag_update_userspace_clients(LOG_MASKS_TYPE); #if defined(CONFIG_DIAG_OVER_USB) - /* Check for Apps Only 8960 */ - if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) { - /* echo response back for Apps only DIAG */ - driver->apps_rsp_buf[0] = 0x73; - *(int *)(driver->apps_rsp_buf + 4) = 0x3; /* op. 
ID */ - *(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success */ - payload_length = 8 + ((*(int *)(buf + 4)) + 7)/8; - for (i = 0; i < payload_length; i++) - *(int *)(driver->apps_rsp_buf+12+i) = - *(buf+8+i); - ENCODE_RSP_AND_SEND(12 + payload_length - 1); - return 0; - } -#endif - } - /* Check for set message mask */ - else if ((*buf == 0x7d) && (*(buf+1) == 0x4)) { - ssid_first = *(uint16_t *)(buf + 2); - ssid_last = *(uint16_t *)(buf + 4); - ssid_range = 4 * (ssid_last - ssid_first + 1); - diag_update_msg_mask(ssid_first, ssid_last , buf + 8); - diag_update_userspace_clients(MSG_MASKS_TYPE); -#if defined(CONFIG_DIAG_OVER_USB) - if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) { - /* echo response back for apps only DIAG */ - for (i = 0; i < 8 + ssid_range; i++) - *(driver->apps_rsp_buf + i) = *(buf+i); - ENCODE_RSP_AND_SEND(8 + ssid_range - 1); - return 0; - } -#endif - } -#if defined(CONFIG_DIAG_OVER_USB) - /* Check for Apps Only 8960 & get event mask request */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) - && *buf == 0x81) { + /* Check for Apps Only & get event mask request */ + if (!(driver->ch) && chk_apps_only() && *buf == 0x81) { driver->apps_rsp_buf[0] = 0x81; driver->apps_rsp_buf[1] = 0x0; *(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0; @@ -745,8 +1082,8 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) ENCODE_RSP_AND_SEND(6 + EVENT_LAST_ID/8); return 0; } - /* Get log ID range & Check for Apps Only 8960 */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) + /* Get log ID range & Check for Apps Only */ + else if (!(driver->ch) && chk_apps_only() && (*buf == 0x73) && *(int *)(buf+4) == 1) { driver->apps_rsp_buf[0] = 0x73; *(int *)(driver->apps_rsp_buf + 4) = 0x1; /* operation ID */ @@ -771,7 +1108,7 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) return 0; } /* Respond to Get SSID Range request message */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) + else if (!(driver->ch) && chk_apps_only() && (*buf == 0x7d) && (*(buf+1) == 0x1)) { driver->apps_rsp_buf[0] = 0x7d; driver->apps_rsp_buf[1] = 0x1; @@ -816,11 +1153,19 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) *(uint16_t *)(driver->apps_rsp_buf + 78) = MSG_SSID_17_LAST; *(uint16_t *)(driver->apps_rsp_buf + 80) = MSG_SSID_18; *(uint16_t *)(driver->apps_rsp_buf + 82) = MSG_SSID_18_LAST; - ENCODE_RSP_AND_SEND(83); + *(uint16_t *)(driver->apps_rsp_buf + 84) = MSG_SSID_19; + *(uint16_t *)(driver->apps_rsp_buf + 86) = MSG_SSID_19_LAST; + *(uint16_t *)(driver->apps_rsp_buf + 88) = MSG_SSID_20; + *(uint16_t *)(driver->apps_rsp_buf + 90) = MSG_SSID_20_LAST; + *(uint16_t *)(driver->apps_rsp_buf + 92) = MSG_SSID_21; + *(uint16_t *)(driver->apps_rsp_buf + 94) = MSG_SSID_21_LAST; + *(uint16_t *)(driver->apps_rsp_buf + 96) = MSG_SSID_22; + *(uint16_t *)(driver->apps_rsp_buf + 98) = MSG_SSID_22_LAST; + ENCODE_RSP_AND_SEND(99); return 0; } - /* Check for AO8960 Respond to Get Subsys Build mask */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) + /* Check for Apps Only Respond to Get Subsys Build mask */ + else if (!(driver->ch) && chk_apps_only() && (*buf == 0x7d) && (*(buf+1) == 0x2)) { ssid_first = *(uint16_t *)(buf + 2); ssid_last = *(uint16_t *)(buf + 4); @@ -911,12 +1256,28 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_18[i/4]; break; + case MSG_SSID_19: + for (i = 0; i < ssid_range; i += 4) + *(int 
*)(ptr + i) = msg_bld_masks_19[i/4]; + break; + case MSG_SSID_20: + for (i = 0; i < ssid_range; i += 4) + *(int *)(ptr + i) = msg_bld_masks_20[i/4]; + break; + case MSG_SSID_21: + for (i = 0; i < ssid_range; i += 4) + *(int *)(ptr + i) = msg_bld_masks_21[i/4]; + break; + case MSG_SSID_22: + for (i = 0; i < ssid_range; i += 4) + *(int *)(ptr + i) = msg_bld_masks_22[i/4]; + break; } ENCODE_RSP_AND_SEND(8 + ssid_range - 1); return 0; } /* Check for download command */ - else if ((cpu_is_msm8x60() || cpu_is_msm8960()) && (*buf == 0x3A)) { + else if ((cpu_is_msm8x60() || chk_apps_master()) && (*buf == 0x3A)) { /* send response back */ driver->apps_rsp_buf[0] = *buf; ENCODE_RSP_AND_SEND(0); @@ -971,6 +1332,11 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) void diag_send_error_rsp(int index) { int i; + + if (index > 490) { + pr_err("diag: error response too huge, aborting\n"); + return; + } driver->apps_rsp_buf[0] = 0x13; /* error code 13 */ for (i = 0; i < index; i++) driver->apps_rsp_buf[i+1] = *(driver->hdlc_buf+i); @@ -984,10 +1350,6 @@ void diag_process_hdlc(void *data, unsigned len) { struct diag_hdlc_decode_type hdlc; int ret, type = 0; -#ifdef DIAG_DEBUG - int i; -#endif - pr_debug("diag: HDLC decode fn, len of data %d\n", len); hdlc.dest_ptr = driver->hdlc_buf; hdlc.dest_size = USB_MAX_OUT_BUF; @@ -1011,13 +1373,13 @@ void diag_process_hdlc(void *data, unsigned len) driver->debug_flag = 0; } /* send error responses from APPS for Central Routing */ - if (type == 1 && chk_config_get_id() == AO8960_TOOLS_ID) { + if (type == 1 && chk_apps_only()) { diag_send_error_rsp(hdlc.dest_idx); type = 0; } /* implies this packet is NOT meant for apps */ if (!(driver->ch) && type == 1) { - if (chk_config_get_id() == AO8960_TOOLS_ID) { + if (chk_apps_only()) { diag_send_error_rsp(hdlc.dest_idx); } else { /* APQ 8060, Let Q6 respond */ if (driver->chqdsp) @@ -1036,11 +1398,7 @@ void diag_process_hdlc(void *data, unsigned len) /* ignore 2 bytes for CRC, one for 7E and send */ if ((driver->ch) && (ret) && (type) && (hdlc.dest_idx > 3)) { APPEND_DEBUG('g'); -#ifdef CONFIG_MODEM_DIAG_MASTER - smd_write(driver->ch, data, len); -#else smd_write(driver->ch, driver->hdlc_buf, hdlc.dest_idx - 3); -#endif APPEND_DEBUG('h'); #ifdef DIAG_DEBUG printk(KERN_INFO "writing data to SMD, pkt length %d\n", len); @@ -1076,17 +1434,14 @@ int diagfwd_connect(void) queue_work(driver->diag_wq, &(driver->diag_read_smd_work)); queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work)); queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); - - if (chk_config_get_id() == AO8960_TOOLS_ID) { - /* Poll SMD CNTL channels to check for data */ - queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work)); - queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_cntl_work)); - queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_cntl_work)); - } + /* Poll SMD CNTL channels to check for data */ + diag_smd_cntl_notify(NULL, SMD_EVENT_DATA); + diag_smd_qdsp_cntl_notify(NULL, SMD_EVENT_DATA); + diag_smd_wcnss_cntl_notify(NULL, SMD_EVENT_DATA); /* Poll USB channel to check for data*/ queue_work(driver->diag_wq, &(driver->diag_read_work)); #ifdef CONFIG_DIAG_SDIO_PIPE - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { if (driver->mdm_ch && !IS_ERR(driver->mdm_ch)) diagfwd_connect_sdio(); else @@ -1101,8 +1456,7 @@ int diagfwd_disconnect(void) printk(KERN_DEBUG "diag: USB disconnected\n"); driver->usb_connected = 0; driver->debug_flag = 1; - if 
(driver->usb_connected) - usb_diag_free_req(driver->legacy_ch); + usb_diag_free_req(driver->legacy_ch); if (driver->logging_mode == USB_MODE) { driver->in_busy_1 = 1; driver->in_busy_2 = 1; @@ -1111,7 +1465,7 @@ int diagfwd_disconnect(void) driver->in_busy_wcnss = 1; } #ifdef CONFIG_DIAG_SDIO_PIPE - if (diag_support_mdm9k) + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) if (driver->mdm_ch && !IS_ERR(driver->mdm_ch)) diagfwd_disconnect_sdio(); #endif @@ -1140,26 +1494,19 @@ int diagfwd_write_complete(struct diag_request *diag_write_ptr) driver->in_busy_qdsp_2 = 0; APPEND_DEBUG('P'); queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work)); - } else if (is_wcnss_used && buf == (void *)driver->buf_in_wcnss) { + } else if (buf == (void *)driver->buf_in_wcnss) { driver->in_busy_wcnss = 0; APPEND_DEBUG('R'); queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); -#if DIAG_XPST - } else if (driver->in_busy_dmrounter == 1) { - driver->in_busy_dmrounter = 0; -#endif } #ifdef CONFIG_DIAG_SDIO_PIPE - else if (buf == (void *)driver->buf_in_sdio_1) { - driver->in_busy_sdio_1 = 0; - APPEND_DEBUG('q'); - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - } else if (buf == (void *)driver->buf_in_sdio_2) { - driver->in_busy_sdio_2 = 0; - APPEND_DEBUG('Q'); - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - } + else if (buf == (void *)driver->buf_in_sdio) + if (machine_is_msm8x60_fusion() || + machine_is_msm8x60_fusn_ffa()) + diagfwd_write_complete_sdio(); + else + pr_err("diag: Incorrect buffer pointer while WRITE"); #endif else { diagmem_free(driver, (unsigned char *)buf, POOL_TYPE_HDLC); @@ -1186,14 +1533,6 @@ int diagfwd_read_complete(struct diag_request *diag_read_ptr) DUMP_PREFIX_ADDRESS, diag_read_ptr->buf, diag_read_ptr->actual, 1); #endif /* DIAG DEBUG */ -#if DIAG_XPST - if (driver->nohdlc) { - driver->usb_read_ptr->buf = driver->usb_buf_out; - driver->usb_read_ptr->length = USB_MAX_OUT_BUF; - usb_diag_read(driver->legacy_ch, driver->usb_read_ptr); - return 0; - } -#endif if (driver->logging_mode == USB_MODE) { if (status != -ECONNRESET && status != -ESHUTDOWN) queue_work(driver->diag_wq, @@ -1205,7 +1544,8 @@ int diagfwd_read_complete(struct diag_request *diag_read_ptr) } #ifdef CONFIG_DIAG_SDIO_PIPE else if (buf == (void *)driver->usb_buf_mdm_out) { - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || + machine_is_msm8x60_fusn_ffa()) { driver->read_len_mdm = diag_read_ptr->actual; diagfwd_read_complete_sdio(); } else @@ -1267,11 +1607,7 @@ static void diag_smd_notify(void *ctxt, unsigned event) driver->ch = 0; return; } else if (event == SMD_EVENT_OPEN) { - if (ch_temp) - driver->ch = ch_temp; - else - DIAGFWD_INFO("%s: smd_open(%s):, ch_temp:%p, driver->ch:%p, &driver->ch:%p\n", - __func__, SMDDIAG_NAME, ch_temp, driver->ch, &driver->ch); + driver->ch = ch_temp; } queue_work(driver->diag_wq, &(driver->diag_read_smd_work)); } @@ -1279,74 +1615,54 @@ static void diag_smd_notify(void *ctxt, unsigned event) #if defined(CONFIG_MSM_N_WAY_SMD) static void diag_smd_qdsp_notify(void *ctxt, unsigned event) { + if (event == SMD_EVENT_CLOSE) { + pr_info("diag: clean lpass registration\n"); + diag_clear_reg(QDSP_PROC); + driver->chqdsp = 0; + return; + } else if (event == SMD_EVENT_OPEN) { + driver->chqdsp = chqdsp_temp; + } queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work)); } #endif static void diag_smd_wcnss_notify(void *ctxt, unsigned event) { - queue_work(driver->diag_wq, 
&(driver->diag_read_smd_wcnss_work)); -} - -#if DIAG_XPST -void diag_smd_enable(smd_channel_t *ch, char *src, int mode) -{ - int r = 0; - static smd_channel_t *_ch; - DIAGFWD_INFO("smd_try_open(%s): mode=%d\n", src, mode); - - mutex_lock(&driver->smd_lock); - diag_smd_function_mode = mode; - if (mode) { - if (!driver->ch) { - r = smd_open(SMDDIAG_NAME, &driver->ch, driver, diag_smd_notify); - if (!r) - _ch = driver->ch; - } else - _ch = driver->ch; - } else { - if (driver->ch) { - r = smd_close(driver->ch); - driver->ch = NULL; - if (!r) - _ch = driver->ch; - } + if (event == SMD_EVENT_CLOSE) { + pr_info("diag: clean wcnss registration\n"); + diag_clear_reg(WCNSS_PROC); + driver->ch_wcnss = 0; + return; + } else if (event == SMD_EVENT_OPEN) { + driver->ch_wcnss = ch_wcnss_temp; } - ch = _ch; - mutex_unlock(&driver->smd_lock); - DIAGFWD_INFO("smd_try_open(%s): r=%d _ch=%x\n", src, r, (unsigned int)ch); + queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); } -#endif static int diag_smd_probe(struct platform_device *pdev) { int r = 0; if (pdev->id == SMD_APPS_MODEM) { - r = smd_open(SMDDIAG_NAME, &driver->ch, driver, diag_smd_notify); - wmb(); + r = smd_open("DIAG", &driver->ch, driver, diag_smd_notify); ch_temp = driver->ch; - DIAGFWD_INFO("%s: smd_open(%s):%d, ch_temp:%p, driver->ch:%p, &driver->ch:%p\n", - __func__, SMDDIAG_NAME, r, ch_temp, driver->ch, &driver->ch); } #if defined(CONFIG_MSM_N_WAY_SMD) if (pdev->id == SMD_APPS_QDSP) { -#if defined(CONFIG_MACH_MECHA) || defined(CONFIG_ARCH_MSM8X60_LTE) \ - || defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) r = smd_named_open_on_edge("DIAG", SMD_APPS_QDSP , &driver->chqdsp, driver, diag_smd_qdsp_notify); -#else - r = smd_open("DSP_DIAG", &driver->chqdsp, driver, diag_smd_qdsp_notify); -#endif + chqdsp_temp = driver->chqdsp; } #endif - if (pdev->id == SMD_APPS_WCNSS) + if (pdev->id == SMD_APPS_WCNSS) { r = smd_named_open_on_edge("APPS_RIVA_DATA", SMD_APPS_WCNSS , &driver->ch_wcnss, driver, diag_smd_wcnss_notify); + ch_wcnss_temp = driver->ch_wcnss; + } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pr_debug("diag: open SMD port, Id = %d, r = %d\n", pdev->id, r); - smd_diag_initialized = 1; return 0; } @@ -1372,7 +1688,7 @@ static struct platform_driver msm_smd_ch1_driver = { .probe = diag_smd_probe, .driver = { - .name = SMDDIAG_NAME, + .name = "DIAG", .owner = THIS_MODULE, .pm = &diagfwd_dev_pm_ops, }, @@ -1392,16 +1708,26 @@ void diagfwd_init(void) { diag_debug_buf_idx = 0; driver->read_len_legacy = 0; + mutex_init(&driver->diag_cntl_mutex); - /* FIXME: there should be a better way to know if wcnss enabled */ - if (chk_config_get_id() == AO8960_TOOLS_ID) { - is_wcnss_used = 1; - DIAGFWD_INFO("wcnss channel was enabled in the platform\n"); - } else { - is_wcnss_used = 0; - DIAGFWD_INFO("wcnss channel was not enabled in the platform\n"); + if (driver->event_mask == NULL) { + driver->event_mask = kzalloc(sizeof( + struct diag_ctrl_event_mask), GFP_KERNEL); + if (driver->event_mask == NULL) + goto err; + } + if (driver->msg_mask == NULL) { + driver->msg_mask = kzalloc(sizeof( + struct diag_ctrl_msg_mask), GFP_KERNEL); + if (driver->msg_mask == NULL) + goto err; + } + if (driver->log_mask == NULL) { + driver->log_mask = kzalloc(sizeof( + struct diag_ctrl_log_mask), GFP_KERNEL); + if (driver->log_mask == NULL) + goto err; } - if (driver->buf_in_1 == NULL) { driver->buf_in_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (driver->buf_in_1 == NULL) @@ -1422,11 +1748,29 @@ void diagfwd_init(void) if 
(driver->buf_in_qdsp_2 == NULL) goto err; } - if (is_wcnss_used && driver->buf_in_wcnss == NULL) { + if (driver->buf_in_wcnss == NULL) { driver->buf_in_wcnss = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (driver->buf_in_wcnss == NULL) goto err; } + if (driver->buf_msg_mask_update == NULL) { + driver->buf_msg_mask_update = kzalloc(APPS_BUF_SIZE, + GFP_KERNEL); + if (driver->buf_msg_mask_update == NULL) + goto err; + } + if (driver->buf_log_mask_update == NULL) { + driver->buf_log_mask_update = kzalloc(APPS_BUF_SIZE, + GFP_KERNEL); + if (driver->buf_log_mask_update == NULL) + goto err; + } + if (driver->buf_event_mask_update == NULL) { + driver->buf_event_mask_update = kzalloc(APPS_BUF_SIZE, + GFP_KERNEL); + if (driver->buf_event_mask_update == NULL) + goto err; + } if (driver->usb_buf_out == NULL && (driver->usb_buf_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL)) == NULL) @@ -1442,10 +1786,12 @@ void diagfwd_init(void) && (driver->msg_masks = kzalloc(MSG_MASK_SIZE, GFP_KERNEL)) == NULL) goto err; + diag_create_msg_mask_table(); + diag_event_num_bytes = 0; if (driver->log_masks == NULL && (driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL)) == NULL) goto err; - driver->log_masks_length = 8*MAX_EQUIP_ID; + driver->log_masks_length = (sizeof(struct mask_info))*MAX_EQUIP_ID; if (driver->event_masks == NULL && (driver->event_masks = kzalloc(EVENT_MASK_SIZE, GFP_KERNEL)) == NULL) @@ -1455,13 +1801,6 @@ void diagfwd_init(void) ((driver->num_clients) * sizeof(struct diag_client_map), GFP_KERNEL)) == NULL) goto err; -#ifdef CONFIG_DIAG_SDIO_PIPE - if (driver->mdmclient_map == NULL && - (driver->mdmclient_map = kzalloc - ((driver->num_mdmclients) * sizeof(struct diag_client_map), - GFP_KERNEL)) == NULL) - goto err; -#endif if (driver->buf_tbl == NULL) driver->buf_tbl = kzalloc(buf_tbl_size * sizeof(struct diag_write_device), GFP_KERNEL); @@ -1471,14 +1810,8 @@ void diagfwd_init(void) (driver->data_ready = kzalloc(driver->num_clients * sizeof(int) , GFP_KERNEL)) == NULL) goto err; -#ifdef CONFIG_DIAG_SDIO_PIPE - if (driver->mdmdata_ready == NULL && - (driver->mdmdata_ready = kzalloc(driver->num_mdmclients * sizeof(struct - diag_client_map), GFP_KERNEL)) == NULL) - goto err; -#endif if (driver->table == NULL && - (driver->table = kzalloc(diag_max_registration* + (driver->table = kzalloc(diag_max_reg* sizeof(struct diag_master_table), GFP_KERNEL)) == NULL) goto err; @@ -1523,7 +1856,7 @@ void diagfwd_init(void) GFP_KERNEL)) == NULL) goto err; if (driver->apps_rsp_buf == NULL) { - driver->apps_rsp_buf = kzalloc(500, GFP_KERNEL); + driver->apps_rsp_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL); if (driver->apps_rsp_buf == NULL) goto err; } @@ -1531,15 +1864,18 @@ void diagfwd_init(void) #ifdef CONFIG_DIAG_OVER_USB INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn); INIT_WORK(&(driver->diag_read_work), diag_read_work_fn); + INIT_WORK(&(driver->diag_modem_mask_update_work), + diag_modem_mask_update_fn); + INIT_WORK(&(driver->diag_qdsp_mask_update_work), + diag_qdsp_mask_update_fn); + INIT_WORK(&(driver->diag_wcnss_mask_update_work), + diag_wcnss_mask_update_fn); driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver, diag_usb_legacy_notifier); if (IS_ERR(driver->legacy_ch)) { printk(KERN_ERR "Unable to open USB diag legacy channel\n"); goto err; } -#endif -#if DIAG_XPST - mutex_init(&driver->smd_lock); #endif platform_driver_register(&msm_smd_ch1_driver); platform_driver_register(&diag_smd_lite_driver); @@ -1547,11 +1883,17 @@ void diagfwd_init(void) return; err: pr_err("diag: Could not initialize diag 
buffers"); + kfree(driver->event_mask); + kfree(driver->log_mask); + kfree(driver->msg_mask); kfree(driver->buf_in_1); kfree(driver->buf_in_2); kfree(driver->buf_in_qdsp_1); kfree(driver->buf_in_qdsp_2); kfree(driver->buf_in_wcnss); + kfree(driver->buf_msg_mask_update); + kfree(driver->buf_log_mask_update); + kfree(driver->buf_event_mask_update); kfree(driver->usb_buf_out); kfree(driver->hdlc_buf); kfree(driver->msg_masks); @@ -1582,7 +1924,6 @@ void diagfwd_exit(void) driver->ch = 0; /* SMD can make this NULL */ driver->chqdsp = 0; driver->ch_wcnss = 0; - smd_diag_initialized = 0; #ifdef CONFIG_DIAG_OVER_USB if (driver->usb_connected) usb_diag_free_req(driver->legacy_ch); @@ -1590,11 +1931,17 @@ void diagfwd_exit(void) #endif platform_driver_unregister(&msm_smd_ch1_driver); platform_driver_unregister(&diag_smd_lite_driver); + kfree(driver->event_mask); + kfree(driver->log_mask); + kfree(driver->msg_mask); kfree(driver->buf_in_1); kfree(driver->buf_in_2); kfree(driver->buf_in_qdsp_1); kfree(driver->buf_in_qdsp_2); kfree(driver->buf_in_wcnss); + kfree(driver->buf_msg_mask_update); + kfree(driver->buf_log_mask_update); + kfree(driver->buf_event_mask_update); kfree(driver->usb_buf_out); kfree(driver->hdlc_buf); kfree(driver->msg_masks); diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h index 0843bb39..57444591 100644 --- a/drivers/char/diag/diagfwd.h +++ b/drivers/char/diag/diagfwd.h @@ -1,5 +1,4 @@ - -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,11 +16,6 @@ #define NO_PROCESS 0 #define NON_APPS_PROC -1 -#define DIAGLOG_MODE_NONE 0 -#define DIAGLOG_MODE_HEAD 1 -#define DIAGLOG_MODE_FULL 2 -#define DIAGLOG_MODE_PING 3 - void diagfwd_init(void); void diagfwd_exit(void); void diag_process_hdlc(void *data, unsigned len); @@ -32,23 +26,18 @@ void diag_usb_legacy_notifier(void *, unsigned, struct diag_request *); long diagchar_ioctl(struct file *, unsigned int, unsigned long); int diag_device_write(void *, int, struct diag_request *); int mask_request_validate(unsigned char mask_buf[]); -int chk_config_get_id(void); void diag_clear_reg(int); - +int chk_apps_only(void); +void diag_send_event_mask_update(smd_channel_t *, int num_bytes); +void diag_send_msg_mask_update(smd_channel_t *, int ssid_first, + int ssid_last, int proc); +void diag_send_log_mask_update(smd_channel_t *, int); /* State for diag forwarding */ #ifdef CONFIG_DIAG_OVER_USB int diagfwd_connect(void); int diagfwd_disconnect(void); #endif -extern int diag_support_mdm9k; extern int diag_debug_buf_idx; extern unsigned char diag_debug_buf[1024]; -extern unsigned diag7k_debug_mask; -extern unsigned diag9k_debug_mask; - -#define SMD_FUNC_CLOSE 0 -#define SMD_FUNC_OPEN_DIAG 1 -#define SMD_FUNC_OPEN_BT 2 -void diag_smd_enable(smd_channel_t *ch, char *src, int mode); - +extern int diag_event_num_bytes; #endif diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 13c3c478..fcf16d05 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,12 +16,81 @@ #include "diagchar.h" #include "diagfwd.h" #include "diagfwd_cntl.h" -#ifdef CONFIG_DIAG_OVER_USB -#include -#endif #define HDR_SIZ 8 +void diag_smd_cntl_notify(void *ctxt, unsigned event) +{ + int r1, r2; + + if (!(driver->ch_cntl)) + return; + + switch (event) { + case SMD_EVENT_DATA: + r1 = smd_read_avail(driver->ch_cntl); + r2 = smd_cur_packet_size(driver->ch_cntl); + if (r1 > 0 && r1 == r2) + queue_work(driver->diag_wq, + &(driver->diag_read_smd_cntl_work)); + else + pr_debug("diag: incomplete pkt on Modem CNTL ch\n"); + break; + case SMD_EVENT_OPEN: + queue_work(driver->diag_cntl_wq, + &(driver->diag_modem_mask_update_work)); + break; + } +} + +void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event) +{ + int r1, r2; + + if (!(driver->chqdsp_cntl)) + return; + + switch (event) { + case SMD_EVENT_DATA: + r1 = smd_read_avail(driver->chqdsp_cntl); + r2 = smd_cur_packet_size(driver->chqdsp_cntl); + if (r1 > 0 && r1 == r2) + queue_work(driver->diag_wq, + &(driver->diag_read_smd_qdsp_cntl_work)); + else + pr_debug("diag: incomplete pkt on LPASS CNTL ch\n"); + break; + case SMD_EVENT_OPEN: + queue_work(driver->diag_cntl_wq, + &(driver->diag_qdsp_mask_update_work)); + break; + } +} + +void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event) +{ + int r1, r2; + + if (!(driver->ch_wcnss_cntl)) + return; + + switch (event) { + case SMD_EVENT_DATA: + r1 = smd_read_avail(driver->ch_wcnss_cntl); + r2 = smd_cur_packet_size(driver->ch_wcnss_cntl); + if (r1 > 0 && r1 == r2) + queue_work(driver->diag_wq, + &(driver->diag_read_smd_wcnss_cntl_work)); + else + pr_debug("diag: incomplete pkt on WCNSS CNTL ch\n"); + break; + case SMD_EVENT_OPEN: + queue_work(driver->diag_cntl_wq, + &(driver->diag_wcnss_mask_update_work)); + break; + } +} + static void diag_smd_cntl_send_req(int proc_num) { int data_len = 0, type = -1, count_bytes = 0, j, r, flag = 0; @@ -30,12 +99,13 @@ static void diag_smd_cntl_send_req(int proc_num) struct diag_ctrl_msg *msg; struct cmd_code_range *range; struct bindpkt_params *temp; - void *buf = NULL, *dump_buf = NULL; + void *buf = NULL; smd_channel_t *smd_ch = NULL; - DIAG_INFO("%s: %s\n", __func__, - (proc_num == MODEM_PROC)?"MODEM_PROC": - (proc_num == QDSP_PROC)?"QDSP_PROC":"WCNSS_PROC"); + if (pkt_params == NULL) { + pr_alert("diag: Memory allocation failure\n"); + return; + } if (proc_num == MODEM_PROC) { buf = driver->buf_in_cntl; @@ -69,24 +139,29 @@ static void diag_smd_cntl_send_req(int proc_num) while (count_bytes + HDR_SIZ <= r) { type = *(uint32_t *)(buf); data_len = *(uint32_t *)(buf + 4); + if (type < DIAG_CTRL_MSG_REG || + type > DIAG_CTRL_MSG_F3_MASK_V2) { + pr_alert("diag: Invalid Msg type %d proc %d", + type, proc_num); + break; + } + if (data_len < 0 || data_len > r) { + pr_alert("diag: Invalid data len %d proc %d", + data_len, proc_num); + break; + } count_bytes = count_bytes+HDR_SIZ+data_len; if (type == DIAG_CTRL_MSG_REG && r >= count_bytes) { msg = buf+HDR_SIZ; - if (!msg->count_entries) { - DIAG_ERR("version: %d, cmd_code: %d," - " subsysid: %d, count_entries: %d," - " port:%d\n", msg->version, - msg->cmd_code, msg->subsysid, - msg->count_entries, msg->port); - dump_buf = kmalloc(r, GFP_KERNEL); - memcpy(dump_buf, buf, r); - continue; - } range = buf+HDR_SIZ+ sizeof(struct diag_ctrl_msg); pkt_params->count = msg->count_entries; temp = kzalloc(pkt_params->count * sizeof(struct bindpkt_params), 
GFP_KERNEL); + if (temp == NULL) { + pr_alert("diag: Memory alloc fail\n"); + return; + } for (j = 0; j < pkt_params->count; j++) { temp->cmd_code = msg->cmd_code; temp->subsys_id = msg->subsysid; @@ -103,23 +178,19 @@ static void diag_smd_cntl_send_req(int proc_num) diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG, (unsigned long)pkt_params); kfree(temp); - buf = buf + HDR_SIZ + data_len; } + buf = buf + HDR_SIZ + data_len; } } - if (dump_buf) { - print_hex_dump(KERN_DEBUG, "diag_debug_buf:", - 16, 1, DUMP_PREFIX_ADDRESS, dump_buf, r, 1); - kfree(dump_buf); - } kfree(pkt_params); if (flag) { /* Poll SMD CNTL channels to check for data */ - queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work)); - queue_work(driver->diag_wq, - &(driver->diag_read_smd_qdsp_cntl_work)); - queue_work(driver->diag_wq, - &(driver->diag_read_smd_wcnss_cntl_work)); + if (proc_num == MODEM_PROC) + diag_smd_cntl_notify(NULL, SMD_EVENT_DATA); + else if (proc_num == QDSP_PROC) + diag_smd_qdsp_cntl_notify(NULL, SMD_EVENT_DATA); + else if (proc_num == WCNSS_PROC) + diag_smd_wcnss_cntl_notify(NULL, SMD_EVENT_DATA); } } @@ -138,27 +209,12 @@ void diag_read_smd_wcnss_cntl_work_fn(struct work_struct *work) diag_smd_cntl_send_req(WCNSS_PROC); } -static void diag_smd_cntl_notify(void *ctxt, unsigned event) -{ - queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work)); -} - -static void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event) -{ - queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_cntl_work)); -} - -static void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event) -{ - queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_cntl_work)); -} - static int diag_smd_cntl_probe(struct platform_device *pdev) { int r = 0; - /* open control ports only on 8960 */ - if (chk_config_get_id() == AO8960_TOOLS_ID) { + /* open control ports only on 8960 & newer targets */ + if (chk_apps_only()) { if (pdev->id == SMD_APPS_MODEM) r = smd_open("DIAG_CNTL", &driver->ch_cntl, driver, diag_smd_cntl_notify); @@ -214,6 +270,7 @@ static struct platform_driver diag_smd_lite_cntl_driver = { void diagfwd_cntl_init(void) { + driver->diag_cntl_wq = create_singlethread_workqueue("diag_cntl_wq"); if (driver->buf_in_cntl == NULL) { driver->buf_in_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (driver->buf_in_cntl == NULL) @@ -238,6 +295,8 @@ void diagfwd_cntl_init(void) kfree(driver->buf_in_cntl); kfree(driver->buf_in_qdsp_cntl); kfree(driver->buf_in_wcnss_cntl); + if (driver->diag_cntl_wq) + destroy_workqueue(driver->diag_cntl_wq); } void diagfwd_cntl_exit(void) @@ -248,6 +307,7 @@ void diagfwd_cntl_exit(void) driver->ch_cntl = 0; driver->chqdsp_cntl = 0; driver->ch_wcnss_cntl = 0; + destroy_workqueue(driver->diag_cntl_wq); platform_driver_unregister(&msm_smd_ch1_cntl_driver); platform_driver_unregister(&diag_smd_lite_cntl_driver); diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h index 542138df..ad1fec96 100644 --- a/drivers/char/diag/diagfwd_cntl.h +++ b/drivers/char/diag/diagfwd_cntl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,7 +13,22 @@ #ifndef DIAGFWD_CNTL_H #define DIAGFWD_CNTL_H -#define DIAG_CTRL_MSG_REG 1 /* Message registration commands */ +/* Message registration commands */ +#define DIAG_CTRL_MSG_REG 1 +/* Message passing for DTR events */ +#define DIAG_CTRL_MSG_DTR 2 +/* Control Diag sleep vote, buffering etc */ +#define DIAG_CTRL_MSG_DIAGMODE 3 +/* Diag data based on "light" diag mask */ +#define DIAG_CTRL_MSG_DIAGDATA 4 +/* Send diag internal feature mask 'diag_int_feature_mask' */ +#define DIAG_CTRL_MSG_FEATURE 8 +/* Send Diag log mask for a particular equip id */ +#define DIAG_CTRL_MSG_EQUIP_LOG_MASK 9 +/* Send Diag event mask */ +#define DIAG_CTRL_MSG_EVENT_MASK_V2 10 +/* Send Diag F3 mask */ +#define DIAG_CTRL_MSG_F3_MASK_V2 11 struct cmd_code_range { uint16_t cmd_code_lo; @@ -29,10 +44,46 @@ struct diag_ctrl_msg { uint16_t port; }; +struct diag_ctrl_event_mask { + uint32_t cmd_type; + uint32_t data_len; + uint8_t stream_id; + uint8_t status; + uint8_t event_config; + uint32_t event_mask_size; + /* Copy event mask here */ +} __packed; + +struct diag_ctrl_log_mask { + uint32_t cmd_type; + uint32_t data_len; + uint8_t stream_id; + uint8_t status; + uint8_t equip_id; + uint32_t num_items; /* Last log code for this equip_id */ + uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */ + /* Copy log mask here */ +} __packed; + +struct diag_ctrl_msg_mask { + uint32_t cmd_type; + uint32_t data_len; + uint8_t stream_id; + uint8_t status; + uint8_t msg_mode; + uint16_t ssid_first; /* Start of range of supported SSIDs */ + uint16_t ssid_last; /* Last SSID in range */ + uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */ + /* Copy msg mask here */ +} __packed; + void diagfwd_cntl_init(void); void diagfwd_cntl_exit(void); void diag_read_smd_cntl_work_fn(struct work_struct *); void diag_read_smd_qdsp_cntl_work_fn(struct work_struct *); void diag_read_smd_wcnss_cntl_work_fn(struct work_struct *); +void diag_smd_cntl_notify(void *ctxt, unsigned event); +void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event); +void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event); #endif diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c new file mode 100644 index 00000000..ac5722f3 --- /dev/null +++ b/drivers/char/diag/diagfwd_hsic.c @@ -0,0 +1,530 @@ +/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DIAG_OVER_USB +#include +#endif +#include "diagchar_hdlc.h" +#include "diagmem.h" +#include "diagchar.h" +#include "diagfwd.h" +#include "diagfwd_hsic.h" + +static void diag_read_hsic_work_fn(struct work_struct *work) +{ + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + /* + * If there is no hsic data being read from the hsic and there + * is no hsic data being written to the usb mdm channel + */ + if (!driver->in_busy_hsic_read && !driver->in_busy_hsic_write_on_mdm) { + /* + * Initiate the read from the hsic. The hsic read is + * asynchronous. Once the read is complete the read + * callback function will be called. + */ + int err; + driver->in_busy_hsic_read = 1; + APPEND_DEBUG('i'); + err = diag_bridge_read((char *)driver->buf_in_hsic, + IN_BUF_SIZE); + if (err) { + pr_err("DIAG: Error initiating HSIC read, err: %d\n", + err); + /* + * If the error is recoverable, then clear + * the read flag, so we will resubmit a + * read on the next frame. Otherwise, don't + * resubmit a read on the next frame. + */ + if ((-ESHUTDOWN) != err) + driver->in_busy_hsic_read = 0; + } + } + + /* + * If for some reason there was no hsic data, set up + * the next read + */ + if (!driver->in_busy_hsic_read) + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); +} + +static void diag_hsic_read_complete_callback(void *ctxt, char *buf, + int buf_size, int actual_size) +{ + /* The read of the data from the HSIC bridge is complete */ + driver->in_busy_hsic_read = 0; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + APPEND_DEBUG('j'); + if (actual_size > 0) { + if (!buf) { + pr_err("Out of diagmem for HSIC\n"); + } else { + driver->write_ptr_mdm->length = actual_size; + /* + * Set flag to denote hsic data is currently + * being written to the usb mdm channel. 
+ * driver->buf_in_hsic was given to + * diag_bridge_read(), so buf here should be + * driver->buf_in_hsic + */ + driver->in_busy_hsic_write_on_mdm = 1; + diag_device_write((void *)buf, HSIC_DATA, + driver->write_ptr_mdm); + } + } else { + pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size); + } + + /* + * If for some reason there was no hsic data to write to the + * mdm channel, set up another read + */ + if (!driver->in_busy_hsic_write_on_mdm) + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); +} + +static void diag_hsic_write_complete_callback(void *ctxt, char *buf, + int buf_size, int actual_size) +{ + /* The write of the data to the HSIC bridge is complete */ + driver->in_busy_hsic_write = 0; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + if (actual_size < 0) + pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size); + + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); +} + +static struct diag_bridge_ops hsic_diag_bridge_ops = { + .ctxt = NULL, + .read_complete_cb = diag_hsic_read_complete_callback, + .write_complete_cb = diag_hsic_write_complete_callback, +}; + +static int diag_hsic_close(void) +{ + if (driver->hsic_device_enabled) { + driver->hsic_ch = 0; + if (driver->hsic_device_opened) { + driver->hsic_device_opened = 0; + diag_bridge_close(); + } + pr_debug("DIAG in %s: closed successfully\n", __func__); + } else { + pr_debug("DIAG in %s: already closed\n", __func__); + } + + return 0; +} + +/* diagfwd_connect_hsic is called when the USB mdm channel is connected */ +static int diagfwd_connect_hsic(void) +{ + int err; + + pr_debug("DIAG in %s\n", __func__); + + err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE, N_MDM_READ); + if (err) + pr_err("DIAG: unable to alloc USB req on mdm ch err:%d\n", err); + + driver->usb_mdm_connected = 1; + driver->in_busy_hsic_write_on_mdm = 0; + driver->in_busy_hsic_read_on_mdm = 0; + driver->in_busy_hsic_write = 0; + driver->in_busy_hsic_read = 0; + + /* If the hsic (diag_bridge) platform device is not open */ + if (driver->hsic_device_enabled) { + if (!driver->hsic_device_opened) { + err = diag_bridge_open(&hsic_diag_bridge_ops); + if (err) { + pr_err("DIAG: HSIC channel open error: %d\n", + err); + } else { + pr_info("DIAG: opened HSIC channel\n"); + driver->hsic_device_opened = 1; + } + } else { + pr_info("DIAG: HSIC channel already open\n"); + } + + /* + * Turn on communication over usb mdm and hsic, if the hsic + * device driver is enabled and opened + */ + if (driver->hsic_device_opened) + driver->hsic_ch = 1; + + /* Poll USB mdm channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); + + /* Poll HSIC channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); + } else { + /* The hsic device driver has not yet been enabled */ + pr_info("DIAG: HSIC channel not yet enabled\n"); + } + + return 0; +} + +/* + * diagfwd_disconnect_hsic is called when the USB mdm channel + * is disconnected + */ +static int diagfwd_disconnect_hsic(void) +{ + pr_debug("DIAG in %s\n", __func__); + + driver->usb_mdm_connected = 0; + usb_diag_free_req(driver->mdm_ch); + driver->in_busy_hsic_write_on_mdm = 1; + driver->in_busy_hsic_read_on_mdm = 1; + driver->in_busy_hsic_write = 1; + driver->in_busy_hsic_read = 1; + + /* Turn off communication over usb mdm and hsic */ + driver->hsic_ch = 0; + + return 0; +} + +/* + * diagfwd_write_complete_hsic is called after the asynchronous + * 
usb_diag_write() on mdm channel is complete + */ +static int diagfwd_write_complete_hsic(void) +{ + /* + * Clear flag to denote that the write of the hsic data on the + * usb mdm channel is complete + */ + driver->in_busy_hsic_write_on_mdm = 0; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return 0; + } + + APPEND_DEBUG('q'); + + /* Read data from the hsic */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); + + return 0; +} + +/* Called after the asychronous usb_diag_read() on mdm channel is complete */ +static int diagfwd_read_complete_hsic(struct diag_request *diag_read_ptr) +{ + /* The read of the usb driver on the mdm (not hsic) has completed */ + driver->in_busy_hsic_read_on_mdm = 0; + driver->read_len_mdm = diag_read_ptr->actual; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return 0; + } + + /* + * The read of the usb driver on the mdm channel has completed. + * If there is no write on the hsic in progress, check if the + * read has data to pass on to the hsic. If so, pass the usb + * mdm data on to the hsic. + */ + if (!driver->in_busy_hsic_write && driver->usb_buf_mdm_out && + (driver->read_len_mdm > 0)) { + + /* + * Initiate the hsic write. The hsic write is + * asynchronous. When complete the write + * complete callback function will be called + */ + int err; + driver->in_busy_hsic_write = 1; + err = diag_bridge_write(driver->usb_buf_mdm_out, + driver->read_len_mdm); + if (err) { + pr_err("DIAG: mdm data on hsic write err: %d\n", err); + /* + * If the error is recoverable, then clear + * the write flag, so we will resubmit a + * write on the next frame. Otherwise, don't + * resubmit a write on the next frame. + */ + if ((-ESHUTDOWN) != err) + driver->in_busy_hsic_write = 0; + } + } + + /* + * If there is no write of the usb mdm data on the + * hsic channel + */ + if (!driver->in_busy_hsic_write) + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); + + return 0; +} + +static void diagfwd_hsic_notifier(void *priv, unsigned event, + struct diag_request *d_req) +{ + switch (event) { + case USB_DIAG_CONNECT: + diagfwd_connect_hsic(); + break; + case USB_DIAG_DISCONNECT: + diagfwd_disconnect_hsic(); + break; + case USB_DIAG_READ_DONE: + diagfwd_read_complete_hsic(d_req); + break; + case USB_DIAG_WRITE_DONE: + diagfwd_write_complete_hsic(); + break; + default: + pr_err("DIAG in %s: Unknown event from USB diag:%u\n", + __func__, event); + break; + } +} + +static void diag_read_mdm_work_fn(struct work_struct *work) +{ + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + /* + * If there is no data being read from the usb mdm channel + * and there is no mdm channel data currently being written + * to the hsic + */ + if (!driver->in_busy_hsic_read_on_mdm && !driver->in_busy_hsic_write) { + APPEND_DEBUG('x'); + + /* Setup the next read from usb mdm channel */ + driver->in_busy_hsic_read_on_mdm = 1; + driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out; + driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF; + usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr); + APPEND_DEBUG('y'); + } + + /* + * If for some reason there was no mdm channel read initiated, + * queue up the reading of data from the mdm channel + */ + if (!driver->in_busy_hsic_read_on_mdm) + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); +} + +int diag_hsic_enable(void) +{ + pr_debug("DIAG in %s\n", __func__); + + driver->read_len_mdm = 0; + if 
(driver->buf_in_hsic == NULL) + driver->buf_in_hsic = kzalloc(IN_BUF_SIZE, GFP_KERNEL); + if (driver->buf_in_hsic == NULL) + goto err; + if (driver->usb_buf_mdm_out == NULL) + driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); + if (driver->usb_buf_mdm_out == NULL) + goto err; + if (driver->write_ptr_mdm == NULL) + driver->write_ptr_mdm = kzalloc( + sizeof(struct diag_request), GFP_KERNEL); + if (driver->write_ptr_mdm == NULL) + goto err; + if (driver->usb_read_mdm_ptr == NULL) + driver->usb_read_mdm_ptr = kzalloc( + sizeof(struct diag_request), GFP_KERNEL); + if (driver->usb_read_mdm_ptr == NULL) + goto err; + driver->diag_hsic_wq = create_singlethread_workqueue("diag_hsic_wq"); +#ifdef CONFIG_DIAG_OVER_USB + INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn); +#endif + INIT_WORK(&(driver->diag_read_hsic_work), diag_read_hsic_work_fn); + + driver->hsic_device_enabled = 1; + + return 0; +err: + pr_err("DIAG could not initialize buf for HSIC\n"); + kfree(driver->buf_in_hsic); + kfree(driver->usb_buf_mdm_out); + kfree(driver->write_ptr_mdm); + kfree(driver->usb_read_mdm_ptr); + if (driver->diag_hsic_wq) + destroy_workqueue(driver->diag_hsic_wq); + + return -ENOMEM; +} + +static int diag_hsic_probe(struct platform_device *pdev) +{ + int err; + + if (!driver->hsic_device_enabled) { + err = diag_hsic_enable(); + if (err) { + pr_err("DIAG could not enable HSIC, err: %d\n", err); + return err; + } + } + + /* The hsic (diag_bridge) platform device driver is enabled */ + err = diag_bridge_open(&hsic_diag_bridge_ops); + if (err) { + pr_err("DIAG could not open HSIC channel, err: %d\n", err); + driver->hsic_device_opened = 0; + return err; + } + + pr_info("DIAG opened HSIC channel\n"); + driver->hsic_device_opened = 1; + + /* + * The probe function was called after the usb was connected + * on the legacy channel. Communication over usb mdm and hsic + * needs to be turned on. 
+ */ + if (driver->usb_connected) { + driver->hsic_ch = 1; + driver->in_busy_hsic_write_on_mdm = 0; + driver->in_busy_hsic_read_on_mdm = 0; + driver->in_busy_hsic_write = 0; + driver->in_busy_hsic_read = 0; + + /* Poll USB mdm channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); + + /* Poll HSIC channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); + } + + return err; +} + +static int diag_hsic_remove(struct platform_device *pdev) +{ + pr_info("DIAG: %s called\n", __func__); + diag_hsic_close(); + return 0; +} + +static int diagfwd_hsic_runtime_suspend(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: suspending...\n"); + return 0; +} + +static int diagfwd_hsic_runtime_resume(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: resuming...\n"); + return 0; +} + +static const struct dev_pm_ops diagfwd_hsic_dev_pm_ops = { + .runtime_suspend = diagfwd_hsic_runtime_suspend, + .runtime_resume = diagfwd_hsic_runtime_resume, +}; + +static struct platform_driver msm_hsic_ch_driver = { + .probe = diag_hsic_probe, + .remove = diag_hsic_remove, + .driver = { + .name = "diag_bridge", + .owner = THIS_MODULE, + .pm = &diagfwd_hsic_dev_pm_ops, + }, +}; + + +void __init diagfwd_hsic_init(void) +{ + int ret; + + pr_debug("DIAG in %s\n", __func__); + +#ifdef CONFIG_DIAG_OVER_USB + driver->mdm_ch = usb_diag_open(DIAG_MDM, driver, diagfwd_hsic_notifier); + if (IS_ERR(driver->mdm_ch)) { + pr_err("DIAG Unable to open USB diag MDM channel\n"); + goto err; + } +#endif + ret = platform_driver_register(&msm_hsic_ch_driver); + if (ret) + pr_err("DIAG could not register HSIC device, ret: %d\n", ret); + else + driver->hsic_initialized = 1; + + return; +err: + pr_err("DIAG could not initialize for HSIC execution\n"); +} + +void __exit diagfwd_hsic_exit(void) +{ + pr_debug("DIAG in %s\n", __func__); + + if (driver->hsic_initialized) + diag_hsic_close(); + +#ifdef CONFIG_DIAG_OVER_USB + if (driver->usb_mdm_connected) + usb_diag_free_req(driver->mdm_ch); +#endif + platform_driver_unregister(&msm_hsic_ch_driver); +#ifdef CONFIG_DIAG_OVER_USB + usb_diag_close(driver->mdm_ch); +#endif + kfree(driver->buf_in_hsic); + kfree(driver->usb_buf_mdm_out); + kfree(driver->write_ptr_mdm); + kfree(driver->usb_read_mdm_ptr); + destroy_workqueue(driver->diag_hsic_wq); + + driver->hsic_device_enabled = 0; +} diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h new file mode 100644 index 00000000..67690525 --- /dev/null +++ b/drivers/char/diag/diagfwd_hsic.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef DIAGFWD_HSIC_H +#define DIAGFWD_HSIC_H + +#include +#define N_MDM_WRITE 1 /* Upgrade to 2 with ping pong buffer */ +#define N_MDM_READ 1 + +void __init diagfwd_hsic_init(void); +void __exit diagfwd_hsic_exit(void); + +#endif diff --git a/drivers/char/diag/diagfwd_sdio.c b/drivers/char/diag/diagfwd_sdio.c index 84b662d9..a145c066 100644 --- a/drivers/char/diag/diagfwd_sdio.c +++ b/drivers/char/diag/diagfwd_sdio.c @@ -32,32 +32,19 @@ void __diag_sdio_send_req(void) { int r = 0; - void *buf = NULL; - int *in_busy_ptr = NULL; - struct diag_request *write_ptr_modem = NULL; - int retry = 0, type; + void *buf = driver->buf_in_sdio; - if (!driver->in_busy_sdio_1) { - buf = driver->buf_in_sdio_1; - write_ptr_modem = driver->write_ptr_mdm_1; - in_busy_ptr = &(driver->in_busy_sdio_1); - } else if (!driver->in_busy_sdio_2) { - buf = driver->buf_in_sdio_2; - write_ptr_modem = driver->write_ptr_mdm_2; - in_busy_ptr = &(driver->in_busy_sdio_2); - } - - if (driver->sdio_ch && buf) { + if (driver->sdio_ch && (!driver->in_busy_sdio)) { r = sdio_read_avail(driver->sdio_ch); if (r > IN_BUF_SIZE) { if (r < MAX_IN_BUF_SIZE) { pr_err("diag: SDIO sending" - " in packets more than %d bytes", r); + " packets more than %d bytes\n", r); buf = krealloc(buf, r, GFP_KERNEL); } else { pr_err("diag: SDIO sending" - " in packets more than %d bytes", MAX_IN_BUF_SIZE); + " in packets more than %d bytes\n", MAX_IN_BUF_SIZE); return; } } @@ -65,60 +52,20 @@ void __diag_sdio_send_req(void) if (!buf) printk(KERN_INFO "Out of diagmem for SDIO\n"); else { -drop: APPEND_DEBUG('i'); sdio_read(driver->sdio_ch, buf, r); - if ((driver->qxdm2sd_drop) && (driver->logging_mode == USB_MODE)) { - /*Drop the diag payload */ - DIAG_INFO("%s:Drop the diag payload :%d\n", __func__, retry); - print_hex_dump(KERN_DEBUG, "Drop Packet Data" - " from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1); - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; - r = sdio_read_avail(driver->sdio_ch); - if (++retry > 20) { - driver->qxdm2sd_drop = 0; - return; - } - if (r) - goto drop; - else { - driver->qxdm2sd_drop = 0; - return; - } - } - APPEND_DEBUG('j'); - - if (diag9k_debug_mask) { - switch (diag9k_debug_mask) { - case DIAGLOG_MODE_HEAD: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1); - break; - case DIAGLOG_MODE_FULL: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1); - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K(last 16 bytes) ", 16, 1, DUMP_PREFIX_ADDRESS, buf+r-16, 16, 1); - break; - default: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K ", DUMP_PREFIX_ADDRESS, 16, 1, buf, r, 1); - - } - } - - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 1); + if (((!driver->usb_connected) && (driver-> + logging_mode == USB_MODE)) || (driver-> + logging_mode == NO_LOGGING_MODE)) { + /* Drop the diag payload */ + driver->in_busy_sdio = 0; return; } - - write_ptr_modem->length = r; - *in_busy_ptr = 1; + APPEND_DEBUG('j'); + driver->write_ptr_mdm->length = r; + driver->in_busy_sdio = 1; diag_device_write(buf, SDIO_DATA, - write_ptr_modem); - + driver->write_ptr_mdm); } } } @@ -129,6 +76,31 @@ static void diag_read_sdio_work_fn(struct work_struct *work) __diag_sdio_send_req(); } +static void diag_sdio_notify(void *ctxt, unsigned event) +{ + if (event == SDIO_EVENT_DATA_READ_AVAIL) + queue_work(driver->diag_sdio_wq, + 
&(driver->diag_read_sdio_work)); + + if (event == SDIO_EVENT_DATA_WRITE_AVAIL) + wake_up_interruptible(&driver->wait_q); +} + +static int diag_sdio_close(void) +{ + queue_work(driver->diag_sdio_wq, &(driver->diag_close_sdio_work)); + return 0; +} + +static void diag_close_sdio_work_fn(struct work_struct *work) +{ + pr_debug("diag: sdio close called\n"); + if (sdio_close(driver->sdio_ch)) + pr_err("diag: could not close SDIO channel\n"); + else + driver->sdio_ch = NULL; /* channel successfully closed */ +} + int diagfwd_connect_sdio(void) { int err; @@ -136,10 +108,20 @@ int diagfwd_connect_sdio(void) err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE, N_MDM_READ); if (err) - printk(KERN_ERR "diag: unable to alloc USB req on mdm ch"); + pr_err("diag: unable to alloc USB req on mdm ch\n"); + + driver->in_busy_sdio = 0; + if (!driver->sdio_ch) { + err = sdio_open("SDIO_DIAG", &driver->sdio_ch, driver, + diag_sdio_notify); + if (err) + pr_info("diag: could not open SDIO channel\n"); + else + pr_info("diag: opened SDIO channel\n"); + } else { + pr_info("diag: SDIO channel already open\n"); + } - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; /* Poll USB channel to check for data*/ queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work)); /* Poll SDIO channel to check for data*/ @@ -149,18 +131,17 @@ int diagfwd_connect_sdio(void) int diagfwd_disconnect_sdio(void) { - /* driver->in_busy_sdio = 1; */ - /* Clear variable to Flush remaining data from SDIO channel */ - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; usb_diag_free_req(driver->mdm_ch); + if (driver->sdio_ch && (driver->logging_mode == USB_MODE)) { + driver->in_busy_sdio = 1; + diag_sdio_close(); + } return 0; } int diagfwd_write_complete_sdio(void) { - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; + driver->in_busy_sdio = 0; APPEND_DEBUG('q'); queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); return 0; @@ -174,12 +155,14 @@ int diagfwd_read_complete_sdio(void) void diag_read_mdm_work_fn(struct work_struct *work) { - if (diag9k_debug_mask) - DIAG_INFO("%s \n", __func__); - if (driver->sdio_ch) { - wait_event_interruptible(driver->wait_q, (sdio_write_avail - (driver->sdio_ch) >= driver->read_len_mdm)); + wait_event_interruptible(driver->wait_q, ((sdio_write_avail + (driver->sdio_ch) >= driver->read_len_mdm) || + !(driver->sdio_ch))); + if (!(driver->sdio_ch)) { + pr_alert("diag: sdio channel not valid"); + return; + } if (driver->sdio_ch && driver->usb_buf_mdm_out && (driver->read_len_mdm > 0)) sdio_write(driver->sdio_ch, driver->usb_buf_mdm_out, @@ -192,21 +175,9 @@ void diag_read_mdm_work_fn(struct work_struct *work) } } -static void diag_sdio_notify(void *ctxt, unsigned event) -{ - if (event == SDIO_EVENT_DATA_READ_AVAIL) - queue_work(driver->diag_sdio_wq, - &(driver->diag_read_sdio_work)); - - if (event == SDIO_EVENT_DATA_WRITE_AVAIL) - wake_up_interruptible(&driver->wait_q); -} - static int diag_sdio_probe(struct platform_device *pdev) { int err; - if (diag9k_debug_mask) - DIAG_INFO("%s\n", __func__); err = sdio_open("SDIO_DIAG", &driver->sdio_ch, driver, diag_sdio_notify); @@ -217,28 +188,15 @@ static int diag_sdio_probe(struct platform_device *pdev) queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work)); } - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; - driver->qxdm2sd_drop = 0; - sdio_diag_initialized = 1; - return err; } static int diag_sdio_remove(struct platform_device *pdev) -{ - queue_work(driver->diag_sdio_wq, 
&(driver->diag_remove_sdio_work)); - return 0; -} - -static void diag_remove_sdio_work_fn(struct work_struct *work) { pr_debug("\n diag: sdio remove called"); - /*Disable SDIO channel to prevent further read/write */ + /* Disable SDIO channel to prevent further read/write */ driver->sdio_ch = NULL; - sdio_diag_initialized = 0; - driver->in_busy_sdio_1 = 1; - driver->in_busy_sdio_2 = 1; + return 0; } static int diagfwd_sdio_runtime_suspend(struct device *dev) @@ -272,31 +230,19 @@ void diagfwd_sdio_init(void) { int ret; - if (diag9k_debug_mask) - DIAG_INFO("%s\n", __func__); - driver->read_len_mdm = 0; - if (driver->buf_in_sdio_1 == NULL) - driver->buf_in_sdio_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); - if (driver->buf_in_sdio_1 == NULL) - goto err; - if (driver->buf_in_sdio_2 == NULL) - driver->buf_in_sdio_2 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); - if (driver->buf_in_sdio_2 == NULL) + if (driver->buf_in_sdio == NULL) + driver->buf_in_sdio = kzalloc(IN_BUF_SIZE, GFP_KERNEL); + if (driver->buf_in_sdio == NULL) goto err; if (driver->usb_buf_mdm_out == NULL) driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); if (driver->usb_buf_mdm_out == NULL) goto err; - if (driver->write_ptr_mdm_1 == NULL) - driver->write_ptr_mdm_1 = kzalloc( - sizeof(struct diag_request), GFP_KERNEL); - if (driver->write_ptr_mdm_1 == NULL) - goto err; - if (driver->write_ptr_mdm_2 == NULL) - driver->write_ptr_mdm_2 = kzalloc( + if (driver->write_ptr_mdm == NULL) + driver->write_ptr_mdm = kzalloc( sizeof(struct diag_request), GFP_KERNEL); - if (driver->write_ptr_mdm_2 == NULL) + if (driver->write_ptr_mdm == NULL) goto err; if (driver->usb_read_mdm_ptr == NULL) driver->usb_read_mdm_ptr = kzalloc( @@ -314,7 +260,7 @@ void diagfwd_sdio_init(void) INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn); #endif INIT_WORK(&(driver->diag_read_sdio_work), diag_read_sdio_work_fn); - INIT_WORK(&(driver->diag_remove_sdio_work), diag_remove_sdio_work_fn); + INIT_WORK(&(driver->diag_close_sdio_work), diag_close_sdio_work_fn); ret = platform_driver_register(&msm_sdio_ch_driver); if (ret) printk(KERN_INFO "DIAG could not register SDIO device"); @@ -324,11 +270,9 @@ void diagfwd_sdio_init(void) return; err: printk(KERN_INFO "\n Could not initialize diag buf for SDIO"); - kfree(driver->buf_in_sdio_1); - kfree(driver->buf_in_sdio_2); + kfree(driver->buf_in_sdio); kfree(driver->usb_buf_mdm_out); - kfree(driver->write_ptr_mdm_1); - kfree(driver->write_ptr_mdm_2); + kfree(driver->write_ptr_mdm); kfree(driver->usb_read_mdm_ptr); if (driver->diag_sdio_wq) destroy_workqueue(driver->diag_sdio_wq); @@ -344,11 +288,9 @@ void diagfwd_sdio_exit(void) #ifdef CONFIG_DIAG_OVER_USB usb_diag_close(driver->mdm_ch); #endif - kfree(driver->buf_in_sdio_1); - kfree(driver->buf_in_sdio_2); + kfree(driver->buf_in_sdio); kfree(driver->usb_buf_mdm_out); - kfree(driver->write_ptr_mdm_1); - kfree(driver->write_ptr_mdm_2); + kfree(driver->write_ptr_mdm); kfree(driver->usb_read_mdm_ptr); destroy_workqueue(driver->diag_sdio_wq); } diff --git a/drivers/char/diag/diagfwd_sdio.h b/drivers/char/diag/diagfwd_sdio.h index 57a35d29..40982c33 100644 --- a/drivers/char/diag/diagfwd_sdio.h +++ b/drivers/char/diag/diagfwd_sdio.h @@ -14,7 +14,7 @@ #define DIAGFWD_SDIO_H #include -#define N_MDM_WRITE 2 /* Upgrade to 2 with ping pong buffer */ +#define N_MDM_WRITE 1 /* Upgrade to 2 with ping pong buffer */ #define N_MDM_READ 1 void diagfwd_sdio_init(void); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 5bee20b3..aa1fa639 100644 --- 
a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -627,11 +627,11 @@ config USB_GADGET_DUMMY_HCD side is the master; the gadget side is the slave. Gadget drivers can be high, full, or low speed; and they have access to endpoints like those from NET2280, PXA2xx, or SA1100 hardware. - + This may help in some stages of creating a driver to embed in a Linux device, since it lets you debug several parts of the gadget driver without its hardware or drivers being involved. - + Since such a gadget side driver needs to interoperate with a host side Linux-USB device driver, this may help to debug both sides of a USB protocol stack. @@ -737,7 +737,7 @@ config USB_ETH help This driver implements Ethernet style communication, in one of several ways: - + - The "Communication Device Class" (CDC) Ethernet Control Model. That protocol is often avoided with pure Ethernet adapters, in favor of simpler vendor-specific hardware, but is widely @@ -777,7 +777,7 @@ config USB_ETH_RNDIS If you say "y" here, the Ethernet gadget driver will try to provide a second device configuration, supporting RNDIS to talk to such Microsoft USB hosts. - + To make MS-Windows work with this, use Documentation/usb/linux.inf as the "driver info file". For versions of MS-Windows older than XP, you'll need to download drivers from Microsoft's website; a URL @@ -962,241 +962,6 @@ config USB_G_ANDROID The functions can be configured via a board file and may be enabled and disabled dynamically. -config USB_ANDROID_ACM - boolean "Android gadget ACM serial function" - depends on USB_G_ANDROID - help - Provides ACM serial function for android gadget driver. - -config PASCAL_DETECT - boolean "PASCAL DETECT" - depends on USB_ANDROID_ACM - default n - help - Provides ACM serial function for pascal mode. - -config LISMO - boolean "LISMO" - depends on PASCAL_DETECT - default n - help - Provides LISMO service. - -config USB_ANDROID_ADB - boolean "Android gadget adb function" - depends on USB_G_ANDROID - default y - help - Provides adb function for android gadget driver. - -config USB_ANDROID_DIAG - boolean "USB MSM7K Diag Function" - depends on USB_G_ANDROID - default y - help - Qualcomm diagnostics interface support. - -config USB_ANDROID_MDM9K_DIAG - boolean "USB MDM 9k Diag Function" - depends on USB_ANDROID_DIAG - help - Qualcomm diagnostics interface support for 9K. - -config USB_ANDROID_MDM9K_MODEM - boolean "USB MDM 9k Modem Function" - depends on USB_G_ANDROID - help - Qualcomm Modem interface support for 9K. - -config USB_ANDROID_MASS_STORAGE - boolean "Android gadget mass storage function" - depends on USB_G_ANDROID && SWITCH - default y - help - Provides USB mass storage function for android gadget driver. - -config USB_ANDROID_MTP - boolean "Android MTP function" - depends on USB_G_ANDROID - help - Provides Media Transfer Protocol (MTP) support for android gadget driver. - -config USB_ANDROID_RNDIS - boolean "Android gadget RNDIS ethernet function" - depends on USB_G_ANDROID - default n - help - Provides RNDIS ethernet function for android gadget driver. - -config USB_ANDROID_RMNET - boolean "RmNet function driver" - depends on USB_G_ANDROID - default n - help - Enabling this option adds rmnet support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and modem found in MSM chipsets. 
- -config RMNET_SMD_CTL_CHANNEL - string "control SMD channel name" - depends on USB_ANDROID_RMNET - default "" - help - Control SMD channel for transferring QMI messages - -config RMNET_SMD_DATA_CHANNEL - string "Data SMD channel name" - depends on USB_ANDROID_RMNET - default "" - help - Data SMD channel for transferring network data - -config USB_ANDROID_RMNET_SDIO - boolean "RmNet over SDIO function driver" - depends on USB_G_ANDROID && MSM_SDIO_CMUX && MSM_SDIO_DMUX - default n - help - Enabling this option adds rmnet over sdio support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and modem found in MSM chipsets. - -config RMNET_SDIO_CTL_CHANNEL - int "control SDIO channel id" - depends on USB_ANDROID_RMNET_SDIO - help - Control SDIO channel for transferring QMI messages - -config RMNET_SDIO_DATA_CHANNEL - int "Data SDIO channel id" - depends on USB_ANDROID_RMNET_SDIO - help - Data SDIO channel for transferring network data - -config USB_ANDROID_RMNET_SMD_SDIO - boolean "RmNet over SMD/SDIO function driver" - depends on USB_G_ANDROID && MSM_SDIO_CMUX && MSM_SDIO_DMUX - default n - help - Enabling this option adds rmnet over sdio support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and modem found in MSM chipsets. - -config RMNET_SMD_SDIO_CTL_CHANNEL - int "control SDIO channel id" - depends on USB_ANDROID_RMNET_SMD_SDIO - default 8 - help - Control SDIO channel for transferring QMI messages - -config RMNET_SMD_SDIO_DATA_CHANNEL - int "Data SDIO channel id" - default 8 - depends on USB_ANDROID_RMNET_SMD_SDIO - help - Data SDIO channel for transferring network data - -config RMNET_SDIO_SMD_DATA_CHANNEL - string "Data SMD channel name" - depends on USB_ANDROID_RMNET_SMD_SDIO - default "DATA40" - help - Data SMD channel for transferring network data - -config USB_ANDROID_RMNET_BAM - boolean "RmNet over BAM driver" - depends on USB_G_ANDROID && MSM_BAM_DMUX - help - Enabling this option adds rmnet over BAM support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and Modem processor using bam-dmux interface. - This option enables only DATA interface. Control - interface has to be enabled separately - -config USB_ANDROID_RMNET_CTRL_SMD - boolean "RmNet control over SMD driver" - depends on USB_G_ANDROID && MSM_SMD - help - Enabling this option adds rmnet control over SMD - support to the android gadget. Rmnet is an - alternative to CDC-ECM and Windows RNDIS. - It uses QUALCOMM MSM Interface for control - transfers. This option enables only control interface. - Data interface has to be enabled separately - -config USB_F_SERIAL - boolean "generic serial function driver" - depends on USB_G_ANDROID - default n - help - Say "y" to link the driver statically, or "m" to build - as a part of "g_android" - -config MODEM_SUPPORT - boolean "modem support in generic serial function driver" - depends on USB_F_SERIAL - default y - help - This feature enables the modem functionality in the - generic serial. - adds interrupt endpoint support to send modem notifications - to host. - adds CDC descriptors to enumerate the generic serial as MODEM. - adds CDC class requests to configure MODEM line settings. 
- Say "y" to enable MODEM support in the generic serial driver. - -config USB_ANDROID_SERIAL - boolean "Android gadget Serial function" - depends on USB_G_ANDROID - help - -config USB_ANDROID_PROJECTOR - boolean "Android gadget Projector function" - depends on USB_G_ANDROID - help - -config USB_ANDROID_ECM - boolean "Android gadget ECM function" - depends on USB_G_ANDROID - help - -config USB_F_SERIAL_SDIO - boolean "generic serial function driver over SDIO" - depends on USB_G_ANDROID && MSM_SDIO_CMUX && MSM_SDIO_AL - default n - help - Gadget serial driver is one way to communicate with modem - device. This option enables serial driver to communicate - to modem device using sdio. It makes very minimal changes - to Gadget usb serial interface(f_serial.c) and adds - separate transport layer u_sdio(very similar to u_serial.c) - Say "y" to link the driver staticall - -config USB_F_SERIAL_SMD - boolean "generic serial function driver over SMD" - depends on USB_G_ANDROID && MSM_SMD - default n - help - Gadget serial driver is one way to communicate with modem - device. This option enables serial driver to communicate - to modem device using smd. It makes very minimal changes - to Gadget usb serial interface(f_serial.c) and adds - separate transport layer u_smd(very similar to u_serial.c) - Say "y" to link the driver statically - -config USB_ANDROID_USBNET - boolean "Android gadget USBNET function for VoIP" - depends on USB_G_ANDROID - default n - help - config USB_CDC_COMPOSITE tristate "CDC Composite Device (Ethernet and ACM)" depends on NET @@ -1318,10 +1083,6 @@ config USB_G_WEBCAM endchoice -config USB_ACCESSORY_DETECT_BY_ADC - boolean "DETECT USB ACCESSORY BY PMIC ADC" - default n - config USB_CSW_HACK boolean "USB Mass storage csw hack Feature" default y @@ -1402,9 +1163,4 @@ config USB_ANDROID_RMNET_CTRL_SMD transfers. This option enables only control interface. Data interface used is BAM. 
-config USB_GADGET_VERIZON_PRODUCT_ID - boolean "Verizon Product ID Mapping" - depends on USB_G_ANDROID - default n - endif # USB_GADGET diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c index c9db8320..403c3737 100644 --- a/drivers/usb/gadget/android.c +++ b/drivers/usb/gadget/android.c @@ -30,13 +30,9 @@ #include #include #include -#include +#include #include "gadget_chips.h" -#include -#ifdef CONFIG_PERFLOCK -#include -#endif /* * Kbuild is not very cooperative with respect to linking separately @@ -51,73 +47,28 @@ #include "composite.c" #include "f_diag.c" -#if defined(CONFIG_USB_ANDROID_RMNET_SMD) #include "f_rmnet_smd.c" -#elif defined(CONFIG_USB_ANDROID_RMNET_SDIO) #include "f_rmnet_sdio.c" -#elif defined(CONFIG_USB_ANDROID_RMNET_SMD_SDIO) #include "f_rmnet_smd_sdio.c" -#elif defined(CONFIG_USB_ANDROID_RMNET_BAM) #include "f_rmnet.c" -#endif #include "f_mass_storage.c" #include "u_serial.c" #include "u_sdio.c" #include "u_smd.c" #include "u_bam.c" #include "u_rmnet_ctrl_smd.c" +#include "u_ctrl_hsic.c" +#include "u_data_hsic.c" #include "f_serial.c" -#ifdef CONFIG_USB_ANDROID_ACM #include "f_acm.c" -#endif #include "f_adb.c" -#if 0 #include "f_ccid.c" -#endif -#ifdef CONFIG_USB_ANDROID_MTP #include "f_mtp.c" -#endif #include "f_accessory.c" #define USB_ETH_RNDIS y -#ifdef CONFIG_USB_ANDROID_RNDIS #include "f_rndis.c" #include "rndis.c" -#endif -#ifdef CONFIG_USB_ANDROID_ECM -#include "f_ecm.c" -#endif #include "u_ether.c" -#ifdef CONFIG_USB_ANDROID_PROJECTOR -#include "f_projector.c" -#endif -#ifdef CONFIG_USB_ANDROID_USBNET -#include "f_usbnet.c" -#endif -#include - -#ifdef pr_debug -#undef pr_debug -#endif -#define pr_debug(fmt, args...) \ - printk(KERN_DEBUG "[USB] " pr_fmt(fmt), ## args) - -#ifdef pr_err -#undef pr_err -#endif -#define pr_err(fmt, args...) \ - printk(KERN_ERR "[USB] " pr_fmt(fmt), ## args) - -#ifdef pr_warning -#undef pr_warning -#endif -#define pr_warning(fmt, args...) \ - printk(KERN_WARNING "[USB] " pr_fmt(fmt), ## args) - -#ifdef pr_info -#undef pr_info -#endif -#define pr_info(fmt, args...) 
\ - printk(KERN_INFO "[USB] " pr_fmt(fmt), ## args) MODULE_AUTHOR("Mike Lockwood"); MODULE_DESCRIPTION("Android Composite USB Driver"); @@ -130,8 +81,6 @@ static const char longname[] = "Gadget Android"; #define VENDOR_ID 0x18D1 #define PRODUCT_ID 0x0001 -static bool connect2pc; - struct android_usb_function { char *name; void *config; @@ -152,14 +101,10 @@ struct android_usb_function { /* Optional: called when the configuration is removed */ void (*unbind_config)(struct android_usb_function *, struct usb_configuration *); - /* Optional: handle ctrl requests before the device is configured - * and/or before the function is enabled */ + /* Optional: handle ctrl requests before the device is configured */ int (*ctrlrequest)(struct android_usb_function *, struct usb_composite_dev *, const struct usb_ctrlrequest *); - - /* for performance requirement */ - int performance_lock; }; struct android_dev { @@ -173,34 +118,13 @@ struct android_dev { bool connected; bool sw_connected; struct work_struct work; - struct delayed_work init_work; - /* waiting for enabling functions */ - struct list_head function_list; - - int num_products; - struct android_usb_product *products; - int num_functions; - char **in_house_functions; - - int product_id; - void (*enable_fast_charge)(bool enable); - bool RndisDisableMPDecision; - int (*match)(int product_id, int intrsharing); }; static struct class *android_class; static struct android_dev *_android_dev; - -static struct wake_lock android_usb_idle_wake_lock; -#ifdef CONFIG_PERFLOCK -static struct perf_lock android_usb_perf_lock; -#endif - - static int android_bind_config(struct usb_configuration *c); static void android_unbind_config(struct usb_configuration *c); - /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 @@ -247,6 +171,12 @@ static struct usb_configuration android_config_driver = { .bMaxPower = 0xFA, /* 500ma */ }; +enum android_device_state { + USB_DISCONNECTED, + USB_CONNECTED, + USB_CONFIGURED, +}; + static void android_work(struct work_struct *data) { struct android_dev *dev = container_of(data, struct android_dev, work); @@ -254,119 +184,57 @@ static void android_work(struct work_struct *data) char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL }; char *connected[2] = { "USB_STATE=CONNECTED", NULL }; char *configured[2] = { "USB_STATE=CONFIGURED", NULL }; + char **uevent_envp = NULL; + static enum android_device_state last_uevent, next_state; unsigned long flags; - struct android_usb_function *f; - int count = 0; - - /* release performance related locks first */ - if (wake_lock_active(&android_usb_idle_wake_lock)) - wake_unlock(&android_usb_idle_wake_lock); -#ifdef CONFIG_PERFLOCK - if (is_perf_lock_active(&android_usb_perf_lock)) - perf_unlock(&android_usb_perf_lock); -#endif - spin_lock_irqsave(&cdev->lock, flags); if (cdev->config) { - spin_unlock_irqrestore(&cdev->lock, flags); - kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, - configured); - pr_info("USB_STATE=CONFIGURED"); - - /* hold perflock, wakelock for performance consideration */ - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - if (f->performance_lock) { - pr_info("Performance lock for '%s'\n", f->name); - count++; - } - } - if (count) { - if (!wake_lock_active(&android_usb_idle_wake_lock)) - wake_lock(&android_usb_idle_wake_lock); -#ifdef CONFIG_PERFLOCK - if (!is_perf_lock_active(&android_usb_perf_lock)) - perf_lock(&android_usb_perf_lock); -#endif - } - - if (!connect2pc && dev->connected) { - connect2pc 
= true; - switch_set_state(&cdev->sw_connect2pc, 1); - pr_info("set usb_connect2pc = 1\n"); - } - return; + uevent_envp = configured; + next_state = USB_CONFIGURED; + } else if (dev->connected != dev->sw_connected) { + uevent_envp = dev->connected ? connected : disconnected; + next_state = dev->connected ? USB_CONNECTED : USB_DISCONNECTED; } + dev->sw_connected = dev->connected; + spin_unlock_irqrestore(&cdev->lock, flags); - if (dev->connected != dev->sw_connected) { - dev->sw_connected = dev->connected; - spin_unlock_irqrestore(&cdev->lock, flags); - kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, - dev->sw_connected ? connected : disconnected); + if (uevent_envp) { + /* + * Some userspace modules, e.g. MTP, work correctly only if + * CONFIGURED uevent is preceded by DISCONNECT uevent. + * Check if we missed sending out a DISCONNECT uevent. This can + * happen if host PC resets and configures device really quick. + */ + if (((uevent_envp == connected) && + (last_uevent != USB_DISCONNECTED)) || + ((uevent_envp == configured) && + (last_uevent == USB_CONFIGURED))) { + pr_info("%s: sent missed DISCONNECT event\n", __func__); + kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, + disconnected); + msleep(20); + } + /* + * Before sending out CONFIGURED uevent give function drivers + * a chance to wakeup userspace threads and notify disconnect + */ + if (uevent_envp == configured) + msleep(50); - pr_info("%s\n", dev->connected ? connected[0] : disconnected[0]); + kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp); + last_uevent = next_state; + pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]); } else { - spin_unlock_irqrestore(&cdev->lock, flags); - } - - if (connect2pc && !dev->connected) { - connect2pc = false; - switch_set_state(&cdev->sw_connect2pc, 0); - pr_info("set usb_connect2pc = 0\n"); + pr_info("%s: did not send uevent (%d %d %p)\n", __func__, + dev->connected, dev->sw_connected, cdev->config); } } /*-------------------------------------------------------------------------*/ /* Supported functions initialization */ -static ssize_t func_en_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct android_usb_function *func = dev_get_drvdata(dev); - struct android_usb_function *f; - int ebl = 0; - - list_for_each_entry(f, &_android_dev->enabled_functions, enabled_list) { - if (!strcmp(func->name, f->name)) { - ebl = 1; - break; - } - } - return sprintf(buf, "%d", ebl); -} - -static ssize_t func_en_store( - struct device *dev, struct device_attribute *attr, - const char *buf, size_t size) -{ - struct android_usb_function *func = dev_get_drvdata(dev); - struct android_usb_function *f; - int ebl = 0; - int value; - - sscanf(buf, "%d", &value); - list_for_each_entry(f, &_android_dev->enabled_functions, enabled_list) { - if (!strcmp(func->name, f->name)) { - ebl = 1; - break; - } - } - if (!!value == ebl) { - pr_info("%s function is already %s\n", func->name - , ebl ? 
"enable" : "disable"); - return size; - } - if (value) - htc_usb_enable_function(func->name, 1); - else - htc_usb_enable_function(func->name, 0); - - return size; -} -static DEVICE_ATTR(on, S_IRUGO | S_IWUSR | S_IWGRP, func_en_show, func_en_store); - -#if defined(CONFIG_USB_ANDROID_RMNET_SMD) /* RMNET_SMD */ static int rmnet_smd_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) @@ -375,11 +243,9 @@ static int rmnet_smd_function_bind_config(struct android_usb_function *f, } static struct android_usb_function rmnet_smd_function = { - .name = "rmnet", + .name = "rmnet_smd", .bind_config = rmnet_smd_function_bind_config, - .performance_lock = 1, }; -#elif defined(CONFIG_USB_ANDROID_RMNET_SDIO) /* RMNET_SDIO */ static int rmnet_sdio_function_bind_config(struct android_usb_function *f, @@ -389,12 +255,10 @@ static int rmnet_sdio_function_bind_config(struct android_usb_function *f, } static struct android_usb_function rmnet_sdio_function = { - .name = "rmnet", + .name = "rmnet_sdio", .bind_config = rmnet_sdio_function_bind_config, - .performance_lock = 1, }; -#elif defined(CONFIG_USB_ANDROID_RMNET_SMD_SDIO) /* RMNET_SMD_SDIO */ static int rmnet_smd_sdio_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) @@ -417,22 +281,16 @@ static struct device_attribute *rmnet_smd_sdio_attributes[] = { &dev_attr_transport, NULL }; static struct android_usb_function rmnet_smd_sdio_function = { - .name = "rmnet", + .name = "rmnet_smd_sdio", .init = rmnet_smd_sdio_function_init, .cleanup = rmnet_smd_sdio_function_cleanup, .bind_config = rmnet_smd_sdio_bind_config, .attributes = rmnet_smd_sdio_attributes, - .performance_lock = 1, }; -#elif defined(CONFIG_USB_ANDROID_RMNET_BAM) -/* RMNET - used with BAM */ -#define MAX_RMNET_INSTANCES 1 -static int rmnet_instances = 1; -static int rmnet_function_init(struct android_usb_function *f, - struct usb_composite_dev *cdev) -{ - return frmnet_init_port(MAX_RMNET_INSTANCES); -} + +/*rmnet transport string format(per port):"ctrl0,data0,ctrl1,data1..." 
*/ +#define MAX_XPORT_STR_LEN 50 +static char rmnet_transports[MAX_XPORT_STR_LEN]; static void rmnet_function_cleanup(struct android_usb_function *f) { @@ -443,52 +301,78 @@ static int rmnet_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) { int i; - int ret = 0; + int err = 0; + char *ctrl_name; + char *data_name; + char buf[MAX_XPORT_STR_LEN], *b; + static int rmnet_initialized, ports; + + if (!rmnet_initialized) { + rmnet_initialized = 1; + strlcpy(buf, rmnet_transports, sizeof(buf)); + b = strim(buf); + while (b) { + ctrl_name = strsep(&b, ","); + data_name = strsep(&b, ","); + if (ctrl_name && data_name) { + err = frmnet_init_port(ctrl_name, data_name); + if (err) { + pr_err("rmnet: Cannot open ctrl port:" + "'%s' data port:'%s'\n", + ctrl_name, data_name); + goto out; + } + ports++; + } + } - for (i = 0; i < rmnet_instances; i++) { - ret = frmnet_bind_config(c, i); - if (ret) { + err = rmnet_gport_setup(); + if (err) { + pr_err("rmnet: Cannot setup transports"); + goto out; + } + } + + for (i = 0; i < ports; i++) { + err = frmnet_bind_config(c, i); + if (err) { pr_err("Could not bind rmnet%u config\n", i); break; } } - - return ret; +out: + return err; } -static ssize_t rmnet_instances_show(struct device *dev, +static ssize_t rmnet_transports_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", rmnet_instances); + return snprintf(buf, PAGE_SIZE, "%s\n", rmnet_transports); } -static ssize_t rmnet_instances_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t rmnet_transports_store( + struct device *device, struct device_attribute *attr, + const char *buff, size_t size) { - int value; + strlcpy(rmnet_transports, buff, sizeof(rmnet_transports)); - pr_info("%s, buff: %s\n", __func__, buf); - sscanf(buf, "%d", &value); - if (value > MAX_RMNET_INSTANCES) - value = MAX_RMNET_INSTANCES; - rmnet_instances = value; return size; } -static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, rmnet_instances_show, - rmnet_instances_store); +static struct device_attribute dev_attr_rmnet_transports = + __ATTR(transports, S_IRUGO | S_IWUSR, + rmnet_transports_show, + rmnet_transports_store); static struct device_attribute *rmnet_function_attributes[] = { - &dev_attr_instances, NULL }; + &dev_attr_rmnet_transports, + NULL }; static struct android_usb_function rmnet_function = { .name = "rmnet", - .init = rmnet_function_init, .cleanup = rmnet_function_cleanup, .bind_config = rmnet_function_bind_config, .attributes = rmnet_function_attributes, - .performance_lock = 1, }; -#endif /* DIAG */ static char diag_clients[32]; /*enabled DIAG clients- "diag[,diag_mdm]" */ @@ -496,7 +380,6 @@ static ssize_t clients_store( struct device *device, struct device_attribute *attr, const char *buff, size_t size) { - pr_info("%s, buff: %s\n", __func__, buff); strlcpy(diag_clients, buff, sizeof(diag_clients)); return size; @@ -520,16 +403,6 @@ static void diag_function_cleanup(struct android_usb_function *f) static int diag_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) { -#if 1 - int err; - int (*notify)(uint32_t, const char *); - - notify = _android_dev->pdata->update_pid_and_serial_num; - - err = diag_function_add(c, "diag", notify); - if (err) - pr_err("diag: Cannot open channel 'diag'"); -#else char *name; char buf[32], *b; int once = 0, err = -1; @@ -551,7 +424,7 @@ static int diag_function_bind_config(struct android_usb_function *f, pr_err("diag: 
Cannot open channel '%s'", name); } } -#endif + return err; } @@ -563,149 +436,65 @@ static struct android_usb_function diag_function = { .attributes = diag_function_attributes, }; -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) -static int diag_mdm_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) +/* SERIAL */ +static char serial_transports[32]; /*enabled FSERIAL ports - "tty[,sdio]"*/ +static ssize_t serial_transports_store( + struct device *device, struct device_attribute *attr, + const char *buff, size_t size) { - int err; - int (*notify)(uint32_t, const char *); + strlcpy(serial_transports, buff, sizeof(serial_transports)); - notify = NULL; + return size; +} - err = diag_function_add(c, "diag_mdm", notify); - if (err) - pr_err("diag: Cannot open channel 'diag_mdm'"); +static DEVICE_ATTR(transports, S_IWUSR, NULL, serial_transports_store); +static struct device_attribute *serial_function_attributes[] = + { &dev_attr_transports, NULL }; - return 0; +static void serial_function_cleanup(struct android_usb_function *f) +{ + gserial_cleanup(); } -static struct android_usb_function diag_mdm_function = { - .name = "diag_mdm", - .bind_config = diag_mdm_function_bind_config, -}; -#endif - -/* Serial, Modem */ -static int serial_driver_initial(struct usb_configuration *c) +static int serial_function_bind_config(struct android_usb_function *f, + struct usb_configuration *c) { - char *name, *str[2]; - char buf[80], *b; - int err = -1; + char *name; + char buf[32], *b; + int err = -1, i; static int serial_initialized = 0, ports = 0; - char *init_string; - - if (serial_initialized) { - pr_info("%s: already initial\n", __func__); - return ports; - } - serial_initialized = 1; - init_string = _android_dev->pdata->fserial_init_string ? 
- _android_dev->pdata->fserial_init_string : - "smd:modem,tty,tty,tty:serial"; - - strncpy(buf, init_string, sizeof(buf)); - buf[79] = 0; - pr_info("%s: init string: %s\n", __func__, buf); + if (serial_initialized) + goto bind_config; + serial_initialized = 1; + strlcpy(buf, serial_transports, sizeof(buf)); b = strim(buf); while (b) { - str[0] = str[1] = 0; name = strsep(&b, ","); + if (name) { - str[0] = strsep(&name, ":"); - if (str[0]) - str[1] = strsep(&name, ":"); - } - err = gserial_init_port(ports, str[0], str[1]); - if (err) { - pr_err("serial: Cannot open port '%s'\n", str[0]); - goto out; + err = gserial_init_port(ports, name); + if (err) { + pr_err("serial: Cannot open port '%s'", name); + goto out; + } + ports++; } - ports++; } - err = gport_setup(c); if (err) { pr_err("serial: Cannot setup transports"); goto out; } - return ports; - -out: - return err; -} - -/* Modem */ -static void modem_function_cleanup(struct android_usb_function *f) -{ - struct android_dev *dev = _android_dev; - - /* ToDo: need to cleanup by different channel */ - gsmd_cleanup(dev->cdev->gadget, 1); -} - -static int modem_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - int err = -1; - int i, ports; - - ports = serial_driver_initial(c); - if (ports < 0) - goto out; - - - for (i = 0; i < ports; i++) { - if (gserial_ports[i].func_type == USB_FSER_FUNC_MODEM) { - err = gser_bind_config(c, i); - if (err) { - pr_err("serial: bind_config failed for port %d", i); - goto out; - } - } - } - -out: - return err; -} - -static struct android_usb_function modem_function = { - .name = "modem", - .cleanup = modem_function_cleanup, - .bind_config = modem_function_bind_config, - .performance_lock = 1, -}; - -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM -/* Modem_Mdm */ -static void modem_mdm_function_cleanup(struct android_usb_function *f) -{ - struct android_dev *dev = _android_dev; - - /* ToDo: need to cleanup by different channel */ - gsmd_cleanup(dev->cdev->gadget, 1); -} - -static int modem_mdm_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - int err = -1; - int i, ports; - ports = serial_driver_initial(c); - if (ports < 0) - goto out; - - - for (i = 0; i < ports; i++) { - if (gserial_ports[i].func_type == USB_FSER_FUNC_MODEM_MDM) { - err = gser_bind_config(c, i); - if (err) { - pr_err("serial: bind_config failed for port %d", i); - goto out; - } +bind_config: + for (i = 0; i < ports; i++) { + err = gser_bind_config(c, i); + if (err) { + pr_err("serial: bind_config failed for port %d", i); + goto out; } } @@ -713,103 +502,83 @@ static int modem_mdm_function_bind_config(struct android_usb_function *f, return err; } -static struct android_usb_function modem_mdm_function = { - .name = "modem_mdm", - .cleanup = modem_mdm_function_cleanup, - .bind_config = modem_mdm_function_bind_config, - .performance_lock = 1, +static struct android_usb_function serial_function = { + .name = "serial", + .cleanup = serial_function_cleanup, + .bind_config = serial_function_bind_config, + .attributes = serial_function_attributes, }; -#endif -/* SERIAL */ -static char serial_transports[32]; /*enabled FSERIAL ports - "tty[,sdio]"*/ -static ssize_t serial_transports_store( +/* ACM */ +static char acm_transports[32]; /*enabled ACM ports - "tty[,sdio]"*/ +static ssize_t acm_transports_store( struct device *device, struct device_attribute *attr, const char *buff, size_t size) { - pr_info("%s: %s\n", __func__, buff); - strlcpy(serial_transports, buff, 
sizeof(serial_transports)); + strlcpy(acm_transports, buff, sizeof(acm_transports)); return size; } -static DEVICE_ATTR(transports, S_IWUSR, NULL, serial_transports_store); -static struct device_attribute *serial_function_attributes[] = - { &dev_attr_transports, NULL }; +static DEVICE_ATTR(acm_transports, S_IWUSR, NULL, acm_transports_store); +static struct device_attribute *acm_function_attributes[] = { + &dev_attr_acm_transports, NULL }; -static void serial_function_cleanup(struct android_usb_function *f) +static void acm_function_cleanup(struct android_usb_function *f) { gserial_cleanup(); } -static int serial_function_bind_config(struct android_usb_function *f, +static int acm_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) { -#if 1 - int err = -1; - int i, ports; - - ports = serial_driver_initial(c); - if (ports < 0) - goto out; - for (i = 0; i < ports; i++) { - if (gserial_ports[i].func_type == USB_FSER_FUNC_SERIAL) { - err = gser_bind_config(c, i); - if (err) { - pr_err("serial: bind_config failed for port %d", i); - goto out; - } - } - } -#else char *name; char buf[32], *b; int err = -1, i; - static int serial_initialized = 0, ports = 0; + static int acm_initialized, ports; - if (serial_initialized) + if (acm_initialized) goto bind_config; - serial_initialized = 1; - strlcpy(buf, serial_transports, sizeof(buf)); + acm_initialized = 1; + strlcpy(buf, acm_transports, sizeof(buf)); b = strim(buf); while (b) { name = strsep(&b, ","); + if (name) { - err = gserial_init_port(ports, name); + err = acm_init_port(ports, name); if (err) { - pr_err("serial: Cannot open port '%s'", name); + pr_err("acm: Cannot open port '%s'", name); goto out; } ports++; } } - err = gport_setup(c); + err = acm_port_setup(c); if (err) { - pr_err("serial: Cannot setup transports"); + pr_err("acm: Cannot setup transports"); goto out; } bind_config: for (i = 0; i < ports; i++) { - err = gser_bind_config(c, i); + err = acm_bind_config(c, i); if (err) { - pr_err("serial: bind_config failed for port %d", i); + pr_err("acm: bind_config failed for port %d", i); goto out; } } -#endif out: return err; } - -static struct android_usb_function serial_function = { - .name = "serial", - .cleanup = serial_function_cleanup, - .bind_config = serial_function_bind_config, - .attributes = serial_function_attributes, +static struct android_usb_function acm_function = { + .name = "acm", + .cleanup = acm_function_cleanup, + .bind_config = acm_function_bind_config, + .attributes = acm_function_attributes, }; /* ADB */ @@ -836,7 +605,6 @@ static struct android_usb_function adb_function = { }; /* CCID */ -#if 0 static int ccid_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) { @@ -860,85 +628,7 @@ static struct android_usb_function ccid_function = { .cleanup = ccid_function_cleanup, .bind_config = ccid_function_bind_config, }; -#endif - -#ifdef CONFIG_USB_ANDROID_ACM -#define MAX_ACM_INSTANCES 4 -struct acm_function_config { - int instances; -}; - -static int acm_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) -{ - struct acm_function_config *config; - f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL); - if (!f->config) - return -ENOMEM; - - config = f->config; - config->instances = 1; - return gserial_setup(cdev->gadget, MAX_ACM_INSTANCES); -} - -static void acm_function_cleanup(struct android_usb_function *f) -{ - gserial_cleanup(); - kfree(f->config); - f->config = NULL; -} - -static int acm_function_bind_config(struct 
android_usb_function *f, struct usb_configuration *c) -{ - int i; - int ret = 0; - struct acm_function_config *config = f->config; - - for (i = 0; i < config->instances; i++) { - ret = acm_bind_config(c, i); - if (ret) { - pr_err("Could not bind acm%u config\n", i); - break; - } - } - - return ret; -} - -static ssize_t acm_instances_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct acm_function_config *config = f->config; - return sprintf(buf, "%d\n", config->instances); -} - -static ssize_t acm_instances_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct acm_function_config *config = f->config; - int value; - - sscanf(buf, "%d", &value); - if (value > MAX_ACM_INSTANCES) - value = MAX_ACM_INSTANCES; - config->instances = value; - return size; -} - -static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show, acm_instances_store); -static struct device_attribute *acm_function_attributes[] = { &dev_attr_instances, NULL }; - -static struct android_usb_function acm_function = { - .name = "acm", - .init = acm_function_init, - .cleanup = acm_function_cleanup, - .bind_config = acm_function_bind_config, - .attributes = acm_function_attributes, -}; -#endif -#ifdef CONFIG_USB_ANDROID_MTP static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) { return mtp_setup(); @@ -992,108 +682,8 @@ static struct android_usb_function ptp_function = { .cleanup = ptp_function_cleanup, .bind_config = ptp_function_bind_config, }; -#endif - -#ifdef CONFIG_USB_ANDROID_ECM -/* ECM */ -struct ecm_function_config { - u8 ethaddr[ETH_ALEN]; -}; - -static int ecm_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) -{ - struct ecm_function_config *ecm; - f->config = kzalloc(sizeof(struct ecm_function_config), GFP_KERNEL); - if (!f->config) - return -ENOMEM; - - ecm = f->config; - return 0; -} - -static void ecm_function_cleanup(struct android_usb_function *f) -{ - kfree(f->config); - f->config = NULL; -} - -static int ecm_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - int ret; - struct ecm_function_config *ecm = f->config; - - if (!ecm) { - pr_err("%s: ecm_pdata\n", __func__); - return -1; - } - - - pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__, - ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2], - ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]); - - ret = gether_setup_name(c->cdev->gadget, ecm->ethaddr, "usb"); - if (ret) { - pr_err("%s: gether_setup failed\n", __func__); - return ret; - } - - - return ecm_bind_config(c, ecm->ethaddr); -} - -static void ecm_function_unbind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - gether_cleanup(); -} - -static ssize_t ecm_ethaddr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct ecm_function_config *ecm = f->config; - return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n", - ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2], - ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]); -} - -static ssize_t ecm_ethaddr_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct ecm_function_config *ecm = f->config; - - if (sscanf(buf, 
"%02x:%02x:%02x:%02x:%02x:%02x\n", - (int *)&ecm->ethaddr[0], (int *)&ecm->ethaddr[1], - (int *)&ecm->ethaddr[2], (int *)&ecm->ethaddr[3], - (int *)&ecm->ethaddr[4], (int *)&ecm->ethaddr[5]) == 6) - return size; - return -EINVAL; -} - -static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show, - ecm_ethaddr_store); - -static struct device_attribute *ecm_function_attributes[] = { - &dev_attr_ecm_ethaddr, - NULL -}; -static struct android_usb_function ecm_function = { - .name = "cdc_ethernet", - .init = ecm_function_init, - .cleanup = ecm_function_cleanup, - .bind_config = ecm_function_bind_config, - .unbind_config = ecm_function_unbind_config, - .attributes = ecm_function_attributes, - .performance_lock = 1, -}; -#endif -#ifdef CONFIG_USB_ANDROID_RNDIS -/* RNDIS */ struct rndis_function_config { u8 ethaddr[ETH_ALEN]; u32 vendorID; @@ -1103,18 +693,9 @@ struct rndis_function_config { static int rndis_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) { - struct rndis_function_config *rndis; - struct android_dev *dev = _android_dev; - f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL); if (!f->config) return -ENOMEM; - - rndis = f->config; - - strncpy(rndis->manufacturer, dev->pdata->manufacturer_name, sizeof(rndis->manufacturer)); - rndis->vendorID = dev->pdata->vendor_id; - return 0; } @@ -1139,7 +720,7 @@ static int rndis_function_bind_config(struct android_usb_function *f, rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2], rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]); - ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "usb"); + ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis"); if (ret) { pr_err("%s: gether_setup failed\n", __func__); return ret; @@ -1283,9 +864,8 @@ static struct android_usb_function rndis_function = { .bind_config = rndis_function_bind_config, .unbind_config = rndis_function_unbind_config, .attributes = rndis_function_attributes, - .performance_lock = 1, }; -#endif + struct mass_storage_function_config { struct fsg_config fsg; @@ -1298,38 +878,14 @@ static int mass_storage_function_init(struct android_usb_function *f, struct mass_storage_function_config *config; struct fsg_common *common; int err; - struct android_dev *dev = _android_dev; - int i; config = kzalloc(sizeof(struct mass_storage_function_config), GFP_KERNEL); if (!config) return -ENOMEM; - - if (dev->pdata->nluns) { - config->fsg.nluns = dev->pdata->nluns; - if (config->fsg.nluns > FSG_MAX_LUNS) - config->fsg.nluns = FSG_MAX_LUNS; - for (i = 0; i < config->fsg.nluns; i++) { - if (dev->pdata->cdrom_lun & (1 << i)) { - config->fsg.luns[i].cdrom = 1; - config->fsg.luns[i].removable = 1; - config->fsg.luns[i].ro = 1; - } else { - config->fsg.luns[i].cdrom = 0; - config->fsg.luns[i].removable = 1; - config->fsg.luns[i].ro = 0; - } - } - } else { - /* default value */ - config->fsg.nluns = 1; - config->fsg.luns[0].removable = 1; - } - - config->fsg.vendor_name = dev->pdata->manufacturer_name; - config->fsg.product_name= dev->pdata->product_name; + config->fsg.nluns = 1; + config->fsg.luns[0].removable = 1; common = fsg_common_init(NULL, cdev, &config->fsg); if (IS_ERR(common)) { @@ -1337,15 +893,13 @@ static int mass_storage_function_init(struct android_usb_function *f, return PTR_ERR(common); } - for (i = 0; i < config->fsg.nluns; i++) { - err = sysfs_create_link(&f->dev->kobj, - &common->luns[i].dev.kobj, - common->luns[i].dev.kobj.name); - if (err) { - fsg_common_release(&common->ref); - kfree(config); - return 
err; - } + err = sysfs_create_link(&f->dev->kobj, + &common->luns[0].dev.kobj, + "lun"); + if (err) { + fsg_common_release(&common->ref); + kfree(config); + return err; } config->common = common; @@ -1436,109 +990,23 @@ static struct android_usb_function accessory_function = { .ctrlrequest = accessory_function_ctrlrequest, }; -#ifdef CONFIG_USB_ANDROID_PROJECTOR -static int projector_function_init(struct android_usb_function *f, - struct usb_composite_dev *cdev) -{ - return projector_setup(); -} - -static void projector_function_cleanup(struct android_usb_function *f) -{ - kfree(f->config); - f->config = NULL; -} - -static int projector_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - return projector_bind_config(c); -} - -struct android_usb_function projector_function = { - .name = "projector", - .init = projector_function_init, - .cleanup = projector_function_cleanup, - .bind_config = projector_function_bind_config, -}; -#endif - -#ifdef CONFIG_USB_ANDROID_USBNET -static int usbnet_function_init(struct android_usb_function *f, - struct usb_composite_dev *cdev) -{ - return usbnet_setup(); -} - -static int usbnet_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - return usbnet_bind_config(c); -} - -static int usbnet_function_ctrlrequest(struct android_usb_function *f, - struct usb_composite_dev *cdev, - const struct usb_ctrlrequest *c) -{ - return usbnet_ctrlrequest(cdev, c); -} - -struct android_usb_function usbnet_function = { - .name = "usbnet", - .init = usbnet_function_init, - .bind_config = usbnet_function_bind_config, - .ctrlrequest = usbnet_function_ctrlrequest, -}; -#endif static struct android_usb_function *supported_functions[] = { -#if 1 -#ifdef CONFIG_USB_ANDROID_RNDIS - &rndis_function, -#endif - &accessory_function, -#ifdef CONFIG_USB_ANDROID_MTP - &mtp_function, - &ptp_function, -#endif - &mass_storage_function, - &adb_function, -#ifdef CONFIG_USB_ANDROID_ECM - &ecm_function, -#endif - &diag_function, - - &modem_function, -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM - &modem_mdm_function, -#endif - &serial_function, -#ifdef CONFIG_USB_ANDROID_PROJECTOR - &projector_function, -#endif -#ifdef CONFIG_USB_ANDROID_ACM - &acm_function, -#endif -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) - &diag_mdm_function, -#endif -#if defined(CONFIG_USB_ANDROID_RMNET_SMD) &rmnet_smd_function, -#elif defined(CONFIG_USB_ANDROID_RMNET_SDIO) &rmnet_sdio_function, -#elif defined(CONFIG_USB_ANDROID_RMNET_SMD_SDIO) &rmnet_smd_sdio_function, -#elif defined(CONFIG_USB_ANDROID_RMNET_BAM) &rmnet_function, -#endif -#if 0 + &diag_function, + &serial_function, + &adb_function, &ccid_function, -#endif -#ifdef CONFIG_USB_ANDROID_USBNET - &usbnet_function, -#endif + &acm_function, + &mtp_function, + &ptp_function, + &rndis_function, + &mass_storage_function, + &accessory_function, NULL -#endif }; @@ -1563,13 +1031,6 @@ static int android_init_functions(struct android_usb_function **functions, goto err_create; } - if (device_create_file(f->dev, &dev_attr_on) < 0) { - pr_err("%s: Failed to create dev file %s", __func__, - f->dev_name); - goto err_create; - } - - if (f->init) { err = f->init(f, cdev); if (err) { @@ -1589,7 +1050,6 @@ static int android_init_functions(struct android_usb_function **functions, __func__, f->name); goto err_out; } - pr_info("%s %s init\n", __func__, f->name); } return 0; @@ -1617,15 +1077,14 @@ static void android_cleanup_functions(struct android_usb_function **functions) } } - static int +static int 
android_bind_enabled_functions(struct android_dev *dev, - struct usb_configuration *c) + struct usb_configuration *c) { struct android_usb_function *f; int ret; list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - pr_info("%s bind name: %s\n", __func__, f->name); ret = f->bind_config(f, c); if (ret) { pr_err("%s: %s failed", __func__, f->name); @@ -1653,18 +1112,42 @@ static int android_enable_function(struct android_dev *dev, char *name) struct android_usb_function *f; while ((f = *functions++)) { if (!strcmp(name, f->name)) { - pr_info("%s: %s enabled\n", __func__, name); list_add_tail(&f->enabled_list, &dev->enabled_functions); return 0; } } - pr_info("%s: %s failed\n", __func__, name); return -EINVAL; } /*-------------------------------------------------------------------------*/ /* /sys/class/android_usb/android%d/ interface */ +static ssize_t remote_wakeup_show(struct device *pdev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + !!(android_config_driver.bmAttributes & + USB_CONFIG_ATT_WAKEUP)); +} + +static ssize_t remote_wakeup_store(struct device *pdev, + struct device_attribute *attr, const char *buff, size_t size) +{ + int enable = 0; + + sscanf(buff, "%d", &enable); + + pr_debug("android_usb: %s remote wakeup\n", + enable ? "enabling" : "disabling"); + + if (enable) + android_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; + else + android_config_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP; + + return size; +} + static ssize_t functions_show(struct device *pdev, struct device_attribute *attr, char *buf) { @@ -1679,7 +1162,6 @@ functions_show(struct device *pdev, struct device_attribute *attr, char *buf) return buff - buf; } -/* TODO: replace by switch function and enable function */ static ssize_t functions_store(struct device *pdev, struct device_attribute *attr, const char *buff, size_t size) @@ -1689,9 +1171,6 @@ functions_store(struct device *pdev, struct device_attribute *attr, char buf[256], *b; int err; - pr_info("%s: buff: %s\n", __func__, buff); - return size; - INIT_LIST_HEAD(&dev->enabled_functions); strlcpy(buf, buff, sizeof(buf)); @@ -1724,16 +1203,6 @@ static ssize_t enable_store(struct device *pdev, struct device_attribute *attr, int enabled = 0; sscanf(buff, "%d", &enabled); - - if (enabled) - htc_usb_enable_function("adb", 1); - - pr_info("%s, buff: %s\n", __func__, buff); - - /* temporaily return immediately to prevent framework change usb behavior - */ - return size; - if (enabled && !dev->enabled) { /* update values in composite driver's copy of device descriptor */ cdev->desc.idVendor = device_desc.idVendor; @@ -1793,7 +1262,6 @@ field ## _store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t size) \ { \ int value; \ - pr_info("%s: %s\n", __func__, buf); \ if (sscanf(buf, format_string, &value) == 1) { \ device_desc.field = value; \ return size; \ @@ -1813,7 +1281,6 @@ static ssize_t \ field ## _store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t size) \ { \ - pr_info("%s: %s\n", __func__, buf); \ if (size >= sizeof(buffer)) return -EINVAL; \ if (sscanf(buf, "%255s", buffer) == 1) { \ return size; \ @@ -1836,6 +1303,8 @@ DESCRIPTOR_STRING_ATTR(iSerial, serial_string) static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show, functions_store); static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); static DEVICE_ATTR(state, S_IRUGO, state_show, NULL); +static DEVICE_ATTR(remote_wakeup, S_IRUGO | S_IWUSR, + 
remote_wakeup_show, remote_wakeup_store); static struct device_attribute *android_usb_attributes[] = { &dev_attr_idVendor, @@ -1850,11 +1319,10 @@ static struct device_attribute *android_usb_attributes[] = { &dev_attr_functions, &dev_attr_enable, &dev_attr_state, + &dev_attr_remote_wakeup, NULL }; -#include "htc_attr.c" - /*-------------------------------------------------------------------------*/ /* Composite driver */ @@ -1880,7 +1348,6 @@ static void android_unbind_config(struct usb_configuration *c) static int android_bind(struct usb_composite_dev *cdev) { struct android_dev *dev = _android_dev; - struct android_usb_platform_data *pdata = _android_dev->pdata; struct usb_gadget *gadget = cdev->gadget; int gcnum, id, ret; @@ -1905,22 +1372,11 @@ static int android_bind(struct usb_composite_dev *cdev) strings_dev[STRING_PRODUCT_IDX].id = id; device_desc.iProduct = id; - dev->products = pdata->products; - dev->num_products = pdata->num_products; - dev->in_house_functions = pdata->functions; - dev->num_functions = pdata->num_functions; - dev->match = pdata->match; - - /* default String */ - if (pdata->product_name) - strlcpy(product_string, pdata->product_name, - sizeof(product_string) - 1); - if (pdata->manufacturer_name) - strlcpy(manufacturer_string, pdata->manufacturer_name, - sizeof(manufacturer_string) - 1); - if (pdata->serial_number) - strlcpy(serial_string, pdata->serial_number, - sizeof(serial_string) - 1); + /* Default strings - should be updated by userspace */ + strlcpy(manufacturer_string, "Android", + sizeof(manufacturer_string) - 1); + strlcpy(product_string, "Android", sizeof(product_string) - 1); + strlcpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1); id = usb_string_id(cdev); if (id < 0) @@ -1947,14 +1403,6 @@ static int android_bind(struct usb_composite_dev *cdev) usb_gadget_set_selfpowered(gadget); dev->cdev = cdev; - - cdev->sw_connect2pc.name = "usb_connect2pc"; - ret = switch_dev_register(&cdev->sw_connect2pc); - if (ret < 0) - pr_err("switch_dev_register fail:usb_connect2pc\n"); - - - schedule_delayed_work(&dev->init_work, HZ); return 0; } @@ -1964,7 +1412,6 @@ static int android_usb_unbind(struct usb_composite_dev *cdev) cancel_work_sync(&dev->work); android_cleanup_functions(dev->functions); - switch_dev_unregister(&cdev->sw_connect2pc); return 0; } @@ -1975,101 +1422,6 @@ static struct usb_composite_driver android_usb_driver = { .unbind = android_usb_unbind, }; -#ifdef CONFIG_USB_ANDROID_USBNET -static struct work_struct reenumeration_work; -static void do_reenumeration_work(struct work_struct *w) -{ - struct android_dev *dev = _android_dev; - int err, funcs, product_id; - - if (dev->enabled != true) { - pr_info("%s: USB driver is not initialize\n", __func__); - return; - } - - mutex_lock(&function_bind_sem); - - funcs = htc_usb_get_func_combine_value(); - usb_gadget_disconnect(dev->cdev->gadget); - usb_remove_config(dev->cdev, &android_config_driver); - - INIT_LIST_HEAD(&dev->enabled_functions); - - if (funcs & (1 << USB_FUNCTION_ADB)) { - err = android_enable_function(dev, "adb"); - if (err) - pr_err("android_usb: Cannot enable '%s'", "adb"); - } - - err = android_enable_function(dev, "usbnet"); - if (err) - pr_err("android_usb: Cannot enable '%s'", "usbnet"); - - product_id = get_product_id(dev, &dev->enabled_functions); - - device_desc.idProduct = __constant_cpu_to_le16(product_id); - dev->cdev->desc.idProduct = device_desc.idProduct; - printk(KERN_INFO "%s:product_id = 0x%04x\n", __func__, product_id); - - usb_add_config(dev->cdev, 
&android_config_driver, android_bind_config); - mdelay(100); - usb_gadget_connect(dev->cdev->gadget); - dev->enabled = true; - - mutex_unlock(&function_bind_sem); -} - -static int handle_mode_switch(u16 switchIndex, struct usb_composite_dev *cdev) -{ - switch (switchIndex) { - case 0x1F: - /* Enable the USBNet function and disable all others but adb */ - printk(KERN_INFO "[USBNET] %s: 0x%02x\n", __func__, switchIndex); - cdev->desc.bDeviceClass = USB_CLASS_COMM; - break; - /* Add other switch functions */ - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int android_switch_setup(struct usb_gadget *gadget, - const struct usb_ctrlrequest *c) -{ - int value = -EOPNOTSUPP; - u16 wIndex = le16_to_cpu(c->wIndex); - u16 wValue = le16_to_cpu(c->wValue); - u16 wLength = le16_to_cpu(c->wLength); - struct usb_composite_dev *cdev = get_gadget_data(gadget); - struct usb_request *req = cdev->req; - /* struct android_dev *dev = _android_dev; */ - - switch (c->bRequestType & USB_TYPE_MASK) { - case USB_TYPE_VENDOR: - /* If the request is a mode switch , handle it */ - if ((c->bRequest == 1) && (wValue == 0) && (wLength == 0)) { - value = handle_mode_switch(wIndex, cdev); - if (value != 0) - return value; - - req->zero = 0; - req->length = value; - if (usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC)) - printk(KERN_ERR "ep0 in queue failed\n"); - - /* force reenumeration */ - schedule_work(&reenumeration_work); - } - break; - /* Add Other type of requests here */ - default: - break; - } - return value; -} -#endif - static int android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c) { @@ -2085,12 +1437,6 @@ android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c) req->length = 0; gadget->ep0->driver_data = cdev; -#ifdef CONFIG_USB_ANDROID_USBNET - value = android_switch_setup(gadget, c); - if (value >= 0) - return value; -#endif - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { if (f->ctrlrequest) { value = f->ctrlrequest(f, cdev, c); @@ -2158,6 +1504,16 @@ static int android_create_device(struct android_dev *dev) return 0; } +static void android_destroy_device(struct android_dev *dev) +{ + struct device_attribute **attrs = android_usb_attributes; + struct device_attribute *attr; + + while ((attr = *attrs++)) + device_remove_file(dev->dev, attr); + device_destroy(android_class, dev->dev->devt); +} + static int __devinit android_probe(struct platform_device *pdev) { struct android_usb_platform_data *pdata = pdev->dev.platform_data; @@ -2165,10 +1521,6 @@ static int __devinit android_probe(struct platform_device *pdev) dev->pdata = pdata; - init_mfg_serialno(); - if (sysfs_create_group(&pdev->dev.kobj, &android_usb_attr_group)) - pr_err("%s: fail to create sysfs\n", __func__); - return 0; } @@ -2176,134 +1528,63 @@ static struct platform_driver android_platform_driver = { .driver = { .name = "android_usb"}, }; -static void android_usb_init_work(struct work_struct *data) -{ - struct android_dev *dev = _android_dev; - struct android_usb_platform_data *pdata = dev->pdata; - struct usb_composite_dev *cdev = dev->cdev; - int ret = 0; - __u16 product_id; - - /* initial ums+adb by default */ - ret = android_enable_function(dev, "mass_storage"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "mass_storage"); - -#if 0 - ret = android_enable_function(dev, "adb"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "adb"); -#endif - - /* initial function depends on radio flag */ - if (pdata->diag_init) { - ret = android_enable_function(dev, 
"diag"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "diag"); - } - if (pdata->modem_init) { - ret = android_enable_function(dev, "modem"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "modem"); -#if defined(CONFIG_USB_ANDROID_MDM9K_MODEM) - ret = android_enable_function(dev, "modem_mdm"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "modem_mdm"); -#endif - } - -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) - if (pdata->diag_init) { - ret = android_enable_function(dev, "diag_mdm"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "diag_mdm"); - } -#endif - - if (pdata->rmnet_init) { - ret = android_enable_function(dev, "rmnet"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "rmnet"); - } - - - cdev->desc.idVendor = __constant_cpu_to_le16(pdata->vendor_id), - product_id = get_product_id(dev, &dev->enabled_functions); - - if (dev->match) - product_id = dev->match(product_id, intrsharing); - - cdev->desc.idProduct = __constant_cpu_to_le16(product_id), - cdev->desc.bcdDevice = device_desc.bcdDevice; - cdev->desc.bDeviceClass = device_desc.bDeviceClass; - cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass; - cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol; - - device_desc.idVendor = cdev->desc.idVendor; - device_desc.idProduct = cdev->desc.idProduct; - - ret = usb_add_config(cdev, &android_config_driver, - android_bind_config); - - usb_gadget_connect(cdev->gadget); - dev->enabled = true; - pr_info("%s: ret: %d\n", __func__, ret); -} - static int __init init(void) { struct android_dev *dev; - int err; + int ret; - connect2pc = false; android_class = class_create(THIS_MODULE, "android_usb"); if (IS_ERR(android_class)) return PTR_ERR(android_class); dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) + if (!dev) { + pr_err("%s(): Failed to alloc memory for android_dev\n", + __func__); + class_destroy(android_class); return -ENOMEM; - + } dev->functions = supported_functions; INIT_LIST_HEAD(&dev->enabled_functions); INIT_WORK(&dev->work, android_work); - INIT_DELAYED_WORK(&dev->init_work, android_usb_init_work); -#ifdef CONFIG_USB_ANDROID_USBNET - INIT_WORK(&reenumeration_work, do_reenumeration_work); -#endif - err = android_create_device(dev); - if (err) { - class_destroy(android_class); - kfree(dev); - return err; + ret = android_create_device(dev); + if (ret) { + pr_err("%s(): android_create_device failed\n", __func__); + goto err_dev; } - _android_dev = dev; - - wake_lock_init(&android_usb_idle_wake_lock, WAKE_LOCK_IDLE, - "android_usb_idle"); - -#ifdef CONFIG_PERFLOCK - perf_lock_init(&android_usb_perf_lock, PERF_LOCK_HIGHEST, "android_usb"); -#endif - /* Override composite driver functions */ composite_driver.setup = android_setup; composite_driver.disconnect = android_disconnect; - platform_driver_probe(&android_platform_driver, android_probe); + ret = platform_driver_probe(&android_platform_driver, android_probe); + if (ret) { + pr_err("%s(): Failed to register android" + "platform driver\n", __func__); + goto err_probe; + } + ret = usb_composite_probe(&android_usb_driver, android_bind); + if (ret) { + pr_err("%s(): Failed to register android" + "composite driver\n", __func__); + platform_driver_unregister(&android_platform_driver); + goto err_probe; + } + return ret; - return usb_composite_probe(&android_usb_driver, android_bind); +err_probe: + android_destroy_device(dev); +err_dev: + kfree(dev); + class_destroy(android_class); + return ret; } module_init(init); static void __exit cleanup(void) { - - 
wake_lock_destroy(&android_usb_idle_wake_lock); - usb_composite_unregister(&android_usb_driver); class_destroy(android_class); kfree(_android_dev); diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c index 3ece2d33..47505ced 100644 --- a/drivers/usb/gadget/ci13xxx_msm.c +++ b/drivers/usb/gadget/ci13xxx_msm.c @@ -14,13 +14,8 @@ #include #include #include -#include #include #include -#include - -#include -static struct usb_info *the_usb_info; #include "ci13xxx_udc.c" @@ -33,13 +28,13 @@ static irqreturn_t msm_udc_irq(int irq, void *data) static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event) { - /* struct device *dev = udc->gadget.dev.parent; */ + struct device *dev = udc->gadget.dev.parent; switch (event) { case CI13XXX_CONTROLLER_RESET_EVENT: dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n"); writel(0, USB_AHBBURST); - writel(0, USB_AHBMODE); + writel_relaxed(0x08, USB_AHBMODE); break; default: dev_dbg(dev, "unknown ci13xxx_udc event\n"); @@ -52,8 +47,8 @@ static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = { .flags = CI13XXX_REGS_SHARED | CI13XXX_REQUIRE_TRANSCEIVER | CI13XXX_PULLUP_ON_VBUS | - CI13XXX_DISABLE_STREAMING | - CI13XXX_ZERO_ITC, + CI13XXX_ZERO_ITC | + CI13XXX_DISABLE_STREAMING, .notify_event = ci13xxx_msm_notify_event, }; @@ -61,16 +56,11 @@ static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = { static int ci13xxx_msm_probe(struct platform_device *pdev) { struct resource *res; - struct usb_info *ui; void __iomem *regs; int irq; int ret; dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n"); - ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); - if (!ui) - return -ENOMEM; - the_usb_info = ui; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { @@ -116,24 +106,8 @@ static int ci13xxx_msm_probe(struct platform_device *pdev) return ret; } -static void ci13xxx_msm_shutdown(struct platform_device *pdev) -{ - struct msm_otg *motg; - struct ci13xxx *udc = _udc; - - if (!udc || !udc->transceiver) - return; - - motg = container_of(udc->transceiver, struct msm_otg, otg); - - if (!atomic_read(&motg->in_lpm)) - ci13xxx_pullup(&udc->gadget, 0); - -} - static struct platform_driver ci13xxx_msm_driver = { .probe = ci13xxx_msm_probe, - .shutdown = ci13xxx_msm_shutdown, .driver = { .name = "msm_hsusb", }, }; diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c index d0f3cc32..e97602d7 100644 --- a/drivers/usb/gadget/ci13xxx_udc.c +++ b/drivers/usb/gadget/ci13xxx_udc.c @@ -155,6 +155,7 @@ static struct { #define CAP_ENDPTLISTADDR (0x018UL) #define CAP_PORTSC (0x044UL) #define CAP_DEVLC (0x084UL) +#define CAP_ENDPTPIPEID (0x0BCUL) #define CAP_USBMODE (hw_bank.lpm ? 0x0C8UL : 0x068UL) #define CAP_ENDPTSETUPSTAT (hw_bank.lpm ? 0x0D8UL : 0x06CUL) #define CAP_ENDPTPRIME (hw_bank.lpm ? 0x0DCUL : 0x070UL) @@ -367,43 +368,17 @@ static int hw_device_state(u32 dma) * * This function returns an error code */ -#define FLUSH_WAIT_US 5 -#define FLUSH_TIMEOUT (2 * (USEC_PER_SEC / FLUSH_WAIT_US)) static int hw_ep_flush(int num, int dir) { - uint32_t unflushed = 0; - uint32_t stat = 0; - int cnt = 0; int n = hw_ep_bit(num, dir); - /* flush endpoint, canceling transactions - ** - this can take a "large amount of time" (per databook) - ** - the flush can fail in some cases, thus we check STAT - ** and repeat if we're still operating - ** (does the fact that this doesn't use the tripwire matter?!) 
- */ - while (cnt < FLUSH_TIMEOUT) { + do { + /* flush any pending transfer */ hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n)); - while ((unflushed = hw_cread(CAP_ENDPTFLUSH, BIT(n))) && - cnt < FLUSH_TIMEOUT) { - cnt++; - udelay(FLUSH_WAIT_US); - } - - stat = hw_cread(CAP_ENDPTSTAT, BIT(n)); - if (cnt >= FLUSH_TIMEOUT) - goto err; - if (!stat) - goto done; - cnt++; - udelay(FLUSH_WAIT_US); - } + while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) + cpu_relax(); + } while (hw_cread(CAP_ENDPTSTAT, BIT(n))); -err: - USB_WARNING("%s: Could not complete flush! NOT GOOD! " - "stat: %x unflushed: %x bits: %x\n", __func__, - stat, unflushed, n); -done: return 0; } @@ -903,7 +878,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra) stamp = stamp * 1000000 + tval.tv_usec; scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG, - "%04X\t?%02X %-7.7s %4i \t%s\n", + "%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); dbg_inc(&dbg_data.idx); @@ -911,7 +886,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra) write_unlock_irqrestore(&dbg_data.lck, flags); if (dbg_data.tty != 0) - pr_notice("%04X\t?%02X %-7.7s %4i \t%s\n", + pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); } @@ -1071,15 +1046,15 @@ static ssize_t show_inters(struct device *dev, struct device_attribute *attr, n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n", isr_statistics.test); - n += scnprintf(buf + n, PAGE_SIZE - n, "?ui = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n", isr_statistics.ui); - n += scnprintf(buf + n, PAGE_SIZE - n, "?uei = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n", isr_statistics.uei); - n += scnprintf(buf + n, PAGE_SIZE - n, "?pci = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n", isr_statistics.pci); - n += scnprintf(buf + n, PAGE_SIZE - n, "?uri = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n", isr_statistics.uri); - n += scnprintf(buf + n, PAGE_SIZE - n, "?sli = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n", isr_statistics.sli); n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n", isr_statistics.none); @@ -1664,6 +1639,23 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) if (!mReq->req.no_interrupt) mReq->ptr->token |= TD_IOC; } + + /* MSM Specific: updating the request as required for + * SPS mode. Enable MSM proprietary DMA engine according + * to the UDC private data in the request.
+ */ + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) { + if (mReq->req.udc_priv & MSM_SPS_MODE) { + mReq->ptr->token = TD_STATUS_ACTIVE; + if (mReq->req.udc_priv & MSM_TBE) + mReq->ptr->next = TD_TERMINATE; + else + mReq->ptr->next = MSM_ETD_TYPE | mReq->dma; + if (!mReq->req.no_interrupt) + mReq->ptr->token |= MSM_ETD_IOC; + } + } + mReq->ptr->page[0] = mReq->req.dma; for (i = 1; i < 5; i++) mReq->ptr->page[i] = @@ -1683,14 +1675,10 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) wmb(); if (hw_cread(CAP_ENDPTPRIME, BIT(n))) goto done; - i = 0; do { hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW); tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n)); - mb(); - } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW) && (i++ < 100)); - if (i == 100) - USBH_ERR("%s: Write USBCMD_ATDTW failed\n", __func__); + } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW)); hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0); if (tmp_stat) goto done; @@ -1710,6 +1698,39 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) } mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */ + + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) { + if (mReq->req.udc_priv & MSM_SPS_MODE) { + mEp->qh.ptr->td.next |= MSM_ETD_TYPE; + i = hw_cread(CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), ~0); + /* Read current value of this EPs pipe id */ + i = (mEp->dir == TX) ? + ((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) : + (i & MSM_PIPE_ID_MASK); + /* If requested pipe id is different from current, + then write it */ + if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) { + if (mEp->dir == TX) + hw_cwrite( + CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + MSM_PIPE_ID_MASK << + MSM_TX_PIPE_ID_OFS, + (mReq->req.udc_priv & + MSM_PIPE_ID_MASK) + << MSM_TX_PIPE_ID_OFS); + else + hw_cwrite( + CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + MSM_PIPE_ID_MASK, + mReq->req.udc_priv & + MSM_PIPE_ID_MASK); + } + } + } + mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */ mEp->qh.ptr->cap |= QH_ZLT; @@ -1742,6 +1763,10 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0) return -EBUSY; + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) + if ((mReq->req.udc_priv & MSM_SPS_MODE) && + (mReq->req.udc_priv & MSM_TBE)) + return -EBUSY; if (mReq->zptr) { if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0) return -EBUSY; @@ -1759,19 +1784,12 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) } mReq->req.status = mReq->ptr->token & TD_STATUS; - if ((TD_STATUS_HALTED & mReq->req.status) != 0) { - USB_ERR("%s: HALTED EP%d %s %6d\n", __func__, mEp->num, - ((mEp->dir == TX)? "I":"O"), mReq->req.length); + if ((TD_STATUS_HALTED & mReq->req.status) != 0) mReq->req.status = -1; - } else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0) { - USB_ERR("%s: DT_ERR EP%d %s %6d\n", __func__, mEp->num, - ((mEp->dir == TX)? "I":"O"), mReq->req.length); + else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0) mReq->req.status = -1; - } else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0) { - USB_ERR("%s: TR_ERR EP%d %s %6d\n", __func__, mEp->num, - ((mEp->dir == TX)? 
"I":"O"), mReq->req.length); + else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0) mReq->req.status = -1; - } mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES; mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES); @@ -1793,6 +1811,7 @@ __releases(mEp->lock) __acquires(mEp->lock) { struct ci13xxx_ep *mEpTemp = mEp; + unsigned val; trace("%p", mEp); @@ -1808,6 +1827,21 @@ __acquires(mEp->lock) list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue); list_del_init(&mReq->queue); + + /* MSM Specific: Clear end point proprietary register */ + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) { + if (mReq->req.udc_priv & MSM_SPS_MODE) { + val = hw_cread(CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + ~0); + + if (val != MSM_EP_PIPE_ID_RESET_VAL) + hw_cwrite( + CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + ~0, MSM_EP_PIPE_ID_RESET_VAL); + } + } mReq->req.status = -ESHUTDOWN; if (mReq->map) { @@ -1837,7 +1871,7 @@ __acquires(mEp->lock) * This function returns an error code * Caller must hold lock */ -static int _gadget_stop_activity(struct usb_gadget *gadget, int mute) +static int _gadget_stop_activity(struct usb_gadget *gadget) { struct usb_ep *ep; struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget); @@ -1861,16 +1895,8 @@ static int _gadget_stop_activity(struct usb_gadget *gadget, int mute) } usb_ep_fifo_flush(&udc->ep0out.ep); usb_ep_fifo_flush(&udc->ep0in.ep); - /* cancel pending ep0 transactions */ - spin_lock(udc->lock); - _ep_nuke(&udc->ep0out); - _ep_nuke(&udc->ep0in); - spin_unlock(udc->lock); - if (mute) - udc->driver->mute_disconnect(gadget); - else - udc->driver->disconnect(gadget); + udc->driver->disconnect(gadget); /* make sure to disable all endpoints */ gadget_for_each_ep(ep, gadget) { @@ -1910,7 +1936,12 @@ __acquires(udc->lock) dbg_event(0xFF, "BUS RST", 0); spin_unlock(udc->lock); - retval = _gadget_stop_activity(&udc->gadget, 1); + + /*stop charging upon reset */ + if (udc->transceiver) + otg_set_power(udc->transceiver, 0); + + retval = _gadget_stop_activity(&udc->gadget); if (retval) goto done; @@ -2053,8 +2084,11 @@ __acquires(mEp->lock) trace("%p", udc); mEp = (udc->ep0_dir == TX) ? 
&udc->ep0out : &udc->ep0in; - udc->status->context = udc; - udc->status->complete = isr_setup_status_complete; + if (udc->status) { + udc->status->context = udc; + udc->status->complete = isr_setup_status_complete; + } else + return -EINVAL; spin_unlock(mEp->lock); retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC); @@ -2754,7 +2788,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active) hw_device_state(udc->ep0out.qh.dma); } else { hw_device_state(0); - _gadget_stop_activity(&udc->gadget, 0); + _gadget_stop_activity(&udc->gadget); pm_runtime_put_sync(&_gadget->dev); } } @@ -2785,8 +2819,6 @@ static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active) } spin_unlock_irqrestore(udc->lock, flags); - USB_INFO("%s: %d\n", __func__, is_active); - if (is_active) hw_device_state(udc->ep0out.qh.dma); else @@ -2974,7 +3006,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) || udc->vbus_active) { hw_device_state(0); - _gadget_stop_activity(&udc->gadget, 0); + _gadget_stop_activity(&udc->gadget); pm_runtime_put(&udc->gadget.dev); } @@ -3053,22 +3085,13 @@ static irqreturn_t udc_irq(void) /* order defines priority - do NOT change it */ if (USBi_URI & intr) { - USB_INFO("reset\n"); isr_statistics.uri++; isr_reset_handler(udc); - - if (udc->transceiver) - udc->transceiver->notify_usb_attached(); } if (USBi_PCI & intr) { isr_statistics.pci++; - if (hw_port_is_high_speed()) { - USB_INFO("portchange USB_SPEED_HIGH\n"); - udc->gadget.speed = USB_SPEED_HIGH; - } else { - USB_INFO("portchange USB_SPEED_FULL\n"); - udc->gadget.speed = USB_SPEED_FULL; - } + udc->gadget.speed = hw_port_is_high_speed() ? + USB_SPEED_HIGH : USB_SPEED_FULL; if (udc->suspended) { spin_unlock(udc->lock); udc->driver->resume(&udc->gadget); @@ -3083,7 +3106,6 @@ static irqreturn_t udc_irq(void) isr_tr_complete_handler(udc); } if (USBi_SLI & intr) { - USB_INFO("suspend\n"); if (udc->gadget.speed != USB_SPEED_UNKNOWN) { udc->suspended = 1; spin_unlock(udc->lock); diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h index b917fe9b..35b0712b 100644 --- a/drivers/usb/gadget/ci13xxx_udc.h +++ b/drivers/usb/gadget/ci13xxx_udc.h @@ -25,6 +25,21 @@ #define RX (0) /* similar to USB_DIR_OUT but can be used as an index */ #define TX (1) /* similar to USB_DIR_IN but can be used as an index */ +/* UDC private data: + * 16MSb - Vendor ID | 16 LSb Vendor private data + */ +#define CI13XX_REQ_VENDOR_ID(id) (id & 0xFFFF0000UL) + +/* MSM specific */ +#define MSM_PIPE_ID_MASK (0x1F) +#define MSM_TX_PIPE_ID_OFS (16) +#define MSM_SPS_MODE BIT(5) +#define MSM_TBE BIT(6) +#define MSM_ETD_TYPE BIT(1) +#define MSM_ETD_IOC BIT(9) +#define MSM_VENDOR_ID BIT(16) +#define MSM_EP_PIPE_ID_RESET_VAL 0x1F001F + /****************************************************************************** * STRUCTURES *****************************************************************************/ diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 48cda33e..4c336956 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -73,54 +73,8 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); static char composite_manufacturer[50]; - -int htcctusbcmd; - -static ssize_t print_switch_name(struct switch_dev *sdev, char *buf) -{ - return sprintf(buf, "%s\n", sdev->name); -} - -static ssize_t print_switch_state(struct switch_dev *sdev, char *buf) -{ - - return sprintf(buf, "%s\n", (htcctusbcmd 
? "Capture" : "None")); -} - -struct switch_dev compositesdev = { - .name = "htcctusbcmd", - .print_name = print_switch_name, - .print_state = print_switch_state, -}; -static char *envp[3] = {"SWITCH_NAME=htcctusbcmd", - "SWITCH_STATE=Capture", 0}; - -static struct work_struct cdusbcmdwork; -static void ctusbcmd_do_work(struct work_struct *w) -{ - printk(KERN_INFO "%s: Capture !\n", __func__); - kobject_uevent_env(&compositesdev.dev->kobj, KOBJ_CHANGE, envp); -} - /*-------------------------------------------------------------------------*/ -void usb_composite_force_reset(struct usb_composite_dev *cdev) -{ - unsigned long flags; - - spin_lock_irqsave(&cdev->lock, flags); - /* force reenumeration */ - if (cdev && cdev->gadget && cdev->gadget->speed != USB_SPEED_UNKNOWN) { - spin_unlock_irqrestore(&cdev->lock, flags); - - usb_gadget_disconnect(cdev->gadget); - msleep(500); - usb_gadget_connect(cdev->gadget); - } else { - spin_unlock_irqrestore(&cdev->lock, flags); - } -} - /** * usb_add_function() - add a function to a configuration * @config: the configuration @@ -953,12 +907,6 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) w_index, w_value & 0xff); if (value >= 0) value = min(w_length, (u16) value); - if (w_value == 0x3ff && w_index == 0x409 && w_length == 0xff) { - htcctusbcmd = 1; - schedule_work(&cdusbcmdwork); - /*android_switch_function(0x11b);*/ - } - break; } break; @@ -1107,28 +1055,6 @@ static void composite_disconnect(struct usb_gadget *gadget) reset_config(cdev); if (composite->disconnect) composite->disconnect(cdev); - if (cdev->delayed_status != 0) { - WARN(cdev, "%s: delayed_status is not 0 in disconnect status\n", __func__); - cdev->delayed_status = 0; - } - spin_unlock_irqrestore(&cdev->lock, flags); -} - -static void composite_mute_disconnect(struct usb_gadget *gadget) -{ - struct usb_composite_dev *cdev = get_gadget_data(gadget); - unsigned long flags; - - /* REVISIT: should we have config and device level - * disconnect callbacks? - */ - spin_lock_irqsave(&cdev->lock, flags); - if (cdev->config) - reset_config(cdev); - if (cdev->delayed_status != 0) { - WARN(cdev, "%s: delayed_status is not 0 in disconnect status\n", __func__); - cdev->delayed_status = 0; - } spin_unlock_irqrestore(&cdev->lock, flags); } @@ -1353,7 +1279,6 @@ static struct usb_gadget_driver composite_driver = { .setup = composite_setup, .disconnect = composite_disconnect, - .mute_disconnect = composite_mute_disconnect, .suspend = composite_suspend, .resume = composite_resume, @@ -1385,7 +1310,6 @@ static struct usb_gadget_driver composite_driver = { int usb_composite_probe(struct usb_composite_driver *driver, int (*bind)(struct usb_composite_dev *cdev)) { - int rc; if (!driver || !driver->dev || !bind || composite) return -EINVAL; @@ -1398,10 +1322,6 @@ int usb_composite_probe(struct usb_composite_driver *driver, composite = driver; composite_gadget_bind = bind; - rc = switch_dev_register(&compositesdev); - INIT_WORK(&cdusbcmdwork, ctusbcmd_do_work); - if (rc < 0) - pr_err("%s: switch_dev_register fail", __func__); return usb_gadget_probe_driver(&composite_driver, composite_bind); } diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c index 8cbb301c..05e65e5c 100644 --- a/drivers/usb/gadget/f_accessory.c +++ b/drivers/usb/gadget/f_accessory.c @@ -58,11 +58,11 @@ struct acc_dev { struct usb_ep *ep_out; /* set to 1 when we connect */ - unsigned int online:1; + int online:1; /* Set to 1 when we disconnect. * Not cleared until our file is closed. 
*/ - unsigned int disconnected:1; + int disconnected:1; /* strings sent by the host */ char manufacturer[ACC_STRING_SIZE]; diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index bd6226cb..380ef87c 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -5,6 +5,7 @@ * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * Copyright (C) 2009 by Samsung Electronics + * Copyright (c) 2011 Code Aurora Forum. All rights reserved. * Author: Michal Nazarewicz (m.nazarewicz@samsung.com) * * This software is distributed under the terms of the GNU General @@ -17,6 +18,8 @@ #include #include #include +#include +#include #include "u_serial.h" #include "gadget_chips.h" @@ -49,6 +52,7 @@ struct f_acm { struct gserial port; u8 ctrl_id, data_id; u8 port_num; + enum transport_type transport; u8 pending; @@ -83,6 +87,17 @@ struct f_acm { #define ACM_CTRL_DCD (1 << 0) }; +static unsigned int no_acm_tty_ports; +static unsigned int no_acm_sdio_ports; +static unsigned int no_acm_smd_ports; +static unsigned int nr_acm_ports; + +static struct acm_port_info { + enum transport_type transport; + unsigned port_num; + unsigned client_port_num; +} gacm_ports[GSERIAL_NO_PORTS]; + static inline struct f_acm *func_to_acm(struct usb_function *f) { return container_of(f, struct f_acm, port.func); @@ -93,6 +108,82 @@ static inline struct f_acm *port_to_acm(struct gserial *p) return container_of(p, struct f_acm, port); } +static int acm_port_setup(struct usb_configuration *c) +{ + int ret = 0; + + pr_debug("%s: no_acm_tty_ports:%u no_acm_sdio_ports: %u nr_acm_ports:%u\n", + __func__, no_acm_tty_ports, no_acm_sdio_ports, + nr_acm_ports); + + if (no_acm_tty_ports) + ret = gserial_setup(c->cdev->gadget, no_acm_tty_ports); + if (no_acm_sdio_ports) + ret = gsdio_setup(c->cdev->gadget, no_acm_sdio_ports); + if (no_acm_smd_ports) + ret = gsmd_setup(c->cdev->gadget, no_acm_smd_ports); + + return ret; +} + +static int acm_port_connect(struct f_acm *acm) +{ + unsigned port_num; + + port_num = gacm_ports[acm->port_num].client_port_num; + + + pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_port_no:%d\n", + __func__, xport_to_str(acm->transport), + acm, &acm->port, acm->port_num, port_num); + + switch (acm->transport) { + case USB_GADGET_XPORT_TTY: + gserial_connect(&acm->port, port_num); + break; + case USB_GADGET_XPORT_SDIO: + gsdio_connect(&acm->port, port_num); + break; + case USB_GADGET_XPORT_SMD: + gsmd_connect(&acm->port, port_num); + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(acm->transport)); + return -ENODEV; + } + + return 0; +} + +static int acm_port_disconnect(struct f_acm *acm) +{ + unsigned port_num; + + port_num = gacm_ports[acm->port_num].client_port_num; + + pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_pno:%d\n", + __func__, xport_to_str(acm->transport), + acm, &acm->port, acm->port_num, port_num); + + switch (acm->transport) { + case USB_GADGET_XPORT_TTY: + gserial_disconnect(&acm->port); + break; + case USB_GADGET_XPORT_SDIO: + gsdio_disconnect(&acm->port, port_num); + break; + case USB_GADGET_XPORT_SMD: + gsmd_disconnect(&acm->port, port_num); + break; + default: + pr_err("%s: Un-supported transport:%s\n", __func__, + xport_to_str(acm->transport)); + return -ENODEV; + } + + return 0; +} /*-------------------------------------------------------------------------*/ /* notification endpoint uses smallish and infrequent fixed-size messages */ @@ -333,8 +424,7 @@ static int 
acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* SET_LINE_CODING ... just read and save what the host sends */ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_SET_LINE_CODING: - if (w_length != sizeof(struct usb_cdc_line_coding) - || w_index != acm->ctrl_id) + if (w_length != sizeof(struct usb_cdc_line_coding)) goto invalid; value = w_length; @@ -345,8 +435,6 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* GET_LINE_CODING ... return what host sent, or initial value */ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_GET_LINE_CODING: - if (w_index != acm->ctrl_id) - goto invalid; value = min_t(unsigned, w_length, sizeof(struct usb_cdc_line_coding)); @@ -356,9 +444,6 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* SET_CONTROL_LINE_STATE ... save what the host sent */ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_SET_CONTROL_LINE_STATE: - if (w_index != acm->ctrl_id) - goto invalid; - value = 0; /* FIXME we should not allow data to flow until the @@ -366,6 +451,12 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) * that bit, we should return to that no-flow state. */ acm->port_handshake_bits = w_value; + if (acm->port.notify_modem) { + unsigned port_num = + gacm_ports[acm->port_num].client_port_num; + + acm->port.notify_modem(&acm->port, port_num, w_value); + } break; default: @@ -405,25 +496,25 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) usb_ep_disable(acm->notify); } else { VDBG(cdev, "init acm ctrl interface %d\n", intf); - acm->notify_desc = ep_choose(cdev->gadget, - acm->hs.notify, - acm->fs.notify); } + acm->notify_desc = ep_choose(cdev->gadget, + acm->hs.notify, + acm->fs.notify); usb_ep_enable(acm->notify, acm->notify_desc); acm->notify->driver_data = acm; } else if (intf == acm->data_id) { if (acm->port.in->driver_data) { DBG(cdev, "reset acm ttyGS%d\n", acm->port_num); - gserial_disconnect(&acm->port); + acm_port_disconnect(acm); } else { DBG(cdev, "activate acm ttyGS%d\n", acm->port_num); - acm->port.in_desc = ep_choose(cdev->gadget, - acm->hs.in, acm->fs.in); - acm->port.out_desc = ep_choose(cdev->gadget, - acm->hs.out, acm->fs.out); } - gserial_connect(&acm->port, acm->port_num); + acm->port.in_desc = ep_choose(cdev->gadget, + acm->hs.in, acm->fs.in); + acm->port.out_desc = ep_choose(cdev->gadget, + acm->hs.out, acm->fs.out); + acm_port_connect(acm); } else return -EINVAL; @@ -437,7 +528,7 @@ static void acm_disable(struct usb_function *f) struct usb_composite_dev *cdev = f->config->cdev; DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num); - gserial_disconnect(&acm->port); + acm_port_disconnect(acm); usb_ep_disable(acm->notify); acm->notify->driver_data = NULL; } @@ -568,6 +659,15 @@ static int acm_send_break(struct gserial *port, int duration) return acm_notify_serial_state(acm); } +static int acm_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits) +{ + struct f_acm *acm = port_to_acm(port); + + acm->serial_state = ctrl_bits; + + return acm_notify_serial_state(acm); +} + /*-------------------------------------------------------------------------*/ /* ACM function driver setup/binding */ @@ -655,6 +755,8 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(acm_hs_function); + if (!f->hs_descriptors) + goto 
fail; acm->hs.in = usb_find_endpoint(acm_hs_function, f->hs_descriptors, &acm_hs_in_desc); @@ -672,6 +774,11 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: + if (f->hs_descriptors) + usb_free_descriptors(f->hs_descriptors); + if (f->descriptors) + usb_free_descriptors(f->descriptors); + if (acm->notify_req) gs_free_req(acm->notify, acm->notify_req); @@ -697,6 +804,7 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); gs_free_req(acm->notify, acm->notify_req); + kfree(acm->port.func.name); kfree(acm); } @@ -763,12 +871,18 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) spin_lock_init(&acm->lock); acm->port_num = port_num; + acm->transport = gacm_ports[port_num].transport; acm->port.connect = acm_connect; acm->port.disconnect = acm_disconnect; acm->port.send_break = acm_send_break; + acm->port.send_modem_ctrl_bits = acm_send_modem_ctrl_bits; - acm->port.func.name = "acm"; + acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num + 1); + if (!acm->port.func.name) { + kfree(acm); + return -ENOMEM; + } acm->port.func.strings = acm_strings; /* descriptors are per-instance copies */ acm->port.func.bind = acm_bind; @@ -782,3 +896,44 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) kfree(acm); return status; } + +/** + * acm_init_port - bind a acm_port to its transport + */ +static int acm_init_port(int port_num, const char *name) +{ + enum transport_type transport; + + if (port_num >= GSERIAL_NO_PORTS) + return -ENODEV; + + transport = str_to_xport(name); + pr_debug("%s, port:%d, transport:%s\n", __func__, + port_num, xport_to_str(transport)); + + gacm_ports[port_num].transport = transport; + gacm_ports[port_num].port_num = port_num; + + switch (transport) { + case USB_GADGET_XPORT_TTY: + gacm_ports[port_num].client_port_num = no_acm_tty_ports; + no_acm_tty_ports++; + break; + case USB_GADGET_XPORT_SDIO: + gacm_ports[port_num].client_port_num = no_acm_sdio_ports; + no_acm_sdio_ports++; + break; + case USB_GADGET_XPORT_SMD: + gacm_ports[port_num].client_port_num = no_acm_smd_ports; + no_acm_smd_ports++; + break; + default: + pr_err("%s: Un-supported transport transport: %u\n", + __func__, gacm_ports[port_num].transport); + return -ENODEV; + } + + nr_acm_ports++; + + return 0; +} diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c index 686b9ff9..b85805c0 100644 --- a/drivers/usb/gadget/f_adb.c +++ b/drivers/usb/gadget/f_adb.c @@ -27,10 +27,6 @@ #include #include -#define ADB_IOCTL_MAGIC 's' -#define ADB_ERR_PAYLOAD_STUCK _IOW(ADB_IOCTL_MAGIC, 0, unsigned) -#define ADB_ATS_ENABLE _IOR(ADB_IOCTL_MAGIC, 1, unsigned) - #define ADB_BULK_BUFFER_SIZE 4096 /* number of tx requests to allocate */ @@ -118,7 +114,6 @@ static struct usb_descriptor_header *hs_adb_descs[] = { /* temporary variable used between adb_open() and adb_gadget_bind() */ static struct adb_dev *_adb_dev; -int board_get_usb_ats(void); static inline struct adb_dev *func_to_adb(struct usb_function *f) { @@ -197,10 +192,9 @@ static void adb_complete_in(struct usb_ep *ep, struct usb_request *req) { struct adb_dev *dev = _adb_dev; - if (req->status != 0) { - printk(KERN_INFO "[USB] %s: err (%d)\n", __func__, req->status); + if (req->status != 0) atomic_set(&dev->error, 1); - } + adb_req_put(dev, &dev->tx_idle, req); wake_up(&dev->write_wq); @@ -211,10 +205,9 @@ static void adb_complete_out(struct usb_ep *ep, struct usb_request *req) struct adb_dev *dev = _adb_dev; 
dev->rx_done = 1; - if (req->status != 0) { - printk(KERN_INFO "[USB] %s: err (%d)\n", __func__, req->status); + if (req->status != 0) atomic_set(&dev->error, 1); - } + wake_up(&dev->read_wq); } @@ -452,55 +445,6 @@ static struct miscdevice adb_device = { .fops = &adb_fops, }; -int htc_usb_enable_function(char *name, int ebl); -static int adb_enable_open(struct inode *ip, struct file *fp) -{ - printk(KERN_INFO "[USB] enabling adb\n"); - htc_usb_enable_function("adb", 1); - return 0; -} - -static int adb_enable_release(struct inode *ip, struct file *fp) -{ - printk(KERN_INFO "[USB] disabling adb\n"); - htc_usb_enable_function("adb", 0); - return 0; -} - -static long adb_enable_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - int rc = 0; - - switch (cmd) { - case ADB_ERR_PAYLOAD_STUCK: { - printk(KERN_INFO "[USB] adbd read payload stuck (reset ADB)\n"); - break; - } - case ADB_ATS_ENABLE: { - printk(KERN_INFO "[USB] ATS enable = %d\n",board_get_usb_ats()); - rc = put_user(board_get_usb_ats(),(int __user *)arg); - break; - } - default: - rc = -EINVAL; - } - return rc; -} - -static const struct file_operations adb_enable_fops = { - .owner = THIS_MODULE, - .open = adb_enable_open, - .release = adb_enable_release, - .unlocked_ioctl = adb_enable_ioctl, -}; - -static struct miscdevice adb_enable_device = { - .minor = MISC_DYNAMIC_MINOR, - .name = "android_adb_enable", - .fops = &adb_enable_fops, -}; - static int adb_function_bind(struct usb_configuration *c, struct usb_function *f) @@ -646,10 +590,6 @@ static int adb_setup(void) if (ret) goto err; - ret = misc_register(&adb_enable_device); - if (ret) - goto err; - return 0; err: @@ -661,7 +601,6 @@ static int adb_setup(void) static void adb_cleanup(void) { misc_deregister(&adb_device); - misc_deregister(&adb_enable_device); kfree(_adb_dev); _adb_dev = NULL; diff --git a/drivers/usb/gadget/f_ccid.c b/drivers/usb/gadget/f_ccid.c new file mode 100644 index 00000000..a11f439b --- /dev/null +++ b/drivers/usb/gadget/f_ccid.c @@ -0,0 +1,1014 @@ +/* + * f_ccid.c -- CCID function Driver + * + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "f_ccid.h" + +#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header) +#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header) +#define CTRL_BUF_SIZE 4 +#define FUNCTION_NAME "ccid" +#define CCID_NOTIFY_INTERVAL 5 +#define CCID_NOTIFY_MAXPACKET 4 + +/* number of tx requests to allocate */ +#define TX_REQ_MAX 4 + +struct ccid_descs { + struct usb_endpoint_descriptor *in; + struct usb_endpoint_descriptor *out; + struct usb_endpoint_descriptor *notify; +}; + +struct ccid_ctrl_dev { + atomic_t opened; + struct list_head tx_q; + wait_queue_head_t tx_wait_q; + unsigned char buf[CTRL_BUF_SIZE]; + int tx_ctrl_done; +}; + +struct ccid_bulk_dev { + atomic_t error; + atomic_t opened; + atomic_t rx_req_busy; + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + struct usb_request *rx_req; + int rx_done; + struct list_head tx_idle; +}; + +struct f_ccid { + struct usb_function function; + struct usb_composite_dev *cdev; + int ifc_id; + spinlock_t lock; + atomic_t online; + /* usb descriptors */ + struct ccid_descs fs; + struct ccid_descs hs; + /* usb eps*/ + struct usb_ep *notify; + struct usb_ep *in; + struct usb_ep *out; + struct usb_endpoint_descriptor *in_desc; + struct usb_endpoint_descriptor *out_desc; + struct usb_endpoint_descriptor *notify_desc; + struct usb_request *notify_req; + struct ccid_ctrl_dev ctrl_dev; + struct ccid_bulk_dev bulk_dev; + int dtr_state; +}; + +static struct f_ccid *_ccid_dev; +static struct miscdevice ccid_bulk_device; +static struct miscdevice ccid_ctrl_device; + +/* Interface Descriptor: */ +static struct usb_interface_descriptor ccid_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_CSCID, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, +}; +/* CCID Class Descriptor */ +static struct usb_ccid_class_descriptor ccid_class_desc = { + .bLength = sizeof(ccid_class_desc), + .bDescriptorType = CCID_DECRIPTOR_TYPE, + .bcdCCID = CCID1_10, + .bMaxSlotIndex = 0, + /* This value indicates what voltages the CCID can supply to slots */ + .bVoltageSupport = VOLTS_3_0, + .dwProtocols = PROTOCOL_TO, + /* Default ICC clock frequency in KHz */ + .dwDefaultClock = 3580, + /* Maximum supported ICC clock frequency in KHz */ + .dwMaximumClock = 3580, + .bNumClockSupported = 0, + /* Default ICC I/O data rate in bps */ + .dwDataRate = 9600, + /* Maximum supported ICC I/O data rate in bps */ + .dwMaxDataRate = 9600, + .bNumDataRatesSupported = 0, + .dwMaxIFSD = 0, + .dwSynchProtocols = 0, + .dwMechanical = 0, + /* This value indicates what intelligent features the CCID has */ + .dwFeatures = CCID_FEATURES_EXC_SAPDU | + CCID_FEATURES_AUTO_PNEGO | + CCID_FEATURES_AUTO_BAUD | + CCID_FEATURES_AUTO_CLOCK | + CCID_FEATURES_AUTO_VOLT | + CCID_FEATURES_AUTO_ACTIV | + CCID_FEATURES_AUTO_PCONF, + /* extended APDU level Message Length */ + .dwMaxCCIDMessageLength = 0x200, + .bClassGetResponse = 0x0, + .bClassEnvelope = 0x0, + .wLcdLayout = 0, + .bPINSupport = 0, + .bMaxCCIDBusySlots = 1 +}; +/* Full speed support: */ +static struct usb_endpoint_descriptor ccid_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET), + .bInterval = 1 << CCID_NOTIFY_INTERVAL, +}; + 
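A note on the two encodings of the same ~32 ms notify polling interval used by the full-speed descriptor above and the high-speed descriptor that follows: a full-speed interrupt endpoint's bInterval is the period itself in 1 ms frames, while a high-speed interrupt endpoint's bInterval is an exponent over 125 us microframes (period = 2^(bInterval - 1)). The user-space sketch below only illustrates that arithmetic; the helper names are illustrative and not part of this patch.

#include <stdio.h>

#define CCID_NOTIFY_INTERVAL 5

/* Full speed: bInterval is the polling period itself, in 1 ms frames. */
static unsigned int fs_interrupt_period_ms(unsigned int bInterval)
{
	return bInterval;			/* 1 << 5 = 32  ->  32 ms */
}

/* High speed: the period is 2^(bInterval - 1) microframes of 125 us. */
static unsigned int hs_interrupt_period_us(unsigned int bInterval)
{
	return (1u << (bInterval - 1)) * 125;	/* 2^8 * 125 us = 32000 us */
}

int main(void)
{
	printf("FS: %u ms\n", fs_interrupt_period_ms(1 << CCID_NOTIFY_INTERVAL));
	printf("HS: %u us\n", hs_interrupt_period_us(CCID_NOTIFY_INTERVAL + 4));
	return 0;
}

Both encodings work out to the same 32 ms period, which is why the high-speed table can reuse CCID_NOTIFY_INTERVAL with a +4 adjustment instead of defining a second constant.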
+static struct usb_endpoint_descriptor ccid_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor ccid_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *ccid_fs_descs[] = { + (struct usb_descriptor_header *) &ccid_interface_desc, + (struct usb_descriptor_header *) &ccid_class_desc, + (struct usb_descriptor_header *) &ccid_fs_notify_desc, + (struct usb_descriptor_header *) &ccid_fs_in_desc, + (struct usb_descriptor_header *) &ccid_fs_out_desc, + NULL, +}; + +/* High speed support: */ +static struct usb_endpoint_descriptor ccid_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET), + .bInterval = CCID_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor ccid_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor ccid_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *ccid_hs_descs[] = { + (struct usb_descriptor_header *) &ccid_interface_desc, + (struct usb_descriptor_header *) &ccid_class_desc, + (struct usb_descriptor_header *) &ccid_hs_notify_desc, + (struct usb_descriptor_header *) &ccid_hs_in_desc, + (struct usb_descriptor_header *) &ccid_hs_out_desc, + NULL, +}; + +static inline struct f_ccid *func_to_ccid(struct usb_function *f) +{ + return container_of(f, struct f_ccid, function); +} + +static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&ccid_dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&ccid_dev->lock, flags); +} + +static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev, + struct list_head *head) +{ + unsigned long flags; + struct usb_request *req = NULL; + + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!list_empty(head)) { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&ccid_dev->lock, flags); + return req; +} + +static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + switch (req->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case 0: + break; + default: + pr_err("CCID notify ep error %d\n", req->status); + } +} + +static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct f_ccid *ccid_dev = _ccid_dev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + + if (req->status != 0) + atomic_set(&bulk_dev->error, 1); + + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + wake_up(&bulk_dev->write_wq); +} + +static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct f_ccid *ccid_dev = 
_ccid_dev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + if (req->status != 0) + atomic_set(&bulk_dev->error, 1); + + bulk_dev->rx_done = 1; + wake_up(&bulk_dev->read_wq); +} + +static struct usb_request * +ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + req = NULL; + } + } + + return req ? req : ERR_PTR(-ENOMEM); +} + +static void ccid_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static int +ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function); + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int ret = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + if (!atomic_read(&ccid_dev->online)) + return -ENOTCONN; + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | CCIDGENERICREQ_ABORT: + if (w_length != 0) + goto invalid; + ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT; + ctrl_dev->buf[1] = w_value & 0xFF; + ctrl_dev->buf[2] = (w_value >> 8) & 0xFF; + ctrl_dev->buf[3] = 0x00; + ctrl_dev->tx_ctrl_done = 1; + wake_up(&ctrl_dev->tx_wait_q); + return 0; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | CCIDGENERICREQ_GET_CLOCK_FREQUENCIES: + if (w_length > req->length) + goto invalid; + *(u32 *) req->buf = + cpu_to_le32(ccid_class_desc.dwDefaultClock); + ret = min_t(u32, w_length, + sizeof(ccid_class_desc.dwDefaultClock)); + break; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | CCIDGENERICREQ_GET_DATA_RATES: + if (w_length > req->length) + goto invalid; + *(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate); + ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate)); + break; + + default: +invalid: + pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
*/ + if (ret >= 0) { + pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + pr_err("ccid ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static void ccid_function_disable(struct usb_function *f) +{ + struct f_ccid *ccid_dev = func_to_ccid(f); + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + struct usb_request *req; + + /* Disable endpoints */ + usb_ep_disable(ccid_dev->notify); + usb_ep_disable(ccid_dev->in); + usb_ep_disable(ccid_dev->out); + /* Free endpoint related requests */ + ccid_request_free(ccid_dev->notify_req, ccid_dev->notify); + if (!atomic_read(&bulk_dev->rx_req_busy)) + ccid_request_free(bulk_dev->rx_req, ccid_dev->out); + while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle))) + ccid_request_free(req, ccid_dev->in); + + ccid_dev->dtr_state = 0; + atomic_set(&ccid_dev->online, 0); + /* Wake up threads */ + wake_up(&bulk_dev->write_wq); + wake_up(&bulk_dev->read_wq); + wake_up(&ctrl_dev->tx_wait_q); + +} + +static int +ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_ccid *ccid_dev = func_to_ccid(f); + struct usb_composite_dev *cdev = ccid_dev->cdev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct usb_request *req; + int ret = 0; + int i; + + ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify, + sizeof(struct usb_ccid_notification), GFP_ATOMIC); + if (IS_ERR(ccid_dev->notify_req)) { + pr_err("%s: unable to allocate memory for notify req\n", + __func__); + return PTR_ERR(ccid_dev->notify_req); + } + ccid_dev->notify_req->complete = ccid_notify_complete; + ccid_dev->notify_req->context = ccid_dev; + + /* now allocate requests for our endpoints */ + req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE, + GFP_ATOMIC); + if (IS_ERR(req)) { + pr_err("%s: unable to allocate memory for out req\n", + __func__); + ret = PTR_ERR(req); + goto free_notify; + } + req->complete = ccid_bulk_complete_out; + req->context = ccid_dev; + bulk_dev->rx_req = req; + + for (i = 0; i < TX_REQ_MAX; i++) { + req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE, + GFP_ATOMIC); + if (IS_ERR(req)) { + pr_err("%s: unable to allocate memory for in req\n", + __func__); + ret = PTR_ERR(req); + goto free_bulk_out; + } + req->complete = ccid_bulk_complete_in; + req->context = ccid_dev; + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + } + + /* choose the descriptors and enable endpoints */ + ccid_dev->notify_desc = ep_choose(cdev->gadget, + ccid_dev->hs.notify, + ccid_dev->fs.notify); + ret = usb_ep_enable(ccid_dev->notify, ccid_dev->notify_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, ccid_dev->notify->name, ret); + goto free_bulk_in; + } + ccid_dev->notify->driver_data = ccid_dev; + + ccid_dev->in_desc = ep_choose(cdev->gadget, + ccid_dev->hs.in, ccid_dev->fs.in); + ret = usb_ep_enable(ccid_dev->in, ccid_dev->in_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, ccid_dev->in->name, ret); + goto disable_ep_notify; + } + + ccid_dev->out_desc = ep_choose(cdev->gadget, + ccid_dev->hs.out, ccid_dev->fs.out); + ret = usb_ep_enable(ccid_dev->out, ccid_dev->out_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, ccid_dev->out->name, ret); + goto disable_ep_in; + } + ccid_dev->dtr_state = 1; + 
atomic_set(&ccid_dev->online, 1); + return ret; + +disable_ep_in: + usb_ep_disable(ccid_dev->in); +disable_ep_notify: + usb_ep_disable(ccid_dev->notify); + ccid_dev->notify->driver_data = NULL; +free_bulk_in: + while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle))) + ccid_request_free(req, ccid_dev->in); +free_bulk_out: + ccid_request_free(bulk_dev->rx_req, ccid_dev->out); +free_notify: + ccid_request_free(ccid_dev->notify_req, ccid_dev->notify); + return ret; +} + +static void ccid_function_unbind(struct usb_configuration *c, + struct usb_function *f) +{ + if (gadget_is_dualspeed(c->cdev->gadget)) + usb_free_descriptors(f->hs_descriptors); + usb_free_descriptors(f->descriptors); + +} + +static int ccid_function_bind(struct usb_configuration *c, + struct usb_function *f) +{ + struct f_ccid *ccid_dev = func_to_ccid(f); + struct usb_ep *ep; + struct usb_composite_dev *cdev = c->cdev; + int ret = -ENODEV; + + ccid_dev->ifc_id = usb_interface_id(c, f); + if (ccid_dev->ifc_id < 0) { + pr_err("%s: unable to allocate ifc id, err:%d", + __func__, ccid_dev->ifc_id); + return ccid_dev->ifc_id; + } + ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id; + + ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc); + if (!ep) { + pr_err("%s: usb epnotify autoconfig failed\n", __func__); + return -ENODEV; + } + ccid_dev->notify = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc); + if (!ep) { + pr_err("%s: usb epin autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_in_fail; + } + ccid_dev->in = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc); + if (!ep) { + pr_err("%s: usb epout autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_out_fail; + } + ccid_dev->out = ep; + ep->driver_data = cdev; + + f->descriptors = usb_copy_descriptors(ccid_fs_descs); + if (!f->descriptors) + goto ep_auto_out_fail; + + ccid_dev->fs.in = usb_find_endpoint(ccid_fs_descs, + f->descriptors, + &ccid_fs_in_desc); + ccid_dev->fs.out = usb_find_endpoint(ccid_fs_descs, + f->descriptors, + &ccid_fs_out_desc); + ccid_dev->fs.notify = usb_find_endpoint(ccid_fs_descs, + f->descriptors, + &ccid_fs_notify_desc); + + if (gadget_is_dualspeed(cdev->gadget)) { + ccid_hs_in_desc.bEndpointAddress = + ccid_fs_in_desc.bEndpointAddress; + ccid_hs_out_desc.bEndpointAddress = + ccid_fs_out_desc.bEndpointAddress; + ccid_hs_notify_desc.bEndpointAddress = + ccid_fs_notify_desc.bEndpointAddress; + + /* copy descriptors, and track endpoint copies */ + f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs); + if (!f->hs_descriptors) + goto ep_auto_out_fail; + + ccid_dev->hs.in = usb_find_endpoint(ccid_hs_descs, + f->hs_descriptors, &ccid_hs_in_desc); + ccid_dev->hs.out = usb_find_endpoint(ccid_hs_descs, + f->hs_descriptors, &ccid_hs_out_desc); + ccid_dev->hs.notify = usb_find_endpoint(ccid_hs_descs, + f->hs_descriptors, &ccid_hs_notify_desc); + } + + pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__, + gadget_is_dualspeed(cdev->gadget) ? 
"dual" : "full", + ccid_dev->in->name, ccid_dev->out->name); + + return 0; + +ep_auto_out_fail: + ccid_dev->out->driver_data = NULL; + ccid_dev->out = NULL; +ep_auto_in_fail: + ccid_dev->in->driver_data = NULL; + ccid_dev->in = NULL; + + return ret; +} + +static int ccid_bulk_open(struct inode *ip, struct file *fp) +{ + struct f_ccid *ccid_dev = _ccid_dev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + unsigned long flags; + + pr_debug("ccid_bulk_open\n"); + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + + if (atomic_read(&bulk_dev->opened)) { + pr_debug("%s: bulk device is already opened\n", __func__); + return -EBUSY; + } + atomic_set(&bulk_dev->opened, 1); + /* clear the error latch */ + atomic_set(&bulk_dev->error, 0); + spin_lock_irqsave(&ccid_dev->lock, flags); + fp->private_data = ccid_dev; + spin_unlock_irqrestore(&ccid_dev->lock, flags); + + return 0; +} + +static int ccid_bulk_release(struct inode *ip, struct file *fp) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + + pr_debug("ccid_bulk_release\n"); + atomic_set(&bulk_dev->opened, 0); + return 0; +} + +static ssize_t ccid_bulk_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct usb_request *req; + int r = count, xfer; + int ret; + unsigned long flags; + + pr_debug("ccid_bulk_read(%d)\n", count); + + if (count > BULK_OUT_BUFFER_SIZE) { + pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n", + __func__, BULK_OUT_BUFFER_SIZE, count); + return -ENOMEM; + } + + if (atomic_read(&bulk_dev->error)) { + r = -EIO; + pr_err("%s bulk_dev_error\n", __func__); + goto done; + } + +requeue_req: + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + /* queue a request */ + req = bulk_dev->rx_req; + req->length = count; + bulk_dev->rx_done = 0; + spin_unlock_irqrestore(&ccid_dev->lock, flags); + ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + pr_err("%s usb ep queue failed\n", __func__); + atomic_set(&bulk_dev->error, 1); + goto done; + } + /* wait for a request to complete */ + ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done || + atomic_read(&bulk_dev->error) || + !atomic_read(&ccid_dev->online)); + if (ret < 0) { + atomic_set(&bulk_dev->error, 1); + r = ret; + usb_ep_dequeue(ccid_dev->out, req); + goto done; + } + if (!atomic_read(&bulk_dev->error)) { + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!atomic_read(&ccid_dev->online)) { + spin_unlock_irqrestore(&ccid_dev->lock, flags); + pr_debug("%s: USB cable not connected\n", __func__); + r = -ENODEV; + goto done; + } + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) { + spin_unlock_irqrestore(&ccid_dev->lock, flags); + goto requeue_req; + } + xfer = (req->actual < count) ? 
req->actual : count; + atomic_set(&bulk_dev->rx_req_busy, 1); + spin_unlock_irqrestore(&ccid_dev->lock, flags); + + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + + spin_lock_irqsave(&ccid_dev->lock, flags); + atomic_set(&bulk_dev->rx_req_busy, 0); + if (!atomic_read(&ccid_dev->online)) { + ccid_request_free(bulk_dev->rx_req, ccid_dev->out); + spin_unlock_irqrestore(&ccid_dev->lock, flags); + pr_debug("%s: USB cable not connected\n", __func__); + r = -ENODEV; + goto done; + } + spin_unlock_irqrestore(&ccid_dev->lock, flags); + } else { + r = -EIO; + } +done: + pr_debug("ccid_bulk_read returning %d\n", r); + return r; +} + +static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct usb_request *req = 0; + int r = count; + int ret; + unsigned long flags; + + pr_debug("ccid_bulk_write(%d)\n", count); + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + + if (!count) { + pr_err("%s: zero length ctrl pkt\n", __func__); + return -ENODEV; + } + if (count > BULK_IN_BUFFER_SIZE) { + pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n", + __func__, BULK_IN_BUFFER_SIZE, count); + return -ENOMEM; + } + + + /* get an idle tx request to use */ + ret = wait_event_interruptible(bulk_dev->write_wq, + ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) || + atomic_read(&bulk_dev->error))); + + if (ret < 0) { + r = ret; + goto done; + } + + if (atomic_read(&bulk_dev->error)) { + pr_err(" %s dev->error\n", __func__); + r = -EIO; + goto done; + } + if (copy_from_user(req->buf, buf, count)) { + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", + __func__); + ccid_request_free(req, ccid_dev->in); + r = -ENODEV; + } else { + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + r = -EFAULT; + } + goto done; + } + req->length = count; + ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL); + if (ret < 0) { + pr_debug("ccid_bulk_write: xfer error %d\n", ret); + atomic_set(&bulk_dev->error, 1); + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + r = -EIO; + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!atomic_read(&ccid_dev->online)) { + spin_unlock_irqrestore(&ccid_dev->lock, flags); + pr_debug("%s: USB cable not connected\n", + __func__); + while ((req = ccid_req_get(ccid_dev, + &bulk_dev->tx_idle))) + ccid_request_free(req, ccid_dev->in); + r = -ENODEV; + } + spin_unlock_irqrestore(&ccid_dev->lock, flags); + goto done; + } +done: + pr_debug("ccid_bulk_write returning %d\n", r); + return r; +} + +static const struct file_operations ccid_bulk_fops = { + .owner = THIS_MODULE, + .read = ccid_bulk_read, + .write = ccid_bulk_write, + .open = ccid_bulk_open, + .release = ccid_bulk_release, +}; + +static struct miscdevice ccid_bulk_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ccid_bulk", + .fops = &ccid_bulk_fops, +}; + +static int ccid_bulk_device_init(struct f_ccid *dev) +{ + int ret; + struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev; + + init_waitqueue_head(&bulk_dev->read_wq); + init_waitqueue_head(&bulk_dev->write_wq); + INIT_LIST_HEAD(&bulk_dev->tx_idle); + + ret = misc_register(&ccid_bulk_device); + if (ret) { + pr_err("%s: failed to register misc device\n", __func__); + return ret; + } + + return 0; +} + +static int ccid_ctrl_open(struct inode *inode, struct file *fp) +{ + struct f_ccid *ccid_dev = _ccid_dev; + struct ccid_ctrl_dev *ctrl_dev = 
&ccid_dev->ctrl_dev; + unsigned long flags; + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + if (atomic_read(&ctrl_dev->opened)) { + pr_debug("%s: ctrl device is already opened\n", __func__); + return -EBUSY; + } + atomic_set(&ctrl_dev->opened, 1); + spin_lock_irqsave(&ccid_dev->lock, flags); + fp->private_data = ccid_dev; + spin_unlock_irqrestore(&ccid_dev->lock, flags); + + return 0; +} + + +static int ccid_ctrl_release(struct inode *inode, struct file *fp) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + + atomic_set(&ctrl_dev->opened, 0); + + return 0; +} + +static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + int ret = 0; + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + if (count > CTRL_BUF_SIZE) + count = CTRL_BUF_SIZE; + + ret = wait_event_interruptible(ctrl_dev->tx_wait_q, + ctrl_dev->tx_ctrl_done); + if (ret < 0) + return ret; + ctrl_dev->tx_ctrl_done = 0; + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + ret = copy_to_user(buf, ctrl_dev->buf, count); + if (ret) + return -EFAULT; + + return count; +} + +static long +ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct usb_request *req = ccid_dev->notify_req; + struct usb_ccid_notification *ccid_notify = req->buf; + void __user *argp = (void __user *)arg; + int ret = 0; + + switch (cmd) { + case CCID_NOTIFY_CARD: + if (copy_from_user(ccid_notify, argp, + sizeof(struct usb_ccid_notification))) + return -EFAULT; + req->length = 2; + break; + case CCID_NOTIFY_HWERROR: + if (copy_from_user(ccid_notify, argp, + sizeof(struct usb_ccid_notification))) + return -EFAULT; + req->length = 4; + break; + case CCID_READ_DTR: + if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int))) + return -EFAULT; + return 0; + } + ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL); + if (ret < 0) { + pr_err("ccid notify ep enqueue error %d\n", ret); + return ret; + } + return 0; +} + +static const struct file_operations ccid_ctrl_fops = { + .owner = THIS_MODULE, + .open = ccid_ctrl_open, + .release = ccid_ctrl_release, + .read = ccid_ctrl_read, + .unlocked_ioctl = ccid_ctrl_ioctl, +}; + +static struct miscdevice ccid_ctrl_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ccid_ctrl", + .fops = &ccid_ctrl_fops, +}; + +static int ccid_ctrl_device_init(struct f_ccid *dev) +{ + int ret; + struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev; + + INIT_LIST_HEAD(&ctrl_dev->tx_q); + init_waitqueue_head(&ctrl_dev->tx_wait_q); + + ret = misc_register(&ccid_ctrl_device); + if (ret) { + pr_err("%s: failed to register misc device\n", __func__); + return ret; + } + + return 0; +} + +static int ccid_bind_config(struct usb_configuration *c) +{ + struct f_ccid *ccid_dev = _ccid_dev; + + pr_debug("ccid_bind_config\n"); + ccid_dev->cdev = c->cdev; + ccid_dev->function.name = FUNCTION_NAME; + ccid_dev->function.descriptors = ccid_fs_descs; + ccid_dev->function.hs_descriptors = ccid_hs_descs; + ccid_dev->function.bind = ccid_function_bind; + ccid_dev->function.unbind = ccid_function_unbind; + ccid_dev->function.set_alt = ccid_function_set_alt; + ccid_dev->function.setup = 
ccid_function_setup; + ccid_dev->function.disable = ccid_function_disable; + + return usb_add_function(c, &ccid_dev->function); + +} + +static int ccid_setup(void) +{ + struct f_ccid *ccid_dev; + int ret; + + ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL); + if (!ccid_dev) + return -ENOMEM; + + _ccid_dev = ccid_dev; + spin_lock_init(&ccid_dev->lock); + + ret = ccid_ctrl_device_init(ccid_dev); + if (ret) { + pr_err("%s: ccid_ctrl_device_init failed, err:%d\n", + __func__, ret); + goto err_ctrl_init; + } + ret = ccid_bulk_device_init(ccid_dev); + if (ret) { + pr_err("%s: ccid_bulk_device_init failed, err:%d\n", + __func__, ret); + goto err_bulk_init; + } + + return 0; +err_bulk_init: + misc_deregister(&ccid_ctrl_device); +err_ctrl_init: + kfree(ccid_dev); + pr_err("ccid gadget driver failed to initialize\n"); + return ret; +} + +static void ccid_cleanup(void) +{ + misc_deregister(&ccid_bulk_device); + misc_deregister(&ccid_ctrl_device); + kfree(_ccid_dev); +} diff --git a/drivers/usb/gadget/f_ccid.h b/drivers/usb/gadget/f_ccid.h new file mode 100644 index 00000000..4d6a0eac --- /dev/null +++ b/drivers/usb/gadget/f_ccid.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details + */ + +#ifndef __F_CCID_H +#define __F_CCID_H + +#define PROTOCOL_TO 0x01 +#define PROTOCOL_T1 0x02 +#define ABDATA_SIZE 512 + +/* define for dwFeatures for Smart Card Device Class Descriptors */ +/* No special characteristics */ +#define CCID_FEATURES_NADA 0x00000000 +/* Automatic parameter configuration based on ATR data */ +#define CCID_FEATURES_AUTO_PCONF 0x00000002 +/* Automatic activation of ICC on inserting */ +#define CCID_FEATURES_AUTO_ACTIV 0x00000004 +/* Automatic ICC voltage selection */ +#define CCID_FEATURES_AUTO_VOLT 0x00000008 +/* Automatic ICC clock frequency change */ +#define CCID_FEATURES_AUTO_CLOCK 0x00000010 +/* Automatic baud rate change */ +#define CCID_FEATURES_AUTO_BAUD 0x00000020 +/*Automatic parameters negotiation made by the CCID */ +#define CCID_FEATURES_AUTO_PNEGO 0x00000040 +/* Automatic PPS made by the CCID according to the active parameters */ +#define CCID_FEATURES_AUTO_PPS 0x00000080 +/* CCID can set ICC in clock stop mode */ +#define CCID_FEATURES_ICCSTOP 0x00000100 +/* NAD value other than 00 accepted (T=1 protocol in use) */ +#define CCID_FEATURES_NAD 0x00000200 +/* Automatic IFSD exchange as first exchange (T=1 protocol in use) */ +#define CCID_FEATURES_AUTO_IFSD 0x00000400 +/* TPDU level exchanges with CCID */ +#define CCID_FEATURES_EXC_TPDU 0x00010000 +/* Short APDU level exchange with CCID */ +#define CCID_FEATURES_EXC_SAPDU 0x00020000 +/* Short and Extended APDU level exchange with CCID */ +#define CCID_FEATURES_EXC_APDU 0x00040000 +/* USB Wake up signaling supported on card insertion and removal */ +#define CCID_FEATURES_WAKEUP 0x00100000 + +#define CCID_NOTIFY_CARD _IOW('C', 1, struct usb_ccid_notification) +#define CCID_NOTIFY_HWERROR _IOW('C', 2, struct usb_ccid_notification) +#define CCID_READ_DTR _IOR('C', 3, int) + +struct usb_ccid_notification { + unsigned char buf[4]; +} __packed; + 
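The CCID_NOTIFY_CARD and CCID_NOTIFY_HWERROR ioctls above are how a user-space CCID daemon pushes an interrupt-IN notification through the "ccid_ctrl" misc device that f_ccid.c registers. Below is a minimal sketch of that call, assuming the misc node is exposed as /dev/ccid_ctrl and that the payload follows the CCID class spec's RDR_to_PC_NotifySlotChange layout (message type 0x50 followed by the slot-state bitmap); both the path and the byte values are assumptions to verify against the target userspace and the spec, not something this patch defines.

/* Hypothetical user-space caller; mirrors the ioctl definitions above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct usb_ccid_notification {
	unsigned char buf[4];
};

#define CCID_NOTIFY_CARD	_IOW('C', 1, struct usb_ccid_notification)

int main(void)
{
	struct usb_ccid_notification notify;
	int fd = open("/dev/ccid_ctrl", O_RDWR);	/* device path is an assumption */

	if (fd < 0) {
		perror("open ccid_ctrl");
		return 1;
	}

	memset(&notify, 0, sizeof(notify));
	notify.buf[0] = 0x50;	/* RDR_to_PC_NotifySlotChange (assumed per CCID spec) */
	notify.buf[1] = 0x03;	/* slot 0: card present, state changed */

	/* ccid_ctrl_ioctl() copies buf and queues 2 bytes on the interrupt-IN ep. */
	if (ioctl(fd, CCID_NOTIFY_CARD, &notify) < 0)
		perror("CCID_NOTIFY_CARD");

	close(fd);
	return 0;
}

CCID_NOTIFY_HWERROR takes the same structure but the driver queues four bytes; CCID_READ_DTR simply copies the cached dtr_state back to the caller.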
+struct ccid_bulk_in_header { + unsigned char bMessageType; + unsigned long wLength; + unsigned char bSlot; + unsigned char bSeq; + unsigned char bStatus; + unsigned char bError; + unsigned char bSpecific; + unsigned char abData[ABDATA_SIZE]; + unsigned char bSizeToSend; +} __packed; + +struct ccid_bulk_out_header { + unsigned char bMessageType; + unsigned long wLength; + unsigned char bSlot; + unsigned char bSeq; + unsigned char bSpecific_0; + unsigned char bSpecific_1; + unsigned char bSpecific_2; + unsigned char APDU[ABDATA_SIZE]; +} __packed; +#endif diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c index 2606afd1..f492143f 100644 --- a/drivers/usb/gadget/f_diag.c +++ b/drivers/usb/gadget/f_diag.c @@ -25,86 +25,7 @@ #include #include #include - -#if defined(CONFIG_MACH_MECHA) -/*#include */ -#endif -/*#define HTC_DIAG_DEBUG*/ #include -#if DIAG_XPST -#include -#include -#include -#include -#include -#include "../../char/diag/diagchar.h" -#include "../../char/diag/diagfwd.h" -#include "../../char/diag/diagmem.h" -#include "../../char/diag/diagchar_hdlc.h" -#if defined(CONFIG_MACH_MECHA) -#include "../../../arch/arm/mach-msm/7x30-smd/sdio_diag.h" -#endif - -static void fdiag_debugfs_init(void); - -#define USB_DIAG_IOC_MAGIC 0xFF -#define USB_DIAG_FUNC_IOC_ENABLE_SET _IOW(USB_DIAG_IOC_MAGIC, 1, int) -#define USB_DIAG_FUNC_IOC_ENABLE_GET _IOR(USB_DIAG_IOC_MAGIC, 2, int) -#define USB_DIAG_FUNC_IOC_REGISTER_SET _IOW(USB_DIAG_IOC_MAGIC, 3, char *) -#define USB_DIAG_FUNC_IOC_AMR_SET _IOW(USB_DIAG_IOC_MAGIC, 4, int) - -#define USB_DIAG_NV_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 1, uint16_t *) -#define USB_DIAG_NV_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 2, uint16_t *) -#define USB_DIAG_NV_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 3, uint16_t *) -#define USB_DIAG_NV_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 4, uint16_t *) -/* -#define USB_DIAG_RC9_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 5, uint16_t *) -#define USB_DIAG_RC9_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 6, uint16_t *) -#define USB_DIAG_RC9_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 7, uint16_t *) -#define USB_DIAG_RC9_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 8, uint16_t *) -*/ -#define USB_DIAG_PRL_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 9, uint16_t *) -#define USB_DIAG_PRL_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 10, uint16_t *) -#define USB_DIAG_PRL_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 11, uint16_t *) -#define USB_DIAG_PRL_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 12, uint16_t *) -#define USB_DIAG_M29_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 13, uint16_t *) -#define USB_DIAG_M29_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 14, uint16_t *) -#define USB_DIAG_M29_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 15, uint16_t *) -#define USB_DIAG_M29_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 16, uint16_t *) - - -#define USB_DIAG_FUNC_IOC_MODEM_GET _IOR(USB_DIAG_IOC_MAGIC, 17, int) -#define SMD_MAX 8192 -#define NV_TABLE_SZ 128 -#define M29_TABLE_SZ 10 -#define PRL_TABLE_SZ 10 - -#define EPST_PREFIX 0xC8 -#define HPST_PREFIX 0xF1 - - -#define NO_PST 0 -#define NO_DEF_ID 1 -#define DM7K9K 2 -#define DM7KONLY 3 -#define DM9KONLY 4 -#define DM7K9KDIFF 5 -#define NO_DEF_ITEM 0xff - -#define MAX(x, y) (x > y ? 
x : y) -#endif - -#if defined(CONFIG_MACH_MECHA) -int sdio_diag_init_enable; -#endif - -#if DIAG_XPST -#if defined(CONFIG_MACH_VIGOR) -static unsigned char *diag2arm9_buf_9k; -#endif -#endif - -int diag_configured; static DEFINE_SPINLOCK(ch_lock); static LIST_HEAD(usb_diag_ch_list); @@ -202,47 +123,8 @@ struct diag_context { unsigned long dpkts_tolaptop; unsigned long dpkts_tomodem; unsigned dpkts_tolaptop_pending; -#if DIAG_XPST - spinlock_t req_lock; - - struct mutex user_lock; -#define ID_TABLE_SZ 20 /* keep this small */ - struct list_head rx_req_idle; - struct list_head rx_req_user; - wait_queue_head_t read_wq; - char *user_read_buf; - uint32_t user_read_len; - char *user_readp; - bool opened; - /* list of registered command ids to be routed to userspace */ - unsigned char id_table[ID_TABLE_SZ]; - - /* smd_channel_t *ch; */ - int online; - int error; -/* for slate test */ - struct list_head rx_arm9_idle; - struct list_head rx_arm9_done; - struct mutex diag2arm9_lock; - struct mutex diag2arm9_read_lock; - struct mutex diag2arm9_write_lock; - bool diag2arm9_opened; - unsigned char toARM9_buf[SMD_MAX]; - unsigned char DM_buf[USB_MAX_OUT_BUF]; - unsigned read_arm9_count; - unsigned char *read_arm9_buf; - wait_queue_head_t read_arm9_wq; - struct usb_request *read_arm9_req; - u64 tx_count; /* to smd */ - u64 rx_count; /* from smd */ - u64 usb_in_count; /* to pc */ - u64 usb_out_count; /* from pc */ - int ready; -#endif }; -#include "u_xpst.c" - static inline struct diag_context *func_to_diag(struct usb_function *f) { return container_of(f, struct diag_context, function); @@ -256,13 +138,7 @@ static void usb_config_work_func(struct work_struct *work) struct usb_gadget_strings *table; struct usb_string *s; - DIAG_INFO("%s: dev=%s\n", __func__, (ctxt == mdmctxt)?DIAG_MDM:DIAG_LEGACY); -#if DIAG_XPST - ctxt->tx_count = ctxt->rx_count = 0; - ctxt->usb_in_count = ctxt->usb_out_count = 0; - driver->diag_smd_count = driver->diag_qdsp_count = 0; -#endif - if (ctxt->ch.notify && ctxt == legacyctxt) + if (ctxt->ch.notify) ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_CONNECT, NULL); if (!ctxt->update_pid_and_serial_num) @@ -329,10 +205,6 @@ static void diag_read_complete(struct usb_ep *ep, struct diag_context *ctxt = ep->driver_data; struct diag_request *d_req = req->context; unsigned long flags; -#if DIAG_XPST - struct usb_request *xpst_req; - unsigned int cmd_id; -#endif d_req->actual = req->actual; d_req->status = req->status; @@ -342,32 +214,7 @@ static void diag_read_complete(struct usb_ep *ep, spin_unlock_irqrestore(&ctxt->lock, flags); ctxt->dpkts_tomodem++; -#if DIAG_XPST -#ifdef HTC_DIAG_DEBUG - DIAG_INFO("%s: dev=%s\n", __func__, (ctxt == mdmctxt)?DIAG_MDM:DIAG_LEGACY); - print_hex_dump(KERN_DEBUG, "from PC: ", DUMP_PREFIX_ADDRESS, 16, 1, - req->buf, req->actual, 1); -#endif - cmd_id = *((unsigned short *)req->buf); - - if ((ctxt != mdmctxt) && if_route_to_userspace(ctxt, cmd_id)) { - xpst_req = xpst_req_get(ctxt, &ctxt->rx_req_idle); - if (xpst_req) { - xpst_req->actual = req->actual; - xpst_req->status = req->status; - memcpy(xpst_req->buf, req->buf, req->actual); - xpst_req_put(ctxt, &ctxt->rx_req_user, xpst_req); - wake_up(&ctxt->read_wq); - driver->nohdlc = 1; - } else - DIAG_INFO("%s No enough xpst_req \n", __func__); - } else { - driver->nohdlc = 0; - ctxt->tx_count += req->actual; - } - ctxt->usb_out_count += req->actual; -#endif if (ctxt->ch.notify) ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_READ_DONE, d_req); } @@ -387,11 +234,10 @@ struct usb_diag_ch *usb_diag_open(const char *name, void 
*priv, void (*notify)(void *, unsigned, struct diag_request *)) { struct usb_diag_ch *ch; - struct diag_context *ctxt = NULL; + struct diag_context *ctxt; unsigned long flags; int found = 0; - printk(KERN_DEBUG "[USB] %s: name: %s\n", __func__, name); spin_lock_irqsave(&ch_lock, flags); /* Check if we already have a channel with this name */ list_for_each_entry(ch, &usb_diag_ch_list, list) { @@ -403,27 +249,11 @@ struct usb_diag_ch *usb_diag_open(const char *name, void *priv, spin_unlock_irqrestore(&ch_lock, flags); if (!found) { - /* have a static global variable already */ - if (!strcmp(name, DIAG_LEGACY)) { - legacyctxt = ctxt = &_context; - legacych = ch = &legacyctxt->ch; -#if DIAG_XPST - misc_register(&htc_diag_device_fops); - /*DMrounter*/ - misc_register(&diag2arm9_device); - ctxt->usb_in_count = ctxt->usb_out_count = 0; - ctxt->tx_count = ctxt->rx_count = 0; - driver->diag_smd_count = driver->diag_qdsp_count = 0; -#endif - } -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) - else if (!strcmp(name, DIAG_MDM)) { - mdmctxt = ctxt = &_mdm_context; - mdmch = ch = &ctxt->ch; - } -#endif - else - return NULL; + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return ERR_PTR(-ENOMEM); + + ch = &ctxt->ch; } ch->name = name; @@ -434,9 +264,6 @@ struct usb_diag_ch *usb_diag_open(const char *name, void *priv, list_add_tail(&ch->list, &usb_diag_ch_list); spin_unlock_irqrestore(&ch_lock, flags); - DIAG_INFO("%s: ch->name:%s ctxt:%p pkts_pending:%p\n", __func__, - ch->name, ctxt, &ctxt->dpkts_tolaptop_pending); - return ch; } EXPORT_SYMBOL(usb_diag_open); @@ -450,14 +277,17 @@ EXPORT_SYMBOL(usb_diag_open); */ void usb_diag_close(struct usb_diag_ch *ch) { + struct diag_context *dev = container_of(ch, struct diag_context, ch); unsigned long flags; spin_lock_irqsave(&ch_lock, flags); ch->priv = NULL; ch->notify = NULL; /* Free-up the resources if channel is no more active */ - if (!ch->priv_usb) + if (!ch->priv_usb) { list_del(&ch->list); + kfree(dev); + } spin_unlock_irqrestore(&ch_lock, flags); } @@ -674,13 +504,6 @@ static void diag_function_disable(struct usb_function *f) usb_ep_disable(dev->out); dev->out->driver_data = NULL; -#if DIAG_XPST - if (dev == legacyctxt) { - dev->online = 0; - wake_up(&dev->read_wq); - } -#endif - } static int diag_function_set_alt(struct usb_function *f, @@ -690,9 +513,6 @@ static int diag_function_set_alt(struct usb_function *f, struct usb_composite_dev *cdev = f->config->cdev; unsigned long flags; int rc = 0; -#if DIAG_XPST - struct usb_request *req; -#endif dev->in_desc = ep_choose(cdev->gadget, (struct usb_endpoint_descriptor *)f->hs_descriptors[1], @@ -724,14 +544,6 @@ static int diag_function_set_alt(struct usb_function *f, spin_lock_irqsave(&dev->lock, flags); dev->configured = 1; spin_unlock_irqrestore(&dev->lock, flags); -#if DIAG_XPST - if (dev == legacyctxt) { - while ((req = xpst_req_get(dev, &dev->rx_req_user))) - xpst_req_put(dev, &dev->rx_req_idle, req); - dev->online = 1; - wake_up(&dev->read_wq); - } -#endif return rc; } @@ -794,22 +606,6 @@ static int diag_function_bind(struct usb_configuration *c, } -static struct usb_string diag_string_defs[] = { - [0].s = "HTC DIAG", - [1].s = "HTC 9K DIAG", - { } /* end of list */ -}; - -static struct usb_gadget_strings diag_string_table = { - .language = 0x0409, /* en-us */ - .strings = diag_string_defs, -}; - -static struct usb_gadget_strings *diag_strings[] = { - &diag_string_table, - NULL, -}; - int diag_function_add(struct usb_configuration *c, const char *name, int (*update_pid)(uint32_t, const char 
*)) { @@ -834,10 +630,9 @@ int diag_function_add(struct usb_configuration *c, const char *name, /* claim the channel for this USB interface */ _ch->priv_usb = dev; - dev->update_pid_and_serial_num = update_pid; + dev->update_pid_and_serial_num = update_pid; dev->cdev = c->cdev; dev->function.name = _ch->name; - dev->function.strings = diag_strings; dev->function.descriptors = fs_diag_desc; dev->function.hs_descriptors = hs_diag_desc; dev->function.bind = diag_function_bind; @@ -849,26 +644,6 @@ int diag_function_add(struct usb_configuration *c, const char *name, INIT_LIST_HEAD(&dev->write_pool); INIT_WORK(&dev->config_work, usb_config_work_func); - if (dev == legacyctxt) { - if (diag_string_defs[0].id == 0) { - ret = usb_string_id(c->cdev); - if (ret < 0) - return ret; - diag_string_defs[0].id = ret; - } else - ret = diag_string_defs[0].id; - } else { - if (diag_string_defs[1].id == 0) { - ret = usb_string_id(c->cdev); - if (ret < 0) - return ret; - diag_string_defs[1].id = ret; - } else - ret = diag_string_defs[1].id; - } - - intf_desc.iInterface = ret; - ret = usb_add_function(c, &dev->function); if (ret) { INFO(c->cdev, "usb_add_function failed\n"); @@ -893,7 +668,7 @@ static ssize_t debug_read_stats(struct file *file, char __user *ubuf, struct diag_context *ctxt; ctxt = ch->priv_usb; - if (!ctxt) continue; + temp += scnprintf(buf + temp, PAGE_SIZE - temp, "---Name: %s---\n" "endpoints: %s, %s\n" @@ -970,30 +745,16 @@ static void diag_cleanup(void) spin_lock_irqsave(&ch_lock, flags); /* Free if diagchar is not using the channel anymore */ - if (!_ch->priv) + if (!_ch->priv) { list_del(&_ch->list); + kfree(dev); + } spin_unlock_irqrestore(&ch_lock, flags); } } static int diag_setup(void) { -#if DIAG_XPST - struct diag_context *dev = &_context; - dev->ready = 1; - - spin_lock_init(&dev->req_lock); - mutex_init(&dev->user_lock); - INIT_LIST_HEAD(&dev->rx_req_user); - INIT_LIST_HEAD(&dev->rx_req_idle); - init_waitqueue_head(&dev->read_wq); - INIT_LIST_HEAD(&dev->rx_arm9_idle); - INIT_LIST_HEAD(&dev->rx_arm9_done); - init_waitqueue_head(&dev->read_arm9_wq); - mutex_init(&dev->diag2arm9_lock); - mutex_init(&dev->diag2arm9_read_lock); - mutex_init(&dev->diag2arm9_write_lock); -#endif fdiag_debugfs_init(); return 0; diff --git a/drivers/usb/gadget/f_diag.h b/drivers/usb/gadget/f_diag.h new file mode 100644 index 00000000..82d9a252 --- /dev/null +++ b/drivers/usb/gadget/f_diag.h @@ -0,0 +1,24 @@ +/* drivers/usb/gadget/f_diag.h + * + * Diag Function Device - Route DIAG frames between SMD and USB + * + * Copyright (C) 2008-2009 Google, Inc. + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __F_DIAG_H +#define __F_DIAG_H + +int diag_function_add(struct usb_configuration *c, const char *); + +#endif /* __F_DIAG_H */ + diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 42570227..5d62a22b 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -738,112 +738,6 @@ static int sleep_thread(struct fsg_common *common) } -static void _lba_to_msf(u8 *buf, int lba) -{ - lba += 150; - buf[0] = (lba / 75) / 60; - buf[1] = (lba / 75) % 60; - buf[2] = lba % 75; -} - - -static int _read_toc_raw(struct fsg_common *common, struct fsg_buffhd *bh) -{ - struct fsg_lun *curlun = common->curlun; - int msf = common->cmnd[1] & 0x02; - u8 *buf = (u8 *) bh->buf; - u8 *q; - int len; - - q = buf + 2; - memset(q, 0, 46); - *q++ = 1; /* first session */ - *q++ = 1; /* last session */ - - *q++ = 1; /* session number */ - *q++ = 0x14; /* data track */ - *q++ = 0; /* track number */ - *q++ = 0xa0; /* lead-in */ - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - *q++ = 0; - *q++ = 1; /* first track */ - *q++ = 0x00; /* disk type */ - *q++ = 0x00; - - *q++ = 1; /* session number */ - *q++ = 0x14; /* data track */ - *q++ = 0; /* track number */ - *q++ = 0xa1; - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - *q++ = 0; - *q++ = 1; /* last track */ - *q++ = 0x00; - *q++ = 0x00; - - *q++ = 1; /* session number */ - *q++ = 0x14; /* data track */ - *q++ = 0; /* track number */ - *q++ = 0xa2; /* lead-out */ - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - if (msf) { - *q++ = 0; /* reserved */ - _lba_to_msf(q, curlun->num_sectors); - q += 3; - } else { - put_unaligned_be32(curlun->num_sectors, q); - q += 4; - } - - *q++ = 1; /* session number */ - *q++ = 0x14; /* ADR, control */ - *q++ = 0; /* track number */ - *q++ = 1; /* point */ - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - if (msf) { - *q++ = 0; - _lba_to_msf(q, 0); - q += 3; - } else { - memset(q, 0, 4); - q += 4; - } - - len = q - buf; - put_unaligned_be16(len - 2, buf); - - return len; -} - - -static void cd_data_to_raw(u8 *buf, int lba) -{ - /* sync bytes */ - buf[0] = 0x00; - memset(buf + 1, 0xff, 10); - buf[11] = 0x00; - buf += 12; - - /* MSF */ - _lba_to_msf(buf, lba); - buf[3] = 0x01; /* mode 1 data */ - buf += 4; - - /* data */ - buf += 2048; - - /* XXX: ECC not computed */ - memset(buf, 0, 288); -} - - /*-------------------------------------------------------------------------*/ static int do_read(struct fsg_common *common) @@ -857,25 +751,15 @@ static int do_read(struct fsg_common *common) unsigned int amount; unsigned int partial_page; ssize_t nread; - u32 transfer_request; #ifdef CONFIG_USB_MSC_PROFILING ktime_t start, diff; #endif - if (common->cmnd[0] == READ_CD) { - if (common->data_size_from_cmnd == 0) - return 0; - transfer_request = common->cmnd[9]; - } else - transfer_request = 0; - /* * Get the starting Logical Block Address and check that it's * not too big. 
*/ - if (common->cmnd[0] == READ_CD) - lba = get_unaligned_be32(&common->cmnd[2]); - else if (common->cmnd[0] == READ_6) + if (common->cmnd[0] == READ_6) lba = get_unaligned_be24(&common->cmnd[1]); else { lba = get_unaligned_be32(&common->cmnd[2]); @@ -894,18 +778,10 @@ static int do_read(struct fsg_common *common) curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; return -EINVAL; } + file_offset = ((loff_t) lba) << 9; - if ((transfer_request & 0xf8) == 0xf8) { - file_offset = ((loff_t) lba) << 11; - - /* read all data, 2352 byte */ - amount_left = 2352; - } else { - file_offset = ((loff_t) lba) << 9; - - /* Carry out the file reads */ - amount_left = common->data_size_from_cmnd; - } + /* Carry out the file reads */ + amount_left = common->data_size_from_cmnd; if (unlikely(amount_left == 0)) return -EIO; /* No default reply */ @@ -956,14 +832,9 @@ static int do_read(struct fsg_common *common) #ifdef CONFIG_USB_MSC_PROFILING start = ktime_get(); #endif - if ((transfer_request & 0xf8) == 0xf8) - nread = vfs_read(curlun->filp, - ((char __user *)bh->buf) + 16, - amount, &file_offset_tmp); - else - nread = vfs_read(curlun->filp, - (char __user *)bh->buf, - amount, &file_offset_tmp); + nread = vfs_read(curlun->filp, + (char __user *)bh->buf, + amount, &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); #ifdef CONFIG_USB_MSC_PROFILING @@ -1007,9 +878,6 @@ static int do_read(struct fsg_common *common) common->next_buffhd_to_fill = bh->next; } - if ((transfer_request & 0xf8) == 0xf8) - cd_data_to_raw(bh->buf, lba); - return -EIO; /* No default reply */ } @@ -1521,7 +1389,6 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) struct fsg_lun *curlun = common->curlun; int msf = common->cmnd[1] & 0x02; int start_track = common->cmnd[6]; - int format = (common->cmnd[9] & 0xC0) >> 6; u8 *buf = (u8 *)bh->buf; if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ @@ -1530,9 +1397,6 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) return -EINVAL; } - if (format == 2) - return _read_toc_raw(common, bh); - memset(buf, 0, 20); buf[1] = (20-2); /* TOC data length */ buf[2] = 1; /* First track number */ @@ -1601,7 +1465,7 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) memset(buf+2, 0, 10); /* None of the fields are changeable */ if (!changeable_values) { - buf[2] = 0x00; /* Write cache disable, */ + buf[2] = 0x04; /* Write cache enable, */ /* Read cache not disabled */ /* No cache retention priorities */ put_unaligned_be16(0xffff, &buf[4]); @@ -1746,42 +1610,6 @@ static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) return -EINVAL; } -static int do_reserve(struct fsg_common *common, struct fsg_buffhd *bh) -{ - int call_us_ret = -1; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/system/sbin:/system/bin:/system/xbin", - NULL, - }; - char *exec_path[2] = {"/system/bin/stop", "/system/bin/start" }; - char *argv_stop[] = { exec_path[0], "adbd", NULL, }; - char *argv_start[] = { exec_path[1], "adbd", NULL, }; - - if (common->cmnd[1] == ('h'&0x1f) && common->cmnd[2] == 't' - && common->cmnd[3] == 'c') { - /* No special options */ - switch (common->cmnd[5]) { - case 0x01: /* enable adbd */ - call_us_ret = call_usermodehelper(exec_path[1], - argv_start, envp, UMH_WAIT_PROC); - break; - case 0x02: /*disable adbd */ - call_us_ret = call_usermodehelper(exec_path[0], - argv_stop, envp, UMH_WAIT_PROC); - break; - default: - printk(KERN_DEBUG 
"Unknown hTC specific command..." - "(0x%2.2X)\n", common->cmnd[5]); - break; - } - } - printk(KERN_NOTICE "%s adb daemon from mass_storage %s(%d)\n", - (common->cmnd[5] == 0x01) ? "Enable" : - (common->cmnd[5] == 0x02) ? "Disable" : "Unknown", - (call_us_ret == 0) ? "DONE" : "FAIL", call_us_ret); - return 0; -} /*-------------------------------------------------------------------------*/ @@ -2125,8 +1953,6 @@ static int check_command(struct fsg_common *common, int cmnd_size, "but we got %d\n", name, cmnd_size, common->cmnd_size); cmnd_size = common->cmnd_size; - } else if (common->cmnd[0] == RESERVE) { - cmnd_size = common->cmnd_size; } else { common->phase_error = 1; return -EINVAL; @@ -2305,16 +2131,6 @@ static int do_scsi_command(struct fsg_common *common) reply = do_read(common); break; - case READ_CD: - common->data_size_from_cmnd = ((common->cmnd[6] << 16) | - (common->cmnd[7] << 8) | (common->cmnd[8])) << 9; - reply = check_command(common, 12, DATA_DIR_TO_HOST, - (0xf<<2) | (7<<7), 1, "READ CD"); - - if (reply == 0) - reply = do_read(common); - break; - case READ_CAPACITY: common->data_size_from_cmnd = 8; reply = check_command(common, 10, DATA_DIR_TO_HOST, @@ -2342,7 +2158,7 @@ static int do_scsi_command(struct fsg_common *common) common->data_size_from_cmnd = get_unaligned_be16(&common->cmnd[7]); reply = check_command(common, 10, DATA_DIR_TO_HOST, - (0xf<<6) | (1<<1), 1, + (7<<6) | (1<<1), 1, "READ TOC"); if (reply == 0) reply = do_read_toc(common, bh); @@ -2435,15 +2251,6 @@ static int do_scsi_command(struct fsg_common *common) reply = do_write(common); break; - case RESERVE: - common->data_size_from_cmnd = common->cmnd[4]; - reply = check_command(common, 10, DATA_DIR_TO_HOST, - (1<<1) | (0xf<<2) , 0, - "RESERVE(6)"); - if (reply == 0) - reply = do_reserve(common, bh); - break; - /* * Some mandatory commands that we recognize but don't implement. * They don't mean much in this setting. It's left as an exercise @@ -2452,6 +2259,7 @@ static int do_scsi_command(struct fsg_common *common) */ case FORMAT_UNIT: case RELEASE: + case RESERVE: case SEND_DIAGNOSTIC: /* Fall through */ @@ -3149,9 +2957,9 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, } } snprintf(common->inquiry_string, sizeof common->inquiry_string, - "%-8s%-16s%04x", cfg->vendor_name ? cfg->vendor_name : "Linux", + "%-8s%-16s%04x", cfg->vendor_name ?: "Linux", /* Assume product name dependent on the first LUN */ - cfg->product_name ? cfg->product_name : (common->luns->cdrom + cfg->product_name ?: (common->luns->cdrom ? 
"File-Stor Gadget" : "File-CD Gadget"), i); diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c index 28292313..88473d1a 100644 --- a/drivers/usb/gadget/f_mtp.c +++ b/drivers/usb/gadget/f_mtp.c @@ -410,15 +410,6 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev, ep->driver_data = dev; /* claim the endpoint */ dev->ep_out = ep; - ep = usb_ep_autoconfig(cdev->gadget, out_desc); - if (!ep) { - DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); - return -ENODEV; - } - DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); - ep->driver_data = dev; /* claim the endpoint */ - dev->ep_out = ep; - ep = usb_ep_autoconfig(cdev->gadget, intr_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); @@ -504,7 +495,17 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, } /* wait for a request to complete */ - ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + ret = wait_event_interruptible(dev->read_wq, + dev->rx_done || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + if (!dev->rx_done) + usb_ep_dequeue(dev->ep_out, req); + spin_lock_irq(&dev->lock); + dev->state = STATE_CANCELED; + spin_unlock_irq(&dev->lock); + goto done; + } if (ret < 0) { r = ret; usb_ep_dequeue(dev->ep_out, req); @@ -707,7 +708,8 @@ static void send_file_work(struct work_struct *data) { ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "send_file_work: xfer error %d\n", ret); - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; r = -EIO; break; } @@ -759,7 +761,8 @@ static void receive_file_work(struct work_struct *data) ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; break; } } @@ -771,7 +774,8 @@ static void receive_file_work(struct work_struct *data) DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; break; } write_req = NULL; diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c new file mode 100644 index 00000000..cbcf5ac7 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet.c @@ -0,0 +1,1057 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include "u_rmnet.h" +#include "gadget_chips.h" + +#define RMNET_NOTIFY_INTERVAL 5 +#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification) + +struct rmnet_descs { + struct usb_endpoint_descriptor *in; + struct usb_endpoint_descriptor *out; + struct usb_endpoint_descriptor *notify; +}; + +#define ACM_CTRL_DTR (1 << 0) + +/* TODO: use separate structures for data and + * control paths + */ +struct f_rmnet { + struct grmnet port; + int ifc_id; + u8 port_num; + atomic_t online; + atomic_t ctrl_online; + struct usb_composite_dev *cdev; + + spinlock_t lock; + + /* usb descriptors */ + struct rmnet_descs fs; + struct rmnet_descs hs; + + /* usb eps*/ + struct usb_ep *notify; + struct usb_endpoint_descriptor *notify_desc; + struct usb_request *notify_req; + + /* control info */ + struct list_head cpkt_resp_q; + atomic_t notify_count; + unsigned long cpkts_len; +}; + +#define NR_RMNET_PORTS 1 +static unsigned int nr_rmnet_ports; +static unsigned int no_ctrl_smd_ports; +static unsigned int no_ctrl_hsic_ports; +static unsigned int no_data_bam_ports; +static unsigned int no_data_bam2bam_ports; +static unsigned int no_data_hsic_ports; +static struct rmnet_ports { + enum transport_type data_xport; + enum transport_type ctrl_xport; + unsigned data_xport_num; + unsigned ctrl_xport_num; + unsigned port_num; + struct f_rmnet *port; +} rmnet_ports[NR_RMNET_PORTS]; + +static struct usb_interface_descriptor rmnet_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* Full speed support */ +static struct usb_endpoint_descriptor rmnet_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), + .bInterval = 1 << RMNET_NOTIFY_INTERVAL, +}; + +static struct usb_endpoint_descriptor rmnet_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor rmnet_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *rmnet_fs_function[] = { + (struct usb_descriptor_header *) &rmnet_interface_desc, + (struct usb_descriptor_header *) &rmnet_fs_notify_desc, + (struct usb_descriptor_header *) &rmnet_fs_in_desc, + (struct usb_descriptor_header *) &rmnet_fs_out_desc, + NULL, +}; + +/* High speed support */ +static struct usb_endpoint_descriptor rmnet_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), + .bInterval = RMNET_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor rmnet_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 
__constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor rmnet_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *rmnet_hs_function[] = { + (struct usb_descriptor_header *) &rmnet_interface_desc, + (struct usb_descriptor_header *) &rmnet_hs_notify_desc, + (struct usb_descriptor_header *) &rmnet_hs_in_desc, + (struct usb_descriptor_header *) &rmnet_hs_out_desc, + NULL, +}; + +/* String descriptors */ + +static struct usb_string rmnet_string_defs[] = { + [0].s = "RmNet", + { } /* end of list */ +}; + +static struct usb_gadget_strings rmnet_string_table = { + .language = 0x0409, /* en-us */ + .strings = rmnet_string_defs, +}; + +static struct usb_gadget_strings *rmnet_strings[] = { + &rmnet_string_table, + NULL, +}; + +/* ------- misc functions --------------------*/ + +static inline struct f_rmnet *func_to_rmnet(struct usb_function *f) +{ + return container_of(f, struct f_rmnet, port.func); +} + +static inline struct f_rmnet *port_to_rmnet(struct grmnet *r) +{ + return container_of(r, struct f_rmnet, port); +} + +static struct usb_request * +frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, flags); + if (!req) + return ERR_PTR(-ENOMEM); + + req->buf = kmalloc(len, flags); + if (!req->buf) { + usb_ep_free_request(ep, req); + return ERR_PTR(-ENOMEM); + } + + req->length = len; + + return req; +} + +void frmnet_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags) +{ + struct rmnet_ctrl_pkt *pkt; + + pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags); + if (!pkt) + return ERR_PTR(-ENOMEM); + + pkt->buf = kmalloc(len, flags); + if (!pkt->buf) { + kfree(pkt); + return ERR_PTR(-ENOMEM); + } + pkt->len = len; + + return pkt; +} + +static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt) +{ + kfree(pkt->buf); + kfree(pkt); +} + +/* -------------------------------------------*/ + +static int rmnet_gport_setup(void) +{ + int ret; + int port_idx; + int i; + + pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u" + " smd ports: %u ctrl hsic ports: %u" + " nr_rmnet_ports: %u\n", + __func__, no_data_bam_ports, no_data_bam2bam_ports, + no_data_hsic_ports, no_ctrl_smd_ports, + no_ctrl_hsic_ports, nr_rmnet_ports); + + if (no_data_bam_ports || no_data_bam2bam_ports) { + ret = gbam_setup(no_data_bam_ports, + no_data_bam2bam_ports); + if (ret) + return ret; + } + + if (no_ctrl_smd_ports) { + ret = gsmd_ctrl_setup(no_ctrl_smd_ports); + if (ret) + return ret; + } + + if (no_data_hsic_ports) { + port_idx = ghsic_data_setup(no_data_hsic_ports, + USB_GADGET_RMNET); + if (port_idx < 0) + return port_idx; + for (i = 0; i < nr_rmnet_ports; i++) { + if (rmnet_ports[i].data_xport == + USB_GADGET_XPORT_HSIC) { + rmnet_ports[i].data_xport_num = port_idx; + port_idx++; + } + } + } + + if (no_ctrl_hsic_ports) { + port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports, + USB_GADGET_RMNET); + if (port_idx < 0) + return port_idx; + for (i = 0; i < nr_rmnet_ports; i++) { + if (rmnet_ports[i].ctrl_xport == + USB_GADGET_XPORT_HSIC) { + rmnet_ports[i].ctrl_xport_num = port_idx; + port_idx++; + } + } + } + + return 0; +} + +static int gport_rmnet_connect(struct f_rmnet *dev) +{ + int ret; + 
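	/*
	 * port_num below indexes the configured control/data transports;
	 * the control side (SMD or HSIC) is connected first, then the
	 * data side (BAM, BAM2BAM or HSIC), and a data-connect failure
	 * tears the control connection back down.
	 */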
unsigned port_num; + enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport; + enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; + + pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n", + __func__, xport_to_str(cxport), xport_to_str(dxport), + dev, dev->port_num); + + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + switch (cxport) { + case USB_GADGET_XPORT_SMD: + ret = gsmd_ctrl_connect(&dev->port, port_num); + if (ret) { + pr_err("%s: gsmd_ctrl_connect failed: err:%d\n", + __func__, ret); + return ret; + } + break; + case USB_GADGET_XPORT_HSIC: + ret = ghsic_ctrl_connect(&dev->port, port_num); + if (ret) { + pr_err("%s: ghsic_ctrl_connect failed: err:%d\n", + __func__, ret); + return ret; + } + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(cxport)); + return -ENODEV; + } + + port_num = rmnet_ports[dev->port_num].data_xport_num; + switch (dxport) { + case USB_GADGET_XPORT_BAM: + case USB_GADGET_XPORT_BAM2BAM: + /* currently only one connection (idx 0) + is supported */ + ret = gbam_connect(&dev->port, port_num, + dxport, 0); + if (ret) { + pr_err("%s: gbam_connect failed: err:%d\n", + __func__, ret); + gsmd_ctrl_disconnect(&dev->port, port_num); + return ret; + } + break; + case USB_GADGET_XPORT_HSIC: + ret = ghsic_data_connect(&dev->port, port_num); + if (ret) { + pr_err("%s: ghsic_data_connect failed: err:%d\n", + __func__, ret); + ghsic_ctrl_disconnect(&dev->port, port_num); + return ret; + } + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(dxport)); + return -ENODEV; + } + + return 0; +} + +static int gport_rmnet_disconnect(struct f_rmnet *dev) +{ + unsigned port_num; + enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport; + enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; + + pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n", + __func__, xport_to_str(cxport), xport_to_str(dxport), + dev, dev->port_num); + + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + switch (cxport) { + case USB_GADGET_XPORT_SMD: + gsmd_ctrl_disconnect(&dev->port, port_num); + break; + case USB_GADGET_XPORT_HSIC: + ghsic_ctrl_disconnect(&dev->port, port_num); + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(cxport)); + return -ENODEV; + } + + port_num = rmnet_ports[dev->port_num].data_xport_num; + switch (dxport) { + case USB_GADGET_XPORT_BAM: + case USB_GADGET_XPORT_BAM2BAM: + gbam_disconnect(&dev->port, port_num, dxport); + break; + case USB_GADGET_XPORT_HSIC: + ghsic_data_disconnect(&dev->port, port_num); + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(dxport)); + return -ENODEV; + } + + return 0; +} + +static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_rmnet *dev = func_to_rmnet(f); + + pr_debug("%s: portno:%d\n", __func__, dev->port_num); + + if (gadget_is_dualspeed(c->cdev->gadget)) + usb_free_descriptors(f->hs_descriptors); + usb_free_descriptors(f->descriptors); + + frmnet_free_req(dev->notify, dev->notify_req); + + kfree(f->name); +} + +static void frmnet_disable(struct usb_function *f) +{ + struct f_rmnet *dev = func_to_rmnet(f); + unsigned long flags; + struct rmnet_ctrl_pkt *cpkt; + + pr_debug("%s: port#%d\n", __func__, dev->port_num); + 
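	/*
	 * Teardown path: disable the notify endpoint, mark the port
	 * offline, drop any queued control responses, then disconnect
	 * the data and control transports.
	 */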
+ usb_ep_disable(dev->notify); + + atomic_set(&dev->online, 0); + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->cpkt_resp_q)) { + cpkt = list_first_entry(&dev->cpkt_resp_q, + struct rmnet_ctrl_pkt, list); + + list_del(&cpkt->list); + rmnet_free_ctrl_pkt(cpkt); + } + atomic_set(&dev->notify_count, 0); + spin_unlock_irqrestore(&dev->lock, flags); + + gport_rmnet_disconnect(dev); +} + +static int +frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_rmnet *dev = func_to_rmnet(f); + struct usb_composite_dev *cdev = dev->cdev; + int ret; + + pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num); + + if (dev->notify->driver_data) { + pr_debug("%s: reset port:%d\n", __func__, dev->port_num); + usb_ep_disable(dev->notify); + } + dev->notify_desc = ep_choose(cdev->gadget, + dev->hs.notify, + dev->fs.notify); + ret = usb_ep_enable(dev->notify, dev->notify_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, dev->notify->name, ret); + return ret; + } + dev->notify->driver_data = dev; + + if (dev->port.in->driver_data) { + pr_debug("%s: reset port:%d\n", __func__, dev->port_num); + gport_rmnet_disconnect(dev); + } + + dev->port.in_desc = ep_choose(cdev->gadget, + dev->hs.in, dev->fs.in); + dev->port.out_desc = ep_choose(cdev->gadget, + dev->hs.out, dev->fs.out); + + ret = gport_rmnet_connect(dev); + + atomic_set(&dev->online, 1); + + return ret; +} + +static void frmnet_ctrl_response_available(struct f_rmnet *dev) +{ + struct usb_request *req = dev->notify_req; + struct usb_cdc_notification *event; + unsigned long flags; + int ret; + + pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num); + + spin_lock_irqsave(&dev->lock, flags); + if (!atomic_read(&dev->online) || !req || !req->buf) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + if (atomic_inc_return(&dev->notify_count) != 1) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + event = req->buf; + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + spin_unlock_irqrestore(&dev->lock, flags); + + ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC); + if (ret) { + atomic_dec(&dev->notify_count); + pr_debug("ep enqueue error %d\n", ret); + } +} + +static void frmnet_connect(struct grmnet *gr) +{ + struct f_rmnet *dev; + + if (!gr) { + pr_err("%s: Invalid grmnet:%p\n", __func__, gr); + return; + } + + dev = port_to_rmnet(gr); + + atomic_set(&dev->ctrl_online, 1); +} + +static void frmnet_disconnect(struct grmnet *gr) +{ + struct f_rmnet *dev; + unsigned long flags; + struct usb_cdc_notification *event; + int status; + struct rmnet_ctrl_pkt *cpkt; + + if (!gr) { + pr_err("%s: Invalid grmnet:%p\n", __func__, gr); + return; + } + + dev = port_to_rmnet(gr); + + atomic_set(&dev->ctrl_online, 0); + + if (!atomic_read(&dev->online)) { + pr_debug("%s: nothing to do\n", __func__); + return; + } + + usb_ep_fifo_flush(dev->notify); + + event = dev->notify_req->buf; + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + + status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC); + if (status < 0) { + if 
(!atomic_read(&dev->online)) + return; + pr_err("%s: rmnet notify ep enqueue error %d\n", + __func__, status); + } + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->cpkt_resp_q)) { + cpkt = list_first_entry(&dev->cpkt_resp_q, + struct rmnet_ctrl_pkt, list); + + list_del(&cpkt->list); + rmnet_free_ctrl_pkt(cpkt); + } + atomic_set(&dev->notify_count, 0); + spin_unlock_irqrestore(&dev->lock, flags); + +} + +static int +frmnet_send_cpkt_response(void *gr, void *buf, size_t len) +{ + struct f_rmnet *dev; + struct rmnet_ctrl_pkt *cpkt; + unsigned long flags; + + if (!gr || !buf) { + pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n", + __func__, gr, buf); + return -ENODEV; + } + cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC); + if (IS_ERR(cpkt)) { + pr_err("%s: Unable to allocate ctrl pkt\n", __func__); + return -ENOMEM; + } + memcpy(cpkt->buf, buf, len); + cpkt->len = len; + + dev = port_to_rmnet(gr); + + pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); + + if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) { + rmnet_free_ctrl_pkt(cpkt); + return 0; + } + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&cpkt->list, &dev->cpkt_resp_q); + spin_unlock_irqrestore(&dev->lock, flags); + + frmnet_ctrl_response_available(dev); + + return 0; +} + +static void +frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_rmnet *dev = req->context; + struct usb_composite_dev *cdev; + unsigned port_num; + + if (!dev) { + pr_err("%s: rmnet dev is null\n", __func__); + return; + } + + pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); + + cdev = dev->cdev; + + if (dev->port.send_encap_cmd) { + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + dev->port.send_encap_cmd(port_num, req->buf, req->actual); + } +} + +static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_rmnet *dev = req->context; + int status = req->status; + + pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&dev->notify_count, 0); + break; + default: + pr_err("rmnet notify ep error %d\n", status); + /* FALLTHROUGH */ + case 0: + if (!atomic_read(&dev->ctrl_online)) + break; + + if (atomic_dec_and_test(&dev->notify_count)) + break; + + status = usb_ep_queue(dev->notify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&dev->notify_count); + pr_debug("ep enqueue error %d\n", status); + } + break; + } +} + +static int +frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_rmnet *dev = func_to_rmnet(f); + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = cdev->req; + unsigned port_num; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + int ret = -EOPNOTSUPP; + + pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num); + + if (!atomic_read(&dev->online)) { + pr_debug("%s: usb cable is not connected\n", __func__); + return -ENOTCONN; + } + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SEND_ENCAPSULATED_COMMAND: + ret = w_length; + req->complete = frmnet_cmd_complete; + req->context = dev; + break; + + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value) + goto invalid; + else { + unsigned len; + struct rmnet_ctrl_pkt 
*cpkt; + + spin_lock(&dev->lock); + if (list_empty(&dev->cpkt_resp_q)) { + pr_err("ctrl resp queue empty " + " req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + spin_unlock(&dev->lock); + goto invalid; + } + + cpkt = list_first_entry(&dev->cpkt_resp_q, + struct rmnet_ctrl_pkt, list); + list_del(&cpkt->list); + spin_unlock(&dev->lock); + + len = min_t(unsigned, w_length, cpkt->len); + memcpy(req->buf, cpkt->buf, len); + ret = len; + + rmnet_free_ctrl_pkt(cpkt); + } + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + if (dev->port.notify_modem) { + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + dev->port.notify_modem(&dev->port, port_num, w_value); + } + ret = 0; + + break; + default: + +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? */ + if (ret >= 0) { + VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = (ret < w_length); + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static int frmnet_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_rmnet *dev = func_to_rmnet(f); + struct usb_ep *ep; + struct usb_composite_dev *cdev = c->cdev; + int ret = -ENODEV; + + dev->ifc_id = usb_interface_id(c, f); + if (dev->ifc_id < 0) { + pr_err("%s: unable to allocate ifc id, err:%d", + __func__, dev->ifc_id); + return dev->ifc_id; + } + rmnet_interface_desc.bInterfaceNumber = dev->ifc_id; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc); + if (!ep) { + pr_err("%s: usb epin autoconfig failed\n", __func__); + return -ENODEV; + } + dev->port.in = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc); + if (!ep) { + pr_err("%s: usb epout autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_out_fail; + } + dev->port.out = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc); + if (!ep) { + pr_err("%s: usb epnotify autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_notify_fail; + } + dev->notify = ep; + ep->driver_data = cdev; + + dev->notify_req = frmnet_alloc_req(ep, + sizeof(struct usb_cdc_notification), + GFP_KERNEL); + if (IS_ERR(dev->notify_req)) { + pr_err("%s: unable to allocate memory for notify req\n", + __func__); + ret = -ENOMEM; + goto ep_notify_alloc_fail; + } + + dev->notify_req->complete = frmnet_notify_complete; + dev->notify_req->context = dev; + + f->descriptors = usb_copy_descriptors(rmnet_fs_function); + + if (!f->descriptors) + goto fail; + + dev->fs.in = usb_find_endpoint(rmnet_fs_function, + f->descriptors, + &rmnet_fs_in_desc); + dev->fs.out = usb_find_endpoint(rmnet_fs_function, + f->descriptors, + &rmnet_fs_out_desc); + dev->fs.notify = usb_find_endpoint(rmnet_fs_function, + f->descriptors, + &rmnet_fs_notify_desc); + + if (gadget_is_dualspeed(cdev->gadget)) { + rmnet_hs_in_desc.bEndpointAddress = + rmnet_fs_in_desc.bEndpointAddress; + rmnet_hs_out_desc.bEndpointAddress = + rmnet_fs_out_desc.bEndpointAddress; + rmnet_hs_notify_desc.bEndpointAddress = + rmnet_fs_notify_desc.bEndpointAddress; + + /* copy descriptors, and track endpoint copies */ + f->hs_descriptors = 
usb_copy_descriptors(rmnet_hs_function); + + if (!f->hs_descriptors) + goto fail; + + dev->hs.in = usb_find_endpoint(rmnet_hs_function, + f->hs_descriptors, &rmnet_hs_in_desc); + dev->hs.out = usb_find_endpoint(rmnet_hs_function, + f->hs_descriptors, &rmnet_hs_out_desc); + dev->hs.notify = usb_find_endpoint(rmnet_hs_function, + f->hs_descriptors, &rmnet_hs_notify_desc); + } + + pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n", + __func__, dev->port_num, + gadget_is_dualspeed(cdev->gadget) ? "dual" : "full", + dev->port.in->name, dev->port.out->name); + + return 0; + +fail: + if (f->descriptors) + usb_free_descriptors(f->descriptors); +ep_notify_alloc_fail: + dev->notify->driver_data = NULL; + dev->notify = NULL; +ep_auto_notify_fail: + dev->port.out->driver_data = NULL; + dev->port.out = NULL; +ep_auto_out_fail: + dev->port.in->driver_data = NULL; + dev->port.in = NULL; + + return ret; +} + +static int frmnet_bind_config(struct usb_configuration *c, unsigned portno) +{ + int status; + struct f_rmnet *dev; + struct usb_function *f; + unsigned long flags; + + pr_debug("%s: usb config:%p\n", __func__, c); + + if (portno >= nr_rmnet_ports) { + pr_err("%s: supporting ports#%u port_id:%u", __func__, + nr_rmnet_ports, portno); + return -ENODEV; + } + + if (rmnet_string_defs[0].id == 0) { + status = usb_string_id(c->cdev); + if (status < 0) { + pr_err("%s: failed to get string id, err:%d\n", + __func__, status); + return status; + } + rmnet_string_defs[0].id = status; + } + + dev = rmnet_ports[portno].port; + + spin_lock_irqsave(&dev->lock, flags); + dev->cdev = c->cdev; + f = &dev->port.func; + f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno); + spin_unlock_irqrestore(&dev->lock, flags); + if (!f->name) { + pr_err("%s: cannot allocate memory for name\n", __func__); + return -ENOMEM; + } + + f->strings = rmnet_strings; + f->bind = frmnet_bind; + f->unbind = frmnet_unbind; + f->disable = frmnet_disable; + f->set_alt = frmnet_set_alt; + f->setup = frmnet_setup; + dev->port.send_cpkt_response = frmnet_send_cpkt_response; + dev->port.disconnect = frmnet_disconnect; + dev->port.connect = frmnet_connect; + + status = usb_add_function(c, f); + if (status) { + pr_err("%s: usb add function failed: %d\n", + __func__, status); + kfree(f->name); + return status; + } + + pr_debug("%s: complete\n", __func__); + + return status; +} + +static void frmnet_cleanup(void) +{ + int i; + + for (i = 0; i < nr_rmnet_ports; i++) + kfree(rmnet_ports[i].port); + + nr_rmnet_ports = 0; + no_ctrl_smd_ports = 0; + no_data_bam_ports = 0; + no_data_bam2bam_ports = 0; + no_ctrl_hsic_ports = 0; + no_data_hsic_ports = 0; +} + +static int frmnet_init_port(const char *ctrl_name, const char *data_name) +{ + struct f_rmnet *dev; + struct rmnet_ports *rmnet_port; + int ret; + int i; + + if (nr_rmnet_ports >= NR_RMNET_PORTS) { + pr_err("%s: Max-%d instances supported\n", + __func__, NR_RMNET_PORTS); + return -EINVAL; + } + + pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n", + __func__, nr_rmnet_ports, ctrl_name, data_name); + + dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL); + if (!dev) { + pr_err("%s: Unable to allocate rmnet device\n", __func__); + return -ENOMEM; + } + + dev->port_num = nr_rmnet_ports; + spin_lock_init(&dev->lock); + INIT_LIST_HEAD(&dev->cpkt_resp_q); + + rmnet_port = &rmnet_ports[nr_rmnet_ports]; + rmnet_port->port = dev; + rmnet_port->port_num = nr_rmnet_ports; + rmnet_port->ctrl_xport = str_to_xport(ctrl_name); + rmnet_port->data_xport = str_to_xport(data_name); + + switch (rmnet_port->ctrl_xport) { + 
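	/*
	 * Hand out a per-transport index for the control channel; the
	 * no_ctrl_* counters track how many ports use each transport.
	 */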
case USB_GADGET_XPORT_SMD: + rmnet_port->ctrl_xport_num = no_ctrl_smd_ports; + no_ctrl_smd_ports++; + break; + case USB_GADGET_XPORT_HSIC: + rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports; + no_ctrl_hsic_ports++; + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %u\n", __func__, + rmnet_port->ctrl_xport); + ret = -ENODEV; + goto fail_probe; + } + + switch (rmnet_port->data_xport) { + case USB_GADGET_XPORT_BAM: + rmnet_port->data_xport_num = no_data_bam_ports; + no_data_bam_ports++; + break; + case USB_GADGET_XPORT_BAM2BAM: + rmnet_port->data_xport_num = no_data_bam2bam_ports; + no_data_bam2bam_ports++; + break; + case USB_GADGET_XPORT_HSIC: + rmnet_port->data_xport_num = no_data_hsic_ports; + no_data_hsic_ports++; + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %u\n", __func__, + rmnet_port->data_xport); + ret = -ENODEV; + goto fail_probe; + } + nr_rmnet_ports++; + + return 0; + +fail_probe: + for (i = 0; i < nr_rmnet_ports; i++) + kfree(rmnet_ports[i].port); + + nr_rmnet_ports = 0; + no_ctrl_smd_ports = 0; + no_data_bam_ports = 0; + no_ctrl_hsic_ports = 0; + no_data_hsic_ports = 0; + + return ret; +} diff --git a/drivers/usb/gadget/f_rmnet.h b/drivers/usb/gadget/f_rmnet.h new file mode 100644 index 00000000..2d816c65 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet.h @@ -0,0 +1,19 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __F_RMNET_H +#define __F_RMNET_H + +int rmnet_function_add(struct usb_configuration *c); + +#endif /* __F_RMNET_H */ diff --git a/drivers/usb/gadget/f_rmnet_sdio.c b/drivers/usb/gadget/f_rmnet_sdio.c new file mode 100644 index 00000000..f63d9396 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet_sdio.c @@ -0,0 +1,1535 @@ +/* + * f_rmnet_sdio.c -- RmNet SDIO function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 Nokia Corporation + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_RMNET_SDIO_CTL_CHANNEL +static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL; +#else +static uint32_t rmnet_sdio_ctl_ch; +#endif +module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO); +MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID"); + +#ifdef CONFIG_RMNET_SDIO_DATA_CHANNEL +static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL; +#else +static uint32_t rmnet_sdio_data_ch; +#endif +module_param(rmnet_sdio_data_ch, uint, S_IRUGO); +MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID"); + +#define ACM_CTRL_DTR (1 << 0) + +#define SDIO_MUX_HDR 8 +#define RMNET_SDIO_NOTIFY_INTERVAL 5 +#define RMNET_SDIO_MAX_NFY_SZE sizeof(struct usb_cdc_notification) + +#define RMNET_SDIO_RX_REQ_MAX 16 +#define RMNET_SDIO_RX_REQ_SIZE 2048 +#define RMNET_SDIO_TX_REQ_MAX 200 + +#define TX_PKT_DROP_THRESHOLD 1000 +#define RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000 +#define RX_PKT_FLOW_CTRL_DISABLE 500 + +unsigned int sdio_tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD; +module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); + +unsigned int sdio_rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD; +module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); + +unsigned int sdio_rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE; +module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); + +/* QMI requests & responses buffer*/ +struct rmnet_sdio_qmi_buf { + void *buf; + int len; + struct list_head list; +}; + +struct rmnet_sdio_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + + struct usb_ep *epout; + struct usb_ep *epin; + struct usb_ep *epnotify; + struct usb_request *notify_req; + + u8 ifc_id; + /* QMI lists */ + struct list_head qmi_req_q; + unsigned int qreq_q_len; + struct list_head qmi_resp_q; + unsigned int qresp_q_len; + /* Tx/Rx lists */ + struct list_head tx_idle; + unsigned int tx_idle_len; + struct sk_buff_head tx_skb_queue; + struct list_head rx_idle; + unsigned int rx_idle_len; + struct sk_buff_head rx_skb_queue; + + spinlock_t lock; + atomic_t online; + atomic_t notify_count; + + struct workqueue_struct *wq; + struct work_struct disconnect_work; + + struct work_struct ctl_rx_work; + struct work_struct data_rx_work; + + struct delayed_work sdio_open_work; + struct work_struct sdio_close_work; +#define RMNET_SDIO_CH_OPEN 1 + unsigned long data_ch_status; + unsigned long ctrl_ch_status; + + unsigned int dpkts_pending_atdmux; + int cbits_to_modem; + struct work_struct set_modem_ctl_bits_work; + + /* pkt logging dpkt - data pkt; cpkt - control pkt*/ + struct dentry *dent; + unsigned long dpkt_tolaptop; + unsigned long dpkt_tomodem; + unsigned long tx_drp_cnt; + unsigned long cpkt_tolaptop; + unsigned long cpkt_tomodem; +}; + +static struct usb_interface_descriptor rmnet_sdio_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* Full speed support */ +static struct 
usb_endpoint_descriptor rmnet_sdio_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE), + .bInterval = 1 << RMNET_SDIO_NOTIFY_INTERVAL, +}; + +static struct usb_endpoint_descriptor rmnet_sdio_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor rmnet_sdio_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *rmnet_sdio_fs_function[] = { + (struct usb_descriptor_header *) &rmnet_sdio_interface_desc, + (struct usb_descriptor_header *) &rmnet_sdio_fs_notify_desc, + (struct usb_descriptor_header *) &rmnet_sdio_fs_in_desc, + (struct usb_descriptor_header *) &rmnet_sdio_fs_out_desc, + NULL, +}; + +/* High speed support */ +static struct usb_endpoint_descriptor rmnet_sdio_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE), + .bInterval = RMNET_SDIO_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor rmnet_sdio_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor rmnet_sdio_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *rmnet_sdio_hs_function[] = { + (struct usb_descriptor_header *) &rmnet_sdio_interface_desc, + (struct usb_descriptor_header *) &rmnet_sdio_hs_notify_desc, + (struct usb_descriptor_header *) &rmnet_sdio_hs_in_desc, + (struct usb_descriptor_header *) &rmnet_sdio_hs_out_desc, + NULL, +}; + +/* String descriptors */ + +static struct usb_string rmnet_sdio_string_defs[] = { + [0].s = "QMI RmNet", + { } /* end of list */ +}; + +static struct usb_gadget_strings rmnet_sdio_string_table = { + .language = 0x0409, /* en-us */ + .strings = rmnet_sdio_string_defs, +}; + +static struct usb_gadget_strings *rmnet_sdio_strings[] = { + &rmnet_sdio_string_table, + NULL, +}; + +static struct rmnet_sdio_qmi_buf * +rmnet_sdio_alloc_qmi(unsigned len, gfp_t kmalloc_flags) + +{ + struct rmnet_sdio_qmi_buf *qmi; + + qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags); + if (qmi != NULL) { + qmi->buf = kmalloc(len, kmalloc_flags); + if (qmi->buf == NULL) { + kfree(qmi); + qmi = NULL; + } + } + + return qmi ? qmi : ERR_PTR(-ENOMEM); +} + +static void rmnet_sdio_free_qmi(struct rmnet_sdio_qmi_buf *qmi) +{ + kfree(qmi->buf); + kfree(qmi); +} +/* + * Allocate a usb_request and its buffer. Returns a pointer to the + * usb_request or a pointer with an error code if there is an error. 
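 * The request and its buffer are released together by
 * rmnet_sdio_free_req(). Typical usage (illustrative):
 *
 *	req = rmnet_sdio_alloc_req(ep, len, GFP_KERNEL);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);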
+ */ +static struct usb_request * +rmnet_sdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (len && req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + req = NULL; + } + } + + return req ? req : ERR_PTR(-ENOMEM); +} + +/* + * Free a usb_request and its buffer. + */ +static void rmnet_sdio_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static void rmnet_sdio_notify_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&dev->notify_count, 0); + break; + default: + ERROR(cdev, "rmnet notifyep error %d\n", status); + /* FALLTHROUGH */ + case 0: + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + /* handle multiple pending QMI_RESPONSE_AVAILABLE + * notifications by resending until we're done + */ + if (atomic_dec_and_test(&dev->notify_count)) + break; + + status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enq error %d\n", status); + } + break; + } +} + +static void rmnet_sdio_qmi_resp_available(struct rmnet_sdio_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_cdc_notification *event; + int status; + unsigned long flags; + + /* Response will be sent later */ + if (atomic_inc_return(&dev->notify_count) != 1) + return; + + spin_lock_irqsave(&dev->lock, flags); + + if (!atomic_read(&dev->online)) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + event = dev->notify_req->buf; + + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + spin_unlock_irqrestore(&dev->lock, flags); + + status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC); + if (status < 0) { + if (atomic_read(&dev->online)) + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); + } +} + +#define SDIO_MAX_CTRL_PKT_SIZE 4096 +static void rmnet_sdio_ctl_receive_cb(void *data, int size, void *priv) +{ + struct rmnet_sdio_dev *dev = priv; + struct usb_composite_dev *cdev = dev->cdev; + struct rmnet_sdio_qmi_buf *qmi_resp; + unsigned long flags; + + if (!data) { + pr_info("%s: cmux_ch close event\n", __func__); + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) && + test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + queue_work(dev->wq, &dev->sdio_close_work); + } + return; + } + + if (!size || !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + + if (size > SDIO_MAX_CTRL_PKT_SIZE) { + ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n", + size, SDIO_MAX_CTRL_PKT_SIZE); + return; + } + + if (!atomic_read(&dev->online)) { + DBG(cdev, "USB disconnected\n"); + return; + } + + qmi_resp = rmnet_sdio_alloc_qmi(size, GFP_KERNEL); + if (IS_ERR(qmi_resp)) { + DBG(cdev, "unable to allocate memory for QMI resp\n"); + return; + } + memcpy(qmi_resp->buf, 
data, size); + qmi_resp->len = size; + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&qmi_resp->list, &dev->qmi_resp_q); + dev->qresp_q_len++; + spin_unlock_irqrestore(&dev->lock, flags); + + rmnet_sdio_qmi_resp_available(dev); +} + +static void rmnet_sdio_ctl_write_done(void *data, int size, void *priv) +{ + struct rmnet_sdio_dev *dev = priv; + struct usb_composite_dev *cdev = dev->cdev; + + VDBG(cdev, "rmnet control write done = %d bytes\n", size); +} + +static void rmnet_sdio_sts_callback(int id, void *priv) +{ + struct rmnet_sdio_dev *dev = priv; + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "rmnet_sdio_sts_callback: id: %d\n", id); +} + +static void rmnet_sdio_control_rx_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, + ctl_rx_work); + struct usb_composite_dev *cdev = dev->cdev; + struct rmnet_sdio_qmi_buf *qmi_req; + unsigned long flags; + int ret; + + while (1) { + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(&dev->qmi_req_q)) + goto unlock; + + qmi_req = list_first_entry(&dev->qmi_req_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&qmi_req->list); + dev->qreq_q_len--; + spin_unlock_irqrestore(&dev->lock, flags); + + ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf, + qmi_req->len); + if (ret != qmi_req->len) { + ERROR(cdev, "rmnet control SDIO write failed\n"); + return; + } + + dev->cpkt_tomodem++; + + /* + * cmux_write API copies the buffer and gives it to sdio_al. + * Hence freeing the memory before write is completed. + */ + rmnet_sdio_free_qmi(qmi_req); + } +unlock: + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_response_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + + switch (req->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case 0: + return; + default: + INFO(cdev, "rmnet %s response error %d, %d/%d\n", + ep->name, req->status, + req->actual, req->length); + } +} + +static void rmnet_sdio_command_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + struct rmnet_sdio_qmi_buf *qmi_req; + int len = req->actual; + + if (req->status < 0) { + ERROR(cdev, "rmnet command error %d\n", req->status); + return; + } + + /* discard the packet if sdio is not available */ + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + qmi_req = rmnet_sdio_alloc_qmi(len, GFP_ATOMIC); + if (IS_ERR(qmi_req)) { + ERROR(cdev, "unable to allocate memory for QMI req\n"); + return; + } + memcpy(qmi_req->buf, req->buf, len); + qmi_req->len = len; + spin_lock(&dev->lock); + list_add_tail(&qmi_req->list, &dev->qmi_req_q); + dev->qreq_q_len++; + spin_unlock(&dev->lock); + queue_work(dev->wq, &dev->ctl_rx_work); +} + +static int +rmnet_sdio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int ret = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + struct rmnet_sdio_qmi_buf *resp; + + if (!atomic_read(&dev->online)) + return -ENOTCONN; + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | 
USB_CDC_SEND_ENCAPSULATED_COMMAND: + ret = w_length; + req->complete = rmnet_sdio_command_complete; + req->context = dev; + break; + + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value) + goto invalid; + else { + unsigned len; + + spin_lock(&dev->lock); + + if (list_empty(&dev->qmi_resp_q)) { + INFO(cdev, "qmi resp empty " + " req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + spin_unlock(&dev->lock); + goto invalid; + } + + resp = list_first_entry(&dev->qmi_resp_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&resp->list); + dev->qresp_q_len--; + spin_unlock(&dev->lock); + + len = min_t(unsigned, w_length, resp->len); + memcpy(req->buf, resp->buf, len); + ret = len; + req->context = dev; + req->complete = rmnet_sdio_response_complete; + rmnet_sdio_free_qmi(resp); + + /* check if its the right place to add */ + dev->cpkt_tolaptop++; + } + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + /* This is a workaround for RmNet and is borrowed from the + * CDC/ACM standard. The host driver will issue the above ACM + * standard request to the RmNet interface in the following + * scenario: Once the network adapter is disabled from device + * manager, the above request will be sent from the qcusbnet + * host driver, with DTR being '0'. Once network adapter is + * enabled from device manager (or during enumeration), the + * request will be sent with DTR being '1'. + */ + if (w_value & ACM_CTRL_DTR) + dev->cbits_to_modem |= TIOCM_DTR; + else + dev->cbits_to_modem &= ~TIOCM_DTR; + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); + + ret = 0; + + break; + default: + +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
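 * A non-negative ret is queued back on ep0 as the data stage, with a
 * zero-length packet terminating replies shorter than w_length; a
 * negative ret is handed back to the composite core.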
*/ + if (ret >= 0) { + VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = (ret < w_length); + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static int +rmnet_sdio_rx_submit(struct rmnet_sdio_dev *dev, struct usb_request *req, + gfp_t gfp_flags) +{ + struct sk_buff *skb; + int retval; + + skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags); + if (skb == NULL) + return -ENOMEM; + skb_reserve(skb, SDIO_MUX_HDR); + + req->buf = skb->data; + req->length = RMNET_SDIO_RX_REQ_SIZE; + req->context = skb; + + retval = usb_ep_queue(dev->epout, req, gfp_flags); + if (retval) + dev_kfree_skb_any(skb); + + return retval; +} + +static void rmnet_sdio_start_rx(struct rmnet_sdio_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + int status; + struct usb_request *req; + unsigned long flags; + + if (!atomic_read(&dev->online)) { + pr_err("%s: USB not connected\n", __func__); + return; + } + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->rx_idle)) { + req = list_first_entry(&dev->rx_idle, struct usb_request, list); + list_del(&req->list); + dev->rx_idle_len--; + + spin_unlock_irqrestore(&dev->lock, flags); + status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC); + spin_lock_irqsave(&dev->lock, flags); + + if (status) { + ERROR(cdev, "rmnet data rx enqueue err %d\n", status); + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + break; + } + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_start_tx(struct rmnet_sdio_dev *dev) +{ + unsigned long flags; + int status; + struct sk_buff *skb; + struct usb_request *req; + struct usb_composite_dev *cdev = dev->cdev; + + if (!atomic_read(&dev->online)) + return; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) + return; + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->tx_idle)) { + skb = __skb_dequeue(&dev->tx_skb_queue); + if (!skb) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + req = list_first_entry(&dev->tx_idle, struct usb_request, list); + req->context = skb; + req->buf = skb->data; + req->length = skb->len; + + list_del(&req->list); + dev->tx_idle_len--; + spin_unlock(&dev->lock); + status = usb_ep_queue(dev->epin, req, GFP_ATOMIC); + spin_lock(&dev->lock); + if (status) { + /* USB still online, queue requests back */ + if (atomic_read(&dev->online)) { + ERROR(cdev, "rmnet tx data enqueue err %d\n", + status); + list_add_tail(&req->list, &dev->tx_idle); + dev->tx_idle_len++; + __skb_queue_head(&dev->tx_skb_queue, skb); + } else { + req->buf = 0; + rmnet_sdio_free_req(dev->epin, req); + dev_kfree_skb_any(skb); + } + break; + } + dev->dpkt_tolaptop++; + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb) +{ + struct rmnet_sdio_dev *dev = priv; + unsigned long flags; + + /* SDIO mux sends NULL SKB when link state changes */ + if (!skb) { + pr_info("%s: dmux_ch close event\n", __func__); + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) && + test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + queue_work(dev->wq, &dev->sdio_close_work); + } + return; + } + + if (!atomic_read(&dev->online)) { + dev_kfree_skb_any(skb); + return; + } + + 
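	/*
	 * Flow control toward the host: once the tx backlog crosses
	 * sdio_tx_pkt_drop_thld the packet is dropped and counted,
	 * otherwise it is queued and the bulk-IN path is kicked.
	 */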
spin_lock_irqsave(&dev->lock, flags); + + if (dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) { + if (printk_ratelimit()) + pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n", + __func__, dev->tx_drp_cnt); + dev->tx_drp_cnt++; + spin_unlock_irqrestore(&dev->lock, flags); + dev_kfree_skb_any(skb); + return; + } + + __skb_queue_tail(&dev->tx_skb_queue, skb); + spin_unlock_irqrestore(&dev->lock, flags); + + rmnet_sdio_start_tx(dev); +} + +static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb) +{ + struct rmnet_sdio_dev *dev = priv; + + /* SDIO mux sends NULL SKB when link state changes */ + if (!skb) { + pr_info("%s: dmux_ch open event\n", __func__); + queue_delayed_work(dev->wq, &dev->sdio_open_work, 0); + return; + } + + dev_kfree_skb_any(skb); + /* this function is called from + * sdio mux from spin_lock_irqsave + */ + spin_lock(&dev->lock); + dev->dpkts_pending_atdmux--; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) || + dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) { + spin_unlock(&dev->lock); + return; + } + spin_unlock(&dev->lock); + + rmnet_sdio_start_rx(dev); +} + +static void rmnet_sdio_data_rx_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, + data_rx_work); + struct usb_composite_dev *cdev = dev->cdev; + struct sk_buff *skb; + int ret; + unsigned long flags; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + pr_info("%s: sdio data ch not open\n", __func__); + return; + } + + spin_lock_irqsave(&dev->lock, flags); + while ((skb = __skb_dequeue(&dev->rx_skb_queue))) { + spin_unlock_irqrestore(&dev->lock, flags); + ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb); + spin_lock_irqsave(&dev->lock, flags); + if (ret < 0) { + ERROR(cdev, "rmnet SDIO data write failed\n"); + dev_kfree_skb_any(skb); + break; + } else { + dev->dpkt_tomodem++; + dev->dpkts_pending_atdmux++; + } + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_complete_epout(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = ep->driver_data; + struct usb_composite_dev *cdev = dev->cdev; + struct sk_buff *skb = req->context; + int status = req->status; + int queue = 0; + + switch (status) { + case 0: + /* successful completion */ + skb_put(skb, req->actual); + queue = 1; + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + dev_kfree_skb_any(skb); + req->buf = 0; + rmnet_sdio_free_req(ep, req); + return; + default: + /* unexpected failure */ + ERROR(cdev, "RMNET %s response error %d, %d/%d\n", + ep->name, status, + req->actual, req->length); + dev_kfree_skb_any(skb); + break; + } + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + pr_info("%s: sdio data ch not open\n", __func__); + dev_kfree_skb_any(skb); + req->buf = 0; + rmnet_sdio_free_req(ep, req); + return; + } + + spin_lock(&dev->lock); + if (queue) { + __skb_queue_tail(&dev->rx_skb_queue, skb); + queue_work(dev->wq, &dev->data_rx_work); + } + + if (dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) { + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + spin_unlock(&dev->lock); + return; + } + spin_unlock(&dev->lock); + + status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC); + if (status) { + ERROR(cdev, "rmnet data rx enqueue err %d\n", status); + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + } +} + +static void rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = ep->driver_data; + struct sk_buff 
*skb = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + + switch (status) { + case 0: + /* successful completion */ + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + break; + default: + ERROR(cdev, "rmnet data tx ep error %d\n", status); + break; + } + + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->tx_idle); + dev->tx_idle_len++; + spin_unlock(&dev->lock); + dev_kfree_skb_any(skb); + + rmnet_sdio_start_tx(dev); +} + +static void rmnet_sdio_free_buf(struct rmnet_sdio_dev *dev) +{ + struct rmnet_sdio_qmi_buf *qmi; + struct usb_request *req; + struct list_head *act, *tmp; + struct sk_buff *skb; + unsigned long flags; + + + spin_lock_irqsave(&dev->lock, flags); + + dev->dpkt_tolaptop = 0; + dev->dpkt_tomodem = 0; + dev->cpkt_tolaptop = 0; + dev->cpkt_tomodem = 0; + dev->dpkts_pending_atdmux = 0; + dev->tx_drp_cnt = 0; + + /* free all usb requests in tx pool */ + list_for_each_safe(act, tmp, &dev->tx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + dev->tx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epout, req); + } + + /* free all usb requests in rx pool */ + list_for_each_safe(act, tmp, &dev->rx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + dev->rx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epin, req); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_req_q) { + qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qreq_q_len--; + rmnet_sdio_free_qmi(qmi); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_resp_q) { + qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qresp_q_len--; + rmnet_sdio_free_qmi(qmi); + } + + while ((skb = __skb_dequeue(&dev->tx_skb_queue))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&dev->rx_skb_queue))) + dev_kfree_skb_any(skb); + + rmnet_sdio_free_req(dev->epnotify, dev->notify_req); + + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_set_modem_cbits_w(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev; + + dev = container_of(w, struct rmnet_sdio_dev, set_modem_ctl_bits_work); + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + pr_debug("%s: cbits_to_modem:%d\n", + __func__, dev->cbits_to_modem); + + sdio_cmux_tiocmset(rmnet_sdio_ctl_ch, + dev->cbits_to_modem, + ~dev->cbits_to_modem); +} + +static void rmnet_sdio_disconnect_work(struct work_struct *w) +{ + /* REVISIT: Push all the data to sdio if anythign is pending */ +} +static void rmnet_sdio_suspend(struct usb_function *f) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + + if (!atomic_read(&dev->online)) + return; + /* This is a workaround for Windows Host bug during suspend. + * Windows 7/xp Hosts are suppose to drop DTR, when Host suspended. + * Since it is not beind done, Hence exclusively dropping the DTR + * from function driver suspend. 
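 * DTR is asserted again only when the host issues another
 * SET_CONTROL_LINE_STATE with the DTR bit set (see rmnet_sdio_setup()).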
+ */ + dev->cbits_to_modem &= ~TIOCM_DTR; + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); +} +static void rmnet_sdio_disable(struct usb_function *f) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + + if (!atomic_read(&dev->online)) + return; + + usb_ep_disable(dev->epnotify); + usb_ep_disable(dev->epout); + usb_ep_disable(dev->epin); + + atomic_set(&dev->online, 0); + atomic_set(&dev->notify_count, 0); + rmnet_sdio_free_buf(dev); + + /* cleanup work */ + queue_work(dev->wq, &dev->disconnect_work); + dev->cbits_to_modem = 0; + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); +} + +static void rmnet_close_sdio_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev; + unsigned long flags; + struct usb_cdc_notification *event; + int status; + struct rmnet_sdio_qmi_buf *qmi; + struct usb_request *req; + struct sk_buff *skb; + + pr_debug("%s:\n", __func__); + + dev = container_of(w, struct rmnet_sdio_dev, sdio_close_work); + + if (!atomic_read(&dev->online)) + return; + + usb_ep_fifo_flush(dev->epnotify); + + spin_lock_irqsave(&dev->lock, flags); + event = dev->notify_req->buf; + + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + spin_unlock_irqrestore(&dev->lock, flags); + + status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_KERNEL); + if (status < 0) { + if (!atomic_read(&dev->online)) + return; + pr_err("%s: rmnet notify ep enqueue error %d\n", + __func__, status); + } + + usb_ep_fifo_flush(dev->epout); + usb_ep_fifo_flush(dev->epin); + cancel_work_sync(&dev->data_rx_work); + + spin_lock_irqsave(&dev->lock, flags); + + if (!atomic_read(&dev->online)) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + /* free all usb requests in tx pool */ + while (!list_empty(&dev->tx_idle)) { + req = list_first_entry(&dev->tx_idle, struct usb_request, list); + list_del(&req->list); + dev->tx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epout, req); + } + + /* free all usb requests in rx pool */ + while (!list_empty(&dev->rx_idle)) { + req = list_first_entry(&dev->rx_idle, struct usb_request, list); + list_del(&req->list); + dev->rx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epin, req); + } + + /* free all buffers in qmi request pool */ + while (!list_empty(&dev->qmi_req_q)) { + qmi = list_first_entry(&dev->qmi_req_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qreq_q_len--; + rmnet_sdio_free_qmi(qmi); + } + + /* free all buffers in qmi response pool */ + while (!list_empty(&dev->qmi_resp_q)) { + qmi = list_first_entry(&dev->qmi_resp_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qresp_q_len--; + rmnet_sdio_free_qmi(qmi); + } + atomic_set(&dev->notify_count, 0); + + pr_info("%s: setting notify count to zero\n", __func__); + + + while ((skb = __skb_dequeue(&dev->tx_skb_queue))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&dev->rx_skb_queue))) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&dev->lock, flags); +} + +static int rmnet_sdio_start_io(struct rmnet_sdio_dev *dev) +{ + struct usb_request *req; + int ret, i; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (!atomic_read(&dev->online)) { + spin_unlock_irqrestore(&dev->lock, flags); + return 0; + } + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) || + 
!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + spin_unlock_irqrestore(&dev->lock, flags); + return 0; + } + + for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) { + req = rmnet_sdio_alloc_req(dev->epout, 0, GFP_ATOMIC); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + spin_unlock_irqrestore(&dev->lock, flags); + goto free_buf; + } + req->complete = rmnet_sdio_complete_epout; + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + } + for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) { + req = rmnet_sdio_alloc_req(dev->epin, 0, GFP_ATOMIC); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + spin_unlock_irqrestore(&dev->lock, flags); + goto free_buf; + } + req->complete = rmnet_sdio_complete_epin; + list_add_tail(&req->list, &dev->tx_idle); + dev->tx_idle_len++; + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* Queue Rx data requests */ + rmnet_sdio_start_rx(dev); + + return 0; + +free_buf: + rmnet_sdio_free_buf(dev); + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + return ret; +} + + +#define RMNET_SDIO_OPEN_RETRY_DELAY msecs_to_jiffies(2000) +#define SDIO_SDIO_OPEN_MAX_RETRY 90 +static void rmnet_open_sdio_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev = + container_of(w, struct rmnet_sdio_dev, + sdio_open_work.work); + struct usb_composite_dev *cdev = dev->cdev; + int ret; + static int retry_cnt; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + /* Control channel for QMI messages */ + ret = sdio_cmux_open(rmnet_sdio_ctl_ch, + rmnet_sdio_ctl_receive_cb, + rmnet_sdio_ctl_write_done, + rmnet_sdio_sts_callback, dev); + if (!ret) + set_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + } + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + /* Data channel for network packets */ + ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev, + rmnet_sdio_data_receive_cb, + rmnet_sdio_data_write_done); + if (!ret) + set_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + } + + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) && + test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + + rmnet_sdio_start_io(dev); + + /* if usb cable is connected, update DTR status to modem */ + if (atomic_read(&dev->online)) + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); + + pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n", + __func__, retry_cnt); + retry_cnt = 0; + return; + } + + retry_cnt++; + pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n", + __func__, retry_cnt); + + if (retry_cnt > SDIO_SDIO_OPEN_MAX_RETRY) { + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + ERROR(cdev, "Unable to open control SDIO channel\n"); + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) + ERROR(cdev, "Unable to open DATA SDIO channel\n"); + + } else { + queue_delayed_work(dev->wq, &dev->sdio_open_work, + RMNET_SDIO_OPEN_RETRY_DELAY); + } +} + +static int rmnet_sdio_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + struct usb_composite_dev *cdev = dev->cdev; + int ret = 0; + + dev->epin->driver_data = dev; + usb_ep_enable(dev->epin, ep_choose(cdev->gadget, + &rmnet_sdio_hs_in_desc, + &rmnet_sdio_fs_in_desc)); + dev->epout->driver_data = dev; + usb_ep_enable(dev->epout, ep_choose(cdev->gadget, + &rmnet_sdio_hs_out_desc, + &rmnet_sdio_fs_out_desc)); + usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget, + &rmnet_sdio_hs_notify_desc, + &rmnet_sdio_fs_notify_desc)); + + /* allocate notification */ + dev->notify_req = 
rmnet_sdio_alloc_req(dev->epnotify, + RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC); + + if (IS_ERR(dev->notify_req)) { + ret = PTR_ERR(dev->notify_req); + pr_err("%s: unable to allocate memory for notify ep\n", + __func__); + return ret; + } + dev->notify_req->complete = rmnet_sdio_notify_complete; + dev->notify_req->context = dev; + dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE; + + atomic_set(&dev->online, 1); + + ret = rmnet_sdio_start_io(dev); + + return ret; + +} + +static int rmnet_sdio_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + int id; + struct usb_ep *ep; + + dev->cdev = cdev; + + /* allocate interface ID */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + dev->ifc_id = id; + rmnet_sdio_interface_desc.bInterfaceNumber = id; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_in_desc); + if (!ep) + goto out; + ep->driver_data = cdev; /* claim endpoint */ + dev->epin = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_out_desc); + if (!ep) + goto out; + ep->driver_data = cdev; /* claim endpoint */ + dev->epout = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_notify_desc); + if (!ep) + goto out; + ep->driver_data = cdev; /* claim endpoint */ + dev->epnotify = ep; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + rmnet_sdio_hs_in_desc.bEndpointAddress = + rmnet_sdio_fs_in_desc.bEndpointAddress; + rmnet_sdio_hs_out_desc.bEndpointAddress = + rmnet_sdio_fs_out_desc.bEndpointAddress; + rmnet_sdio_hs_notify_desc.bEndpointAddress = + rmnet_sdio_fs_notify_desc.bEndpointAddress; + } + + queue_delayed_work(dev->wq, &dev->sdio_open_work, 0); + + return 0; + +out: + if (dev->epnotify) + dev->epnotify->driver_data = NULL; + if (dev->epout) + dev->epout->driver_data = NULL; + if (dev->epin) + dev->epin->driver_data = NULL; + + return -ENODEV; +} + +static void +rmnet_sdio_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + + cancel_delayed_work_sync(&dev->sdio_open_work); + destroy_workqueue(dev->wq); + + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + msm_sdio_dmux_close(rmnet_sdio_data_ch); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + } + + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + sdio_cmux_close(rmnet_sdio_ctl_ch); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + } + + debugfs_remove_recursive(dev->dent); + + kfree(dev); +} + +#if defined(CONFIG_DEBUG_FS) +static ssize_t rmnet_sdio_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct rmnet_sdio_dev *dev = file->private_data; + char *buf; + unsigned long flags; + int ret; + + buf = kzalloc(sizeof(char) * 1024, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_irqsave(&dev->lock, flags); + ret = scnprintf(buf, PAGE_SIZE, + "-*-DATA-*-\n" + "dpkts_tohost:%lu epInPool:%u tx_size:%u drp_cnt:%lu\n" + "dpkts_tomodem:%lu epOutPool:%u rx_size:%u pending:%u\n" + "-*-QMI-*-\n" + "cpkts_tomodem:%lu qmi_req_q:%u cbits:%d\n" + "cpkts_tolaptop:%lu qmi_resp_q:%u notify_cnt:%d\n" + "-*-MISC-*-\n" + "data_ch_status: %lu ctrl_ch_status: %lu\n", + /* data */ + dev->dpkt_tolaptop, 
dev->tx_idle_len, + dev->tx_skb_queue.qlen, dev->tx_drp_cnt, + dev->dpkt_tomodem, dev->rx_idle_len, + dev->rx_skb_queue.qlen, dev->dpkts_pending_atdmux, + /* qmi */ + dev->cpkt_tomodem, dev->qreq_q_len, + dev->cbits_to_modem, + dev->cpkt_tolaptop, dev->qresp_q_len, + atomic_read(&dev->notify_count), + /* misc */ + dev->data_ch_status, dev->ctrl_ch_status); + + spin_unlock_irqrestore(&dev->lock, flags); + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret); + + kfree(buf); + + return ret; +} + +static ssize_t rmnet_sdio_reset_stats(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct rmnet_sdio_dev *dev = file->private_data; + + dev->dpkt_tolaptop = 0; + dev->dpkt_tomodem = 0; + dev->cpkt_tolaptop = 0; + dev->cpkt_tomodem = 0; + dev->dpkts_pending_atdmux = 0; + dev->tx_drp_cnt = 0; + + /* TBD: How do we reset skb qlen + * it might have side effects + */ + + return count; +} + +static int debug_rmnet_sdio_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +const struct file_operations debug_rmnet_sdio_stats_ops = { + .open = debug_rmnet_sdio_open, + .read = rmnet_sdio_read_stats, + .write = rmnet_sdio_reset_stats, +}; + +static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev) +{ + dev->dent = debugfs_create_dir("usb_rmnet_sdio", 0); + if (IS_ERR(dev->dent)) + return; + + debugfs_create_file("status", 0444, dev->dent, dev, + &debug_rmnet_sdio_stats_ops); +} +#else +static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev) +{ + return; +} +#endif + +int rmnet_sdio_function_add(struct usb_configuration *c) +{ + struct rmnet_sdio_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->wq = create_singlethread_workqueue("k_rmnet_work"); + if (!dev->wq) { + ret = -ENOMEM; + goto free_dev; + } + + spin_lock_init(&dev->lock); + atomic_set(&dev->notify_count, 0); + atomic_set(&dev->online, 0); + + INIT_WORK(&dev->disconnect_work, rmnet_sdio_disconnect_work); + INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_sdio_set_modem_cbits_w); + + INIT_WORK(&dev->ctl_rx_work, rmnet_sdio_control_rx_work); + INIT_WORK(&dev->data_rx_work, rmnet_sdio_data_rx_work); + + INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work); + INIT_WORK(&dev->sdio_close_work, rmnet_close_sdio_work); + + INIT_LIST_HEAD(&dev->qmi_req_q); + INIT_LIST_HEAD(&dev->qmi_resp_q); + + INIT_LIST_HEAD(&dev->rx_idle); + INIT_LIST_HEAD(&dev->tx_idle); + skb_queue_head_init(&dev->tx_skb_queue); + skb_queue_head_init(&dev->rx_skb_queue); + + dev->function.name = "rmnet_sdio"; + dev->function.strings = rmnet_sdio_strings; + dev->function.descriptors = rmnet_sdio_fs_function; + dev->function.hs_descriptors = rmnet_sdio_hs_function; + dev->function.bind = rmnet_sdio_bind; + dev->function.unbind = rmnet_sdio_unbind; + dev->function.setup = rmnet_sdio_setup; + dev->function.set_alt = rmnet_sdio_set_alt; + dev->function.disable = rmnet_sdio_disable; + dev->function.suspend = rmnet_sdio_suspend; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto free_wq; + + rmnet_sdio_debugfs_init(dev); + + return 0; + +free_wq: + destroy_workqueue(dev->wq); +free_dev: + kfree(dev); + + return ret; +} diff --git a/drivers/usb/gadget/f_rmnet_smd.c b/drivers/usb/gadget/f_rmnet_smd.c new file mode 100644 index 00000000..2049dc01 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet_smd.c @@ -0,0 +1,1368 @@ +/* + * f_rmnet.c -- RmNet function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * 
Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 Nokia Corporation + * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "gadget_chips.h" + +#ifndef CONFIG_MSM_SMD +#define CONFIG_RMNET_SMD_CTL_CHANNEL "" +#define CONFIG_RMNET_SMD_DATA_CHANNEL "" +#endif + +static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL; +module_param(rmnet_ctl_ch, charp, S_IRUGO); +MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel"); + +static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL; +module_param(rmnet_data_ch, charp, S_IRUGO); +MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel"); + +#define RMNET_SMD_ACM_CTRL_DTR (1 << 0) + +#define RMNET_SMD_NOTIFY_INTERVAL 5 +#define RMNET_SMD_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification) + +#define QMI_REQ_MAX 4 +#define QMI_REQ_SIZE 2048 +#define QMI_RESP_MAX 8 +#define QMI_RESP_SIZE 2048 + +#define RMNET_RX_REQ_MAX 8 +#define RMNET_RX_REQ_SIZE 2048 +#define RMNET_TX_REQ_MAX 8 +#define RMNET_TX_REQ_SIZE 2048 + +#define RMNET_TXN_MAX 2048 + +/* QMI requests & responses buffer*/ +struct qmi_buf { + void *buf; + int len; + struct list_head list; +}; + +/* Control & data SMD channel private data */ +struct rmnet_smd_ch_info { + struct smd_channel *ch; + struct tasklet_struct tx_tlet; + struct tasklet_struct rx_tlet; +#define CH_OPENED 0 + unsigned long flags; + /* pending rx packet length */ + atomic_t rx_pkt; + /* wait for smd open event*/ + wait_queue_head_t wait; +}; + +struct rmnet_smd_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + + struct usb_ep *epout; + struct usb_ep *epin; + struct usb_ep *epnotify; + struct usb_request *notify_req; + + u8 ifc_id; + /* QMI lists */ + struct list_head qmi_req_pool; + struct list_head qmi_resp_pool; + struct list_head qmi_req_q; + struct list_head qmi_resp_q; + /* Tx/Rx lists */ + struct list_head tx_idle; + struct list_head rx_idle; + struct list_head rx_queue; + + spinlock_t lock; + atomic_t online; + atomic_t notify_count; + + struct platform_driver pdrv; + u8 is_pdrv_used; + struct rmnet_smd_ch_info smd_ctl; + struct rmnet_smd_ch_info smd_data; + + struct workqueue_struct *wq; + struct work_struct connect_work; + struct work_struct disconnect_work; + + unsigned long dpkts_to_host; + unsigned long dpkts_from_modem; + unsigned long dpkts_from_host; + unsigned long dpkts_to_modem; + + unsigned long cpkts_to_host; + unsigned long cpkts_from_modem; + unsigned long cpkts_from_host; + unsigned long cpkts_to_modem; +}; + +static struct rmnet_smd_dev *rmnet_smd; + +static struct 
usb_interface_descriptor rmnet_smd_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* Full speed support */ +static struct usb_endpoint_descriptor rmnet_smd_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16( + RMNET_SMD_MAX_NOTIFY_SIZE), + .bInterval = 1 << RMNET_SMD_NOTIFY_INTERVAL, +}; + +static struct usb_endpoint_descriptor rmnet_smd_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor rmnet_smd_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *rmnet_smd_fs_function[] = { + (struct usb_descriptor_header *) &rmnet_smd_interface_desc, + (struct usb_descriptor_header *) &rmnet_smd_fs_notify_desc, + (struct usb_descriptor_header *) &rmnet_smd_fs_in_desc, + (struct usb_descriptor_header *) &rmnet_smd_fs_out_desc, + NULL, +}; + +/* High speed support */ +static struct usb_endpoint_descriptor rmnet_smd_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16( + RMNET_SMD_MAX_NOTIFY_SIZE), + .bInterval = RMNET_SMD_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor rmnet_smd_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor rmnet_smd_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *rmnet_smd_hs_function[] = { + (struct usb_descriptor_header *) &rmnet_smd_interface_desc, + (struct usb_descriptor_header *) &rmnet_smd_hs_notify_desc, + (struct usb_descriptor_header *) &rmnet_smd_hs_in_desc, + (struct usb_descriptor_header *) &rmnet_smd_hs_out_desc, + NULL, +}; + +/* String descriptors */ + +static struct usb_string rmnet_smd_string_defs[] = { + [0].s = "QMI RmNet", + { } /* end of list */ +}; + +static struct usb_gadget_strings rmnet_smd_string_table = { + .language = 0x0409, /* en-us */ + .strings = rmnet_smd_string_defs, +}; + +static struct usb_gadget_strings *rmnet_smd_strings[] = { + &rmnet_smd_string_table, + NULL, +}; + +static struct qmi_buf * +rmnet_smd_alloc_qmi(unsigned len, gfp_t kmalloc_flags) +{ + struct qmi_buf *qmi; + + qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags); + if (qmi != NULL) { + qmi->buf = kmalloc(len, kmalloc_flags); + if (qmi->buf == NULL) { + kfree(qmi); + qmi = NULL; + } + } + + return qmi ? 
qmi : ERR_PTR(-ENOMEM); +} + +static void rmnet_smd_free_qmi(struct qmi_buf *qmi) +{ + kfree(qmi->buf); + kfree(qmi); +} +/* + * Allocate a usb_request and its buffer. Returns a pointer to the + * usb_request or a error code if there is an error. + */ +static struct usb_request * +rmnet_smd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + req = NULL; + } + } + + return req ? req : ERR_PTR(-ENOMEM); +} + +/* + * Free a usb_request and its buffer. + */ +static void rmnet_smd_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static void rmnet_smd_notify_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&dev->notify_count, 0); + break; + default: + ERROR(cdev, "rmnet notify ep error %d\n", status); + /* FALLTHROUGH */ + case 0: + if (ep != dev->epnotify) + break; + + /* handle multiple pending QMI_RESPONSE_AVAILABLE + * notifications by resending until we're done + */ + if (atomic_dec_and_test(&dev->notify_count)) + break; + + status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enqueue error %d\n", + status); + } + break; + } +} + +static void qmi_smd_response_available(struct rmnet_smd_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = dev->notify_req; + struct usb_cdc_notification *event = req->buf; + int status; + + /* Response will be sent later */ + if (atomic_inc_return(&dev->notify_count) != 1) + return; + + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + + status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC); + if (status < 0) { + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); + } +} + +/* TODO + * handle modem restart events + */ +static void rmnet_smd_event_notify(void *priv, unsigned event) +{ + struct rmnet_smd_ch_info *smd_info = priv; + int len = atomic_read(&smd_info->rx_pkt); + struct rmnet_smd_dev *dev = + (struct rmnet_smd_dev *) smd_info->tx_tlet.data; + + switch (event) { + case SMD_EVENT_DATA: { + if (!atomic_read(&dev->online)) + break; + if (len && (smd_write_avail(smd_info->ch) >= len)) + tasklet_schedule(&smd_info->rx_tlet); + + if (smd_read_avail(smd_info->ch)) + tasklet_schedule(&smd_info->tx_tlet); + + break; + } + case SMD_EVENT_OPEN: + /* usb endpoints are not enabled untill smd channels + * are opened. wake up worker thread to continue + * connection processing + */ + set_bit(CH_OPENED, &smd_info->flags); + wake_up(&smd_info->wait); + break; + case SMD_EVENT_CLOSE: + /* We will never come here. 
+ * reset flags after closing smd channel + * */ + clear_bit(CH_OPENED, &smd_info->flags); + break; + } +} + +static void rmnet_control_tx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct qmi_buf *qmi_resp; + int sz; + unsigned long flags; + + while (1) { + sz = smd_cur_packet_size(dev->smd_ctl.ch); + if (sz == 0) + break; + if (smd_read_avail(dev->smd_ctl.ch) < sz) + break; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(&dev->qmi_resp_pool)) { + ERROR(cdev, "rmnet QMI Tx buffers full\n"); + spin_unlock_irqrestore(&dev->lock, flags); + break; + } + qmi_resp = list_first_entry(&dev->qmi_resp_pool, + struct qmi_buf, list); + list_del(&qmi_resp->list); + spin_unlock_irqrestore(&dev->lock, flags); + + qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz); + + spin_lock_irqsave(&dev->lock, flags); + dev->cpkts_from_modem++; + list_add_tail(&qmi_resp->list, &dev->qmi_resp_q); + spin_unlock_irqrestore(&dev->lock, flags); + + qmi_smd_response_available(dev); + } + +} + +static void rmnet_control_rx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct qmi_buf *qmi_req; + int ret; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + while (1) { + + if (list_empty(&dev->qmi_req_q)) { + atomic_set(&dev->smd_ctl.rx_pkt, 0); + break; + } + qmi_req = list_first_entry(&dev->qmi_req_q, + struct qmi_buf, list); + if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) { + atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len); + DBG(cdev, "rmnet control smd channel full\n"); + break; + } + + list_del(&qmi_req->list); + dev->cpkts_from_host++; + spin_unlock_irqrestore(&dev->lock, flags); + ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len); + spin_lock_irqsave(&dev->lock, flags); + if (ret != qmi_req->len) { + ERROR(cdev, "rmnet control smd write failed\n"); + break; + } + dev->cpkts_to_modem++; + list_add_tail(&qmi_req->list, &dev->qmi_req_pool); + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_smd_command_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + struct qmi_buf *qmi_req; + int ret; + + if (req->status < 0) { + ERROR(cdev, "rmnet command error %d\n", req->status); + return; + } + + spin_lock(&dev->lock); + dev->cpkts_from_host++; + /* no pending control rx packet */ + if (!atomic_read(&dev->smd_ctl.rx_pkt)) { + if (smd_write_avail(dev->smd_ctl.ch) < req->actual) { + atomic_set(&dev->smd_ctl.rx_pkt, req->actual); + goto queue_req; + } + spin_unlock(&dev->lock); + ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual); + /* This should never happen */ + if (ret != req->actual) + ERROR(cdev, "rmnet control smd write failed\n"); + spin_lock(&dev->lock); + dev->cpkts_to_modem++; + spin_unlock(&dev->lock); + return; + } +queue_req: + if (list_empty(&dev->qmi_req_pool)) { + spin_unlock(&dev->lock); + ERROR(cdev, "rmnet QMI pool is empty\n"); + return; + } + + qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list); + list_del(&qmi_req->list); + spin_unlock(&dev->lock); + memcpy(qmi_req->buf, req->buf, req->actual); + qmi_req->len = req->actual; + spin_lock(&dev->lock); + list_add_tail(&qmi_req->list, &dev->qmi_req_q); + spin_unlock(&dev->lock); +} +static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_smd_dev *dev = 
req->context; + + spin_lock(&dev->lock); + dev->cpkts_to_host++; + spin_unlock(&dev->lock); +} + +static int +rmnet_smd_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int ret = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + struct qmi_buf *resp; + int schedule = 0; + + if (!atomic_read(&dev->online)) + return -ENOTCONN; + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SEND_ENCAPSULATED_COMMAND: + ret = w_length; + req->complete = rmnet_smd_command_complete; + req->context = dev; + break; + + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value) + goto invalid; + else { + spin_lock(&dev->lock); + if (list_empty(&dev->qmi_resp_q)) { + INFO(cdev, "qmi resp empty " + " req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + spin_unlock(&dev->lock); + goto invalid; + } + resp = list_first_entry(&dev->qmi_resp_q, + struct qmi_buf, list); + list_del(&resp->list); + spin_unlock(&dev->lock); + memcpy(req->buf, resp->buf, resp->len); + ret = resp->len; + spin_lock(&dev->lock); + + if (list_empty(&dev->qmi_resp_pool)) + schedule = 1; + list_add_tail(&resp->list, &dev->qmi_resp_pool); + + if (schedule) + tasklet_schedule(&dev->smd_ctl.tx_tlet); + spin_unlock(&dev->lock); + req->complete = rmnet_txcommand_complete; + req->context = dev; + } + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + /* This is a workaround for RmNet and is borrowed from the + * CDC/ACM standard. The host driver will issue the above ACM + * standard request to the RmNet interface in the following + * scenario: Once the network adapter is disabled from device + * manager, the above request will be sent from the qcusbnet + * host driver, with DTR being '0'. Once network adapter is + * enabled from device manager (or during enumeration), the + * request will be sent with DTR being '1'. + */ + if (w_value & RMNET_SMD_ACM_CTRL_DTR) + ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0); + else + ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR); + + break; + default: + +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
*/ + if (ret >= 0) { + VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static void rmnet_smd_start_rx(struct rmnet_smd_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + int status; + struct usb_request *req; + struct list_head *pool = &dev->rx_idle; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(pool)) { + req = list_entry(pool->next, struct usb_request, list); + list_del(&req->list); + + spin_unlock_irqrestore(&dev->lock, flags); + status = usb_ep_queue(dev->epout, req, GFP_ATOMIC); + spin_lock_irqsave(&dev->lock, flags); + + if (status) { + ERROR(cdev, "rmnet data rx enqueue err %d\n", status); + list_add_tail(&req->list, pool); + break; + } + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_data_tx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int status; + int sz; + unsigned long flags; + + while (1) { + + sz = smd_cur_packet_size(dev->smd_data.ch); + if (sz == 0) + break; + if (smd_read_avail(dev->smd_data.ch) < sz) + break; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(&dev->tx_idle)) { + spin_unlock_irqrestore(&dev->lock, flags); + DBG(cdev, "rmnet data Tx buffers full\n"); + break; + } + req = list_first_entry(&dev->tx_idle, struct usb_request, list); + list_del(&req->list); + spin_unlock_irqrestore(&dev->lock, flags); + + req->length = smd_read(dev->smd_data.ch, req->buf, sz); + status = usb_ep_queue(dev->epin, req, GFP_ATOMIC); + if (status) { + ERROR(cdev, "rmnet tx data enqueue err %d\n", status); + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, &dev->tx_idle); + spin_unlock_irqrestore(&dev->lock, flags); + break; + } + spin_lock_irqsave(&dev->lock, flags); + dev->dpkts_from_modem++; + spin_unlock_irqrestore(&dev->lock, flags); + } + +} + +static void rmnet_data_rx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int ret; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + while (1) { + if (list_empty(&dev->rx_queue)) { + atomic_set(&dev->smd_data.rx_pkt, 0); + break; + } + req = list_first_entry(&dev->rx_queue, + struct usb_request, list); + if (smd_write_avail(dev->smd_data.ch) < req->actual) { + atomic_set(&dev->smd_data.rx_pkt, req->actual); + DBG(cdev, "rmnet SMD data channel full\n"); + break; + } + + list_del(&req->list); + spin_unlock_irqrestore(&dev->lock, flags); + ret = smd_write(dev->smd_data.ch, req->buf, req->actual); + spin_lock_irqsave(&dev->lock, flags); + if (ret != req->actual) { + ERROR(cdev, "rmnet SMD data write failed\n"); + break; + } + dev->dpkts_to_modem++; + list_add_tail(&req->list, &dev->rx_idle); + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* We have free rx data requests. */ + rmnet_smd_start_rx(dev); +} + +/* If SMD has enough room to accommodate a data rx packet, + * write into SMD directly. Otherwise enqueue to rx_queue. + * We will not write into SMD directly untill rx_queue is + * empty to strictly follow the ordering requests. 
+ */ +static void rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + int ret; + + switch (status) { + case 0: + /* normal completion */ + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->rx_idle); + spin_unlock(&dev->lock); + return; + default: + /* unexpected failure */ + ERROR(cdev, "RMNET %s response error %d, %d/%d\n", + ep->name, status, + req->actual, req->length); + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->rx_idle); + spin_unlock(&dev->lock); + return; + } + + spin_lock(&dev->lock); + dev->dpkts_from_host++; + if (!atomic_read(&dev->smd_data.rx_pkt)) { + if (smd_write_avail(dev->smd_data.ch) < req->actual) { + atomic_set(&dev->smd_data.rx_pkt, req->actual); + goto queue_req; + } + spin_unlock(&dev->lock); + ret = smd_write(dev->smd_data.ch, req->buf, req->actual); + /* This should never happen */ + if (ret != req->actual) + ERROR(cdev, "rmnet data smd write failed\n"); + /* Restart Rx */ + spin_lock(&dev->lock); + dev->dpkts_to_modem++; + list_add_tail(&req->list, &dev->rx_idle); + spin_unlock(&dev->lock); + rmnet_smd_start_rx(dev); + return; + } +queue_req: + list_add_tail(&req->list, &dev->rx_queue); + spin_unlock(&dev->lock); +} + +static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + int schedule = 0; + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->tx_idle); + spin_unlock(&dev->lock); + break; + default: + ERROR(cdev, "rmnet data tx ep error %d\n", status); + /* FALLTHROUGH */ + case 0: + spin_lock(&dev->lock); + if (list_empty(&dev->tx_idle)) + schedule = 1; + list_add_tail(&req->list, &dev->tx_idle); + dev->dpkts_to_host++; + if (schedule) + tasklet_schedule(&dev->smd_data.tx_tlet); + spin_unlock(&dev->lock); + break; + } + +} + +static void rmnet_smd_disconnect_work(struct work_struct *w) +{ + struct qmi_buf *qmi; + struct usb_request *req; + struct list_head *act, *tmp; + struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev, + disconnect_work); + + tasklet_kill(&dev->smd_ctl.rx_tlet); + tasklet_kill(&dev->smd_ctl.tx_tlet); + tasklet_kill(&dev->smd_data.rx_tlet); + tasklet_kill(&dev->smd_data.tx_tlet); + + smd_close(dev->smd_ctl.ch); + dev->smd_ctl.flags = 0; + + smd_close(dev->smd_data.ch); + dev->smd_data.flags = 0; + + atomic_set(&dev->notify_count, 0); + + list_for_each_safe(act, tmp, &dev->rx_queue) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + list_add_tail(&req->list, &dev->rx_idle); + } + + list_for_each_safe(act, tmp, &dev->qmi_req_q) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + list_add_tail(&qmi->list, &dev->qmi_req_pool); + } + + list_for_each_safe(act, tmp, &dev->qmi_resp_q) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + list_add_tail(&qmi->list, &dev->qmi_resp_pool); + } + + if (dev->is_pdrv_used) { + platform_driver_unregister(&dev->pdrv); + dev->is_pdrv_used = 0; + } +} + +/* SMD close may sleep + * schedule a work to close smd channels + */ +static void rmnet_smd_disable(struct usb_function *f) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + + if 
(!atomic_read(&dev->online)) + return; + + atomic_set(&dev->online, 0); + + usb_ep_fifo_flush(dev->epnotify); + usb_ep_disable(dev->epnotify); + usb_ep_fifo_flush(dev->epout); + usb_ep_disable(dev->epout); + + usb_ep_fifo_flush(dev->epin); + usb_ep_disable(dev->epin); + + /* cleanup work */ + queue_work(dev->wq, &dev->disconnect_work); +} + +static void rmnet_smd_connect_work(struct work_struct *w) +{ + struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev, + connect_work); + struct usb_composite_dev *cdev = dev->cdev; + int ret = 0; + + /* Control channel for QMI messages */ + ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch, + &dev->smd_ctl, rmnet_smd_event_notify); + if (ret) { + ERROR(cdev, "Unable to open control smd channel: %d\n", ret); + /* + * Register platform driver to be notified in case SMD channels + * later becomes ready to be opened. + */ + ret = platform_driver_register(&dev->pdrv); + if (ret) + ERROR(cdev, "Platform driver %s register failed %d\n", + dev->pdrv.driver.name, ret); + else + dev->is_pdrv_used = 1; + + return; + } + wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED, + &dev->smd_ctl.flags)); + + /* Data channel for network packets */ + ret = smd_open(rmnet_data_ch, &dev->smd_data.ch, + &dev->smd_data, rmnet_smd_event_notify); + if (ret) { + ERROR(cdev, "Unable to open data smd channel\n"); + smd_close(dev->smd_ctl.ch); + return; + } + wait_event(dev->smd_data.wait, test_bit(CH_OPENED, + &dev->smd_data.flags)); + + atomic_set(&dev->online, 1); + /* Queue Rx data requests */ + rmnet_smd_start_rx(dev); +} + +static int rmnet_smd_ch_probe(struct platform_device *pdev) +{ + DBG(rmnet_smd->cdev, "Probe called for device: %s\n", pdev->name); + + queue_work(rmnet_smd->wq, &rmnet_smd->connect_work); + + return 0; +} + +/* SMD open may sleep. + * Schedule a work to open smd channels and enable + * endpoints if smd channels are opened successfully. 
+ */ +static int rmnet_smd_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + struct usb_composite_dev *cdev = dev->cdev; + int ret = 0; + + ret = usb_ep_enable(dev->epin, ep_choose(cdev->gadget, + &rmnet_smd_hs_in_desc, + &rmnet_smd_fs_in_desc)); + if (ret) { + ERROR(cdev, "can't enable %s, result %d\n", + dev->epin->name, ret); + return ret; + } + ret = usb_ep_enable(dev->epout, ep_choose(cdev->gadget, + &rmnet_smd_hs_out_desc, + &rmnet_smd_fs_out_desc)); + if (ret) { + ERROR(cdev, "can't enable %s, result %d\n", + dev->epout->name, ret); + usb_ep_disable(dev->epin); + return ret; + } + + ret = usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget, + &rmnet_smd_hs_notify_desc, + &rmnet_smd_fs_notify_desc)); + if (ret) { + ERROR(cdev, "can't enable %s, result %d\n", + dev->epnotify->name, ret); + usb_ep_disable(dev->epin); + usb_ep_disable(dev->epout); + return ret; + } + + queue_work(dev->wq, &dev->connect_work); + return 0; +} + +static void rmnet_smd_free_buf(struct rmnet_smd_dev *dev) +{ + struct qmi_buf *qmi; + struct usb_request *req; + struct list_head *act, *tmp; + + dev->dpkts_to_host = 0; + dev->dpkts_from_modem = 0; + dev->dpkts_from_host = 0; + dev->dpkts_to_modem = 0; + + dev->cpkts_to_host = 0; + dev->cpkts_from_modem = 0; + dev->cpkts_from_host = 0; + dev->cpkts_to_modem = 0; + /* free all usb requests in tx pool */ + list_for_each_safe(act, tmp, &dev->tx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + rmnet_smd_free_req(dev->epout, req); + } + + /* free all usb requests in rx pool */ + list_for_each_safe(act, tmp, &dev->rx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + rmnet_smd_free_req(dev->epin, req); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_req_pool) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + rmnet_smd_free_qmi(qmi); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_resp_pool) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + rmnet_smd_free_qmi(qmi); + } + + rmnet_smd_free_req(dev->epnotify, dev->notify_req); +} +static int rmnet_smd_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + int i, id, ret; + struct qmi_buf *qmi; + struct usb_request *req; + struct usb_ep *ep; + + dev->cdev = cdev; + + /* allocate interface ID */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + dev->ifc_id = id; + rmnet_smd_interface_desc.bInterfaceNumber = id; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_in_desc); + if (!ep) + return -ENODEV; + ep->driver_data = cdev; /* claim endpoint */ + dev->epin = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_out_desc); + if (!ep) + return -ENODEV; + ep->driver_data = cdev; /* claim endpoint */ + dev->epout = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_notify_desc); + if (!ep) + return -ENODEV; + ep->driver_data = cdev; /* clain endpoint */ + dev->epnotify = ep; + + /* support all relevant hardware speeds... 
we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + rmnet_smd_hs_in_desc.bEndpointAddress = + rmnet_smd_fs_in_desc.bEndpointAddress; + rmnet_smd_hs_out_desc.bEndpointAddress = + rmnet_smd_fs_out_desc.bEndpointAddress; + rmnet_smd_hs_notify_desc.bEndpointAddress = + rmnet_smd_fs_notify_desc.bEndpointAddress; + + } + + /* allocate notification */ + dev->notify_req = rmnet_smd_alloc_req(dev->epnotify, + RMNET_SMD_MAX_NOTIFY_SIZE, GFP_KERNEL); + if (IS_ERR(dev->notify_req)) + return PTR_ERR(dev->notify_req); + + dev->notify_req->complete = rmnet_smd_notify_complete; + dev->notify_req->context = dev; + dev->notify_req->length = RMNET_SMD_MAX_NOTIFY_SIZE; + + /* Allocate the qmi request and response buffers */ + for (i = 0; i < QMI_REQ_MAX; i++) { + qmi = rmnet_smd_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL); + if (IS_ERR(qmi)) { + ret = PTR_ERR(qmi); + goto free_buf; + } + list_add_tail(&qmi->list, &dev->qmi_req_pool); + } + + for (i = 0; i < QMI_RESP_MAX; i++) { + qmi = rmnet_smd_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL); + if (IS_ERR(qmi)) { + ret = PTR_ERR(qmi); + goto free_buf; + } + list_add_tail(&qmi->list, &dev->qmi_resp_pool); + } + + /* Allocate bulk in/out requests for data transfer */ + for (i = 0; i < RMNET_RX_REQ_MAX; i++) { + req = rmnet_smd_alloc_req(dev->epout, RMNET_RX_REQ_SIZE, + GFP_KERNEL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + goto free_buf; + } + req->length = RMNET_TXN_MAX; + req->context = dev; + req->complete = rmnet_smd_complete_epout; + list_add_tail(&req->list, &dev->rx_idle); + } + + for (i = 0; i < RMNET_TX_REQ_MAX; i++) { + req = rmnet_smd_alloc_req(dev->epin, RMNET_TX_REQ_SIZE, + GFP_KERNEL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + goto free_buf; + } + req->context = dev; + req->complete = rmnet_smd_complete_epin; + list_add_tail(&req->list, &dev->tx_idle); + } + + return 0; + +free_buf: + rmnet_smd_free_buf(dev); + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + return ret; +} + +#if defined(CONFIG_DEBUG_FS) +static ssize_t rmnet_smd_debug_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct rmnet_smd_dev *dev = file->private_data; + struct rmnet_smd_ch_info smd_ctl_info = dev->smd_ctl; + struct rmnet_smd_ch_info smd_data_info = dev->smd_data; + char *buf; + unsigned long flags; + int ret; + + buf = kzalloc(sizeof(char) * 512, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_irqsave(&dev->lock, flags); + ret = scnprintf(buf, 512, + "smd_control_ch_opened: %lu\n" + "smd_data_ch_opened: %lu\n" + "usb online : %d\n" + "dpkts_from_modem: %lu\n" + "dpkts_to_host: %lu\n" + "pending_dpkts_to_host: %lu\n" + "dpkts_from_host: %lu\n" + "dpkts_to_modem: %lu\n" + "pending_dpkts_to_modem: %lu\n" + "cpkts_from_modem: %lu\n" + "cpkts_to_host: %lu\n" + "pending_cpkts_to_host: %lu\n" + "cpkts_from_host: %lu\n" + "cpkts_to_modem: %lu\n" + "pending_cpkts_to_modem: %lu\n" + "smd_read_avail_ctrl: %d\n" + "smd_write_avail_ctrl: %d\n" + "smd_read_avail_data: %d\n" + "smd_write_avail_data: %d\n", + smd_ctl_info.flags, smd_data_info.flags, + atomic_read(&dev->online), + dev->dpkts_from_modem, dev->dpkts_to_host, + (dev->dpkts_from_modem - dev->dpkts_to_host), + dev->dpkts_from_host, dev->dpkts_to_modem, + (dev->dpkts_from_host - dev->dpkts_to_modem), + dev->cpkts_from_modem, dev->cpkts_to_host, + (dev->cpkts_from_modem - dev->cpkts_to_host), + dev->cpkts_from_host, dev->cpkts_to_modem, + (dev->cpkts_from_host - 
dev->cpkts_to_modem), + smd_read_avail(dev->smd_ctl.ch), + smd_write_avail(dev->smd_ctl.ch), + smd_read_avail(dev->smd_data.ch), + smd_write_avail(dev->smd_data.ch)); + + spin_unlock_irqrestore(&dev->lock, flags); + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret); + + kfree(buf); + + return ret; +} + +static ssize_t rmnet_smd_debug_reset_stats(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct rmnet_smd_dev *dev = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + + dev->dpkts_to_host = 0; + dev->dpkts_from_modem = 0; + dev->dpkts_from_host = 0; + dev->dpkts_to_modem = 0; + + dev->cpkts_to_host = 0; + dev->cpkts_from_modem = 0; + dev->cpkts_from_host = 0; + dev->cpkts_to_modem = 0; + + spin_unlock_irqrestore(&dev->lock, flags); + + return count; +} + +static int rmnet_smd_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +const struct file_operations rmnet_smd_debug_stats_ops = { + .open = rmnet_smd_debug_open, + .read = rmnet_smd_debug_read_stats, + .write = rmnet_smd_debug_reset_stats, +}; + +struct dentry *dent_smd; +struct dentry *dent_smd_status; + +static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) +{ + + dent_smd = debugfs_create_dir("usb_rmnet_smd", 0); + if (IS_ERR(dent_smd)) + return; + + dent_smd_status = debugfs_create_file("status", 0444, dent_smd, dev, + &rmnet_smd_debug_stats_ops); + + if (!dent_smd_status) { + debugfs_remove(dent_smd); + dent_smd = NULL; + return; + } + + return; +} +#else +static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) {} +#endif + +static void +rmnet_smd_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + + tasklet_kill(&dev->smd_ctl.rx_tlet); + tasklet_kill(&dev->smd_ctl.tx_tlet); + tasklet_kill(&dev->smd_data.rx_tlet); + tasklet_kill(&dev->smd_data.tx_tlet); + + flush_workqueue(dev->wq); + rmnet_smd_free_buf(dev); + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + + destroy_workqueue(dev->wq); + debugfs_remove_recursive(dent_smd); + kfree(dev); + +} + +int rmnet_smd_bind_config(struct usb_configuration *c) +{ + struct rmnet_smd_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + rmnet_smd = dev; + + dev->wq = create_singlethread_workqueue("k_rmnet_work"); + if (!dev->wq) { + ret = -ENOMEM; + goto free_dev; + } + + spin_lock_init(&dev->lock); + atomic_set(&dev->notify_count, 0); + atomic_set(&dev->online, 0); + atomic_set(&dev->smd_ctl.rx_pkt, 0); + atomic_set(&dev->smd_data.rx_pkt, 0); + + INIT_WORK(&dev->connect_work, rmnet_smd_connect_work); + INIT_WORK(&dev->disconnect_work, rmnet_smd_disconnect_work); + + tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet, + (unsigned long) dev); + tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet, + (unsigned long) dev); + tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet, + (unsigned long) dev); + tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet, + (unsigned long) dev); + + init_waitqueue_head(&dev->smd_ctl.wait); + init_waitqueue_head(&dev->smd_data.wait); + + dev->pdrv.probe = rmnet_smd_ch_probe; + dev->pdrv.driver.name = CONFIG_RMNET_SMD_CTL_CHANNEL; + dev->pdrv.driver.owner = THIS_MODULE; + + INIT_LIST_HEAD(&dev->qmi_req_pool); + INIT_LIST_HEAD(&dev->qmi_req_q); + INIT_LIST_HEAD(&dev->qmi_resp_pool); + INIT_LIST_HEAD(&dev->qmi_resp_q); + 
INIT_LIST_HEAD(&dev->rx_idle); + INIT_LIST_HEAD(&dev->rx_queue); + INIT_LIST_HEAD(&dev->tx_idle); + + dev->function.name = "rmnet"; + dev->function.strings = rmnet_smd_strings; + dev->function.descriptors = rmnet_smd_fs_function; + dev->function.hs_descriptors = rmnet_smd_hs_function; + dev->function.bind = rmnet_smd_bind; + dev->function.unbind = rmnet_smd_unbind; + dev->function.setup = rmnet_smd_setup; + dev->function.set_alt = rmnet_smd_set_alt; + dev->function.disable = rmnet_smd_disable; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto free_wq; + + rmnet_smd_debugfs_init(dev); + + return 0; + +free_wq: + destroy_workqueue(dev->wq); +free_dev: + kfree(dev); + + return ret; +} diff --git a/drivers/usb/gadget/f_rmnet_smd_sdio.c b/drivers/usb/gadget/f_rmnet_smd_sdio.c index 0cac681b..e39125d5 100644 --- a/drivers/usb/gadget/f_rmnet_smd_sdio.c +++ b/drivers/usb/gadget/f_rmnet_smd_sdio.c @@ -42,6 +42,7 @@ #include #include #include +#include #ifdef CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL static uint32_t rmnet_mux_sdio_ctl_ch = CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL; @@ -104,12 +105,6 @@ struct rmnet_mux_ctrl_pkt { struct list_head list; }; -enum usb_rmnet_mux_xport_type { - USB_RMNET_MUX_XPORT_UNDEFINED, - USB_RMNET_MUX_XPORT_SDIO, - USB_RMNET_MUX_XPORT_SMD, -}; - struct rmnet_mux_ctrl_dev { struct list_head tx_q; wait_queue_head_t tx_wait_q; @@ -176,7 +171,7 @@ struct rmnet_mux_dev { struct rmnet_mux_ctrl_dev ctrl_dev; u8 ifc_id; - enum usb_rmnet_mux_xport_type xport; + enum transport_type xport; spinlock_t lock; atomic_t online; atomic_t notify_count; @@ -291,18 +286,6 @@ static struct usb_gadget_strings *rmnet_mux_strings[] = { NULL, }; -static char *xport_to_str(enum usb_rmnet_mux_xport_type t) -{ - switch (t) { - case USB_RMNET_MUX_XPORT_SDIO: - return "SDIO"; - case USB_RMNET_MUX_XPORT_SMD: - return "SMD"; - default: - return "UNDEFINED"; - } -} - static struct rmnet_mux_ctrl_pkt *rmnet_mux_alloc_ctrl_pkt(unsigned len, gfp_t flags) { @@ -556,7 +539,7 @@ rmnet_mux_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req) int status = req->status; int queue = 0; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { dev_kfree_skb_any(skb); req->buf = 0; rmnet_mux_free_req(ep, req); @@ -614,7 +597,7 @@ rmnet_mux_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req) struct usb_composite_dev *cdev = dev->cdev; int status = req->status; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { dev_kfree_skb_any(skb); req->buf = 0; rmnet_mux_free_req(ep, req); @@ -797,7 +780,7 @@ rmnet_mux_smd_complete_epout(struct usb_ep *ep, struct usb_request *req) int status = req->status; int ret; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { rmnet_mux_free_req(ep, req); return; } @@ -857,7 +840,7 @@ static void rmnet_mux_smd_complete_epin(struct usb_ep *ep, int status = req->status; int schedule = 0; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { rmnet_mux_free_req(ep, req); return; } @@ -1209,7 +1192,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } pool = &sdio_dev->rx_idle; @@ -1218,7 +1201,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) req = list_first_entry(pool, struct usb_request, list); 
list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } while ((skb = __skb_dequeue(&sdio_dev->tx_skb_queue))) @@ -1232,7 +1215,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } pool = &smd_dev->rx_idle; @@ -1240,7 +1223,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } /* free all usb requests in SMD rx queue */ @@ -1248,7 +1231,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } pool = &ctrl_dev->tx_q; @@ -1276,7 +1259,7 @@ static void rmnet_mux_disconnect_work(struct work_struct *w) struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev; struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev; - if (dev->xport == USB_RMNET_MUX_XPORT_SMD) { + if (dev->xport == USB_GADGET_XPORT_SMD) { tasklet_kill(&smd_dev->smd_data.rx_tlet); tasklet_kill(&smd_dev->smd_data.tx_tlet); } @@ -1414,8 +1397,8 @@ static ssize_t transport_store( { struct rmnet_mux_dev *dev = rmux_dev; int value; - enum usb_rmnet_mux_xport_type given_xport; - enum usb_rmnet_mux_xport_type t; + enum transport_type given_xport; + enum transport_type t; struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev; struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev; struct list_head *pool; @@ -1425,15 +1408,15 @@ static ssize_t transport_store( unsigned long flags; if (!atomic_read(&dev->online)) { - pr_debug("%s: usb cable is not connected\n", __func__); + pr_err("%s: usb cable is not connected\n", __func__); return -EINVAL; } sscanf(buf, "%d", &value); if (value) - given_xport = USB_RMNET_MUX_XPORT_SDIO; + given_xport = USB_GADGET_XPORT_SDIO; else - given_xport = USB_RMNET_MUX_XPORT_SMD; + given_xport = USB_GADGET_XPORT_SMD; if (given_xport == dev->xport) { pr_err("%s: given_xport:%s cur_xport:%s doing nothing\n", @@ -1447,14 +1430,14 @@ static ssize_t transport_store( /* prevent any other pkts to/from usb */ t = dev->xport; - dev->xport = USB_RMNET_MUX_XPORT_UNDEFINED; - if (t != USB_RMNET_MUX_XPORT_UNDEFINED) { + dev->xport = USB_GADGET_XPORT_UNDEF; + if (t != USB_GADGET_XPORT_UNDEF) { usb_ep_fifo_flush(dev->epin); usb_ep_fifo_flush(dev->epout); } switch (t) { - case USB_RMNET_MUX_XPORT_SDIO: + case USB_GADGET_XPORT_SDIO: spin_lock_irqsave(&dev->lock, flags); /* tx_idle */ @@ -1465,7 +1448,7 @@ static ssize_t transport_store( req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } /* rx_idle */ @@ -1475,7 +1458,7 @@ static ssize_t transport_store( req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } /* tx_skb_queue */ @@ -1489,7 +1472,7 @@ static ssize_t transport_store( spin_unlock_irqrestore(&dev->lock, flags); break; - case USB_RMNET_MUX_XPORT_SMD: + case USB_GADGET_XPORT_SMD: /* close smd xport */ tasklet_kill(&smd_dev->smd_data.rx_tlet); 
tasklet_kill(&smd_dev->smd_data.tx_tlet); @@ -1500,7 +1483,7 @@ static ssize_t transport_store( while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } pool = &smd_dev->rx_idle; @@ -1508,7 +1491,7 @@ static ssize_t transport_store( while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } /* free all usb requests in SMD rx queue */ @@ -1516,7 +1499,7 @@ static ssize_t transport_store( while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } spin_unlock_irqrestore(&dev->lock, flags); @@ -1528,10 +1511,10 @@ static ssize_t transport_store( dev->xport = given_xport; switch (dev->xport) { - case USB_RMNET_MUX_XPORT_SDIO: + case USB_GADGET_XPORT_SDIO: rmnet_mux_sdio_enable(dev); break; - case USB_RMNET_MUX_XPORT_SMD: + case USB_GADGET_XPORT_SMD: rmnet_mux_smd_enable(dev); break; default: @@ -1640,14 +1623,11 @@ static void rmnet_mux_sdio_init(struct rmnet_mux_sdio_dev *sdio_dev) static void rmnet_mux_unbind(struct usb_configuration *c, struct usb_function *f) { -/* Do not clear flags to avoid SMD open status mismatch */ -#if 0 struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev, function); struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev; smd_dev->smd_data.flags = 0; -#endif } #if defined(CONFIG_DEBUG_FS) @@ -1947,7 +1927,6 @@ static int rmnet_mux_ctrl_device_init(struct rmnet_mux_dev *dev) static int rmnet_smd_sdio_function_add(struct usb_configuration *c) { struct rmnet_mux_dev *dev = rmux_dev; - int status; if (!dev) return -ENODEV; @@ -1965,16 +1944,6 @@ static int rmnet_smd_sdio_function_add(struct usb_configuration *c) dev->function.disable = rmnet_mux_disable; dev->function.suspend = rmnet_mux_suspend; - if (rmnet_mux_string_defs[0].id == 0) { - status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); - return status; - } - rmnet_mux_string_defs[0].id = status; - rmnet_mux_interface_desc.iInterface = status; - } - return usb_add_function(c, &dev->function); } diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 7a184326..259d7f35 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -98,8 +98,6 @@ struct f_rndis { struct usb_endpoint_descriptor *notify_desc; struct usb_request *notify_req; atomic_t notify_count; - - atomic_t online; }; static inline struct f_rndis *func_to_rndis(struct usb_function *f) @@ -391,24 +389,29 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rndis *rndis = req->context; + struct usb_composite_dev *cdev = rndis->port.func.config->cdev; int status; - - if (req->status < 0) { - pr_err("%s: staus error: %d\n", __func__, req->status); - return; - } - - if (!atomic_read(&rndis->online)) { - pr_warning("%s: usb rndis is not online\n", __func__); - return; - } + rndis_init_msg_type *buf; /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ // spin_lock(&dev->lock); status = rndis_msg_parser(rndis->config, (u8 *) req->buf); if (status < 0) - pr_err("[USB] RNDIS command error %d, %d/%d\n", + ERROR(cdev, "RNDIS command error %d, %d/%d\n", 
status, req->actual, req->length); + + buf = (rndis_init_msg_type *)req->buf; + + if (buf->MessageType == REMOTE_NDIS_INITIALIZE_MSG) { + if (buf->MaxTransferSize > 2048) + rndis->port.multi_pkt_xfer = 1; + else + rndis->port.multi_pkt_xfer = 0; + DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n", + __func__, buf->MaxTransferSize, + rndis->port.multi_pkt_xfer ? "enabled" : + "disabled"); + } // spin_unlock(&dev->lock); } @@ -423,12 +426,6 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); - if (!atomic_read(&rndis->online)) { - pr_warning("%s: usb rndis is not online\n", __func__); - return -ENOTCONN; - } - - /* composite driver infrastructure handles everything except * CDC class messages; interface activation uses set_alt(). */ @@ -555,7 +552,6 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } else goto fail; - atomic_set(&rndis->online, 1); return 0; fail: return -EINVAL; @@ -566,8 +562,6 @@ static void rndis_disable(struct usb_function *f) struct f_rndis *rndis = func_to_rndis(f); struct usb_composite_dev *cdev = f->config->cdev; - atomic_set(&rndis->online, 0); - if (!rndis->notify->driver_data) return; @@ -775,6 +769,8 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f) rndis_deregister(rndis->config); rndis_exit(); + rndis_string_defs[0].id = 0; + if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); @@ -814,14 +810,14 @@ rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN], if (!can_support_rndis(c) || !ethaddr) return -EINVAL; - /* setup RNDIS itself */ - status = rndis_init(); - if (status < 0) - return status; - /* maybe allocate device-global string IDs */ if (rndis_string_defs[0].id == 0) { + /* ... and setup RNDIS itself */ + status = rndis_init(); + if (status < 0) + return status; + /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c index 52b97434..de8c8ed5 100644 --- a/drivers/usb/gadget/f_serial.c +++ b/drivers/usb/gadget/f_serial.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "u_serial.h" #include "gadget_chips.h" @@ -27,6 +27,7 @@ * CDC ACM driver. However, for many purposes it's just as functional * if you can arrange appropriate host side drivers. */ +#define GSERIAL_NO_PORTS 2 struct gser_descs { struct usb_endpoint_descriptor *in; @@ -75,18 +76,18 @@ struct f_gser { static unsigned int no_tty_ports; static unsigned int no_sdio_ports; static unsigned int no_smd_ports; +static unsigned int no_hsic_sports; static unsigned int nr_ports; static struct port_info { enum transport_type transport; - enum fserial_func_type func_type; unsigned port_num; unsigned client_port_num; } gserial_ports[GSERIAL_NO_PORTS]; static inline bool is_transport_sdio(enum transport_type t) { - if (t == USB_GADGET_FSERIAL_TRANSPORT_SDIO) + if (t == USB_GADGET_XPORT_SDIO) return 1; return 0; } @@ -183,14 +184,10 @@ static struct usb_endpoint_descriptor gser_fs_out_desc = { static struct usb_descriptor_header *gser_fs_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, #ifdef CONFIG_MODEM_SUPPORT -/* These descriptors may not be recognized by some OS. Mark it. 
- */ -/* (struct usb_descriptor_header *) &gser_header_desc, (struct usb_descriptor_header *) &gser_call_mgmt_descriptor, (struct usb_descriptor_header *) &gser_descriptor, (struct usb_descriptor_header *) &gser_union_desc, -*/ (struct usb_descriptor_header *) &gser_fs_notify_desc, #endif (struct usb_descriptor_header *) &gser_fs_in_desc, @@ -227,14 +224,10 @@ static struct usb_endpoint_descriptor gser_hs_out_desc = { static struct usb_descriptor_header *gser_hs_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, #ifdef CONFIG_MODEM_SUPPORT -/* These descriptors may not be recognized by some OS. Mark it. - */ -/* (struct usb_descriptor_header *) &gser_header_desc, (struct usb_descriptor_header *) &gser_call_mgmt_descriptor, (struct usb_descriptor_header *) &gser_descriptor, (struct usb_descriptor_header *) &gser_union_desc, -*/ (struct usb_descriptor_header *) &gser_hs_notify_desc, #endif (struct usb_descriptor_header *) &gser_hs_in_desc, @@ -243,24 +236,9 @@ static struct usb_descriptor_header *gser_hs_function[] = { }; /* string descriptors: */ -static struct usb_string modem_string_defs[] = { - [0].s = "HTC Modem", - [1].s = "HTC 9k Modem", - { } /* end of list */ -}; - -static struct usb_gadget_strings modem_string_table = { - .language = 0x0409, /* en-us */ - .strings = modem_string_defs, -}; - -static struct usb_gadget_strings *modem_strings[] = { - &modem_string_table, - NULL, -}; static struct usb_string gser_string_defs[] = { - [0].s = "HTC Serial", + [0].s = "Generic Serial", { } /* end of list */ }; @@ -274,51 +252,16 @@ static struct usb_gadget_strings *gser_strings[] = { NULL, }; -static char *transport_to_str(enum transport_type t) -{ - switch (t) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: - return "TTY"; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: - return "SDIO"; - case USB_GADGET_FSERIAL_TRANSPORT_SMD: - return "SMD"; - } - - return "NONE"; -} - -static enum transport_type serial_str_to_transport(const char *name) -{ - if (!strcasecmp("SDIO", name)) - return USB_GADGET_FSERIAL_TRANSPORT_SDIO; - if (!strcasecmp("SMD", name)) - return USB_GADGET_FSERIAL_TRANSPORT_SMD; - - return USB_GADGET_FSERIAL_TRANSPORT_TTY; -} - -static enum fserial_func_type serial_str_to_func_type(const char *name) -{ - if (!name) - return USB_FSER_FUNC_NONE; - - if (!strcasecmp("MODEM", name)) - return USB_FSER_FUNC_MODEM; - if (!strcasecmp("MODEM_MDM", name)) - return USB_FSER_FUNC_MODEM_MDM; - if (!strcasecmp("SERIAL", name)) - return USB_FSER_FUNC_SERIAL; - - return USB_FSER_FUNC_NONE; -} - static int gport_setup(struct usb_configuration *c) { int ret = 0; + int port_idx; + int i; - pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n", - __func__, no_tty_ports, no_sdio_ports, nr_ports); + pr_debug("%s: no_tty_ports: %u no_sdio_ports: %u" + " no_smd_ports: %u no_hsic_sports: %u nr_ports: %u\n", + __func__, no_tty_ports, no_sdio_ports, no_smd_ports, + no_hsic_sports, nr_ports); if (no_tty_ports) ret = gserial_setup(c->cdev->gadget, no_tty_ports); @@ -326,33 +269,67 @@ static int gport_setup(struct usb_configuration *c) ret = gsdio_setup(c->cdev->gadget, no_sdio_ports); if (no_smd_ports) ret = gsmd_setup(c->cdev->gadget, no_smd_ports); + if (no_hsic_sports) { + port_idx = ghsic_data_setup(no_hsic_sports, USB_GADGET_SERIAL); + if (port_idx < 0) + return port_idx; + + for (i = 0; i < nr_ports; i++) { + if (gserial_ports[i].transport == + USB_GADGET_XPORT_HSIC) { + gserial_ports[i].client_port_num = port_idx; + port_idx++; + } + } + /*clinet port num is same for data setup and 
ctrl setup*/ + ret = ghsic_ctrl_setup(no_hsic_sports, USB_GADGET_SERIAL); + if (ret < 0) + return ret; + return 0; + } return ret; } static int gport_connect(struct f_gser *gser) { - unsigned port_num; + unsigned port_num; + int ret; - pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n", - __func__, transport_to_str(gser->transport), + pr_debug("%s: transport: %s f_gser: %p gserial: %p port_num: %d\n", + __func__, xport_to_str(gser->transport), gser, &gser->port, gser->port_num); port_num = gserial_ports[gser->port_num].client_port_num; switch (gser->transport) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: + case USB_GADGET_XPORT_TTY: gserial_connect(&gser->port, port_num); break; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: + case USB_GADGET_XPORT_SDIO: gsdio_connect(&gser->port, port_num); break; - case USB_GADGET_FSERIAL_TRANSPORT_SMD: + case USB_GADGET_XPORT_SMD: gsmd_connect(&gser->port, port_num); break; + case USB_GADGET_XPORT_HSIC: + ret = ghsic_ctrl_connect(&gser->port, port_num); + if (ret) { + pr_err("%s: ghsic_ctrl_connect failed: err:%d\n", + __func__, ret); + return ret; + } + ret = ghsic_data_connect(&gser->port, port_num); + if (ret) { + pr_err("%s: ghsic_data_connect failed: err:%d\n", + __func__, ret); + ghsic_ctrl_disconnect(&gser->port, port_num); + return ret; + } + break; default: pr_err("%s: Un-supported transport: %s\n", __func__, - transport_to_str(gser->transport)); + xport_to_str(gser->transport)); return -ENODEV; } @@ -363,25 +340,29 @@ static int gport_disconnect(struct f_gser *gser) { unsigned port_num; - pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n", - __func__, transport_to_str(gser->transport), + pr_debug("%s: transport: %s f_gser: %p gserial: %p port_num: %d\n", + __func__, xport_to_str(gser->transport), gser, &gser->port, gser->port_num); port_num = gserial_ports[gser->port_num].client_port_num; switch (gser->transport) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: + case USB_GADGET_XPORT_TTY: gserial_disconnect(&gser->port); break; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: + case USB_GADGET_XPORT_SDIO: gsdio_disconnect(&gser->port, port_num); break; - case USB_GADGET_FSERIAL_TRANSPORT_SMD: + case USB_GADGET_XPORT_SMD: gsmd_disconnect(&gser->port, port_num); break; + case USB_GADGET_XPORT_HSIC: + ghsic_ctrl_disconnect(&gser->port, port_num); + ghsic_data_disconnect(&gser->port, port_num); + break; default: pr_err("%s: Un-supported transport:%s\n", __func__, - transport_to_str(gser->transport)); + xport_to_str(gser->transport)); return -ENODEV; } @@ -862,59 +843,16 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) { struct f_gser *gser; int status; - struct port_info *p = &gserial_ports[port_num]; - - if (p->func_type == USB_FSER_FUNC_NONE) { - pr_info("%s: non function port : %d\n", __func__, port_num); - return 0; - } - pr_info("%s: type:%d, trasport: %s\n", __func__, p->func_type, - transport_to_str(p->transport)); /* REVISIT might want instance-specific strings to help * distinguish instances ... 
*/ /* maybe allocate device-global string ID */ - /* HTC modem port_num is 0 */ -#if 0 - if (port_num != 0) { - if (gser_string_defs[0].id == 0) { - status = usb_string_id(c->cdev); - if (status < 0) - return status; - gser_string_defs[0].id = status; - } - } -#endif - - if (modem_string_defs[0].id == 0 && - p->func_type == USB_FSER_FUNC_MODEM) { - status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); - return status; - } - modem_string_defs[0].id = status; - } - - if (modem_string_defs[1].id == 0 && - p->func_type == USB_FSER_FUNC_MODEM_MDM) { - status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); - return status; - } - modem_string_defs[1].id = status; - } - - if (gser_string_defs[0].id == 0 && - p->func_type == USB_FSER_FUNC_SERIAL) { + if (gser_string_defs[0].id == 0) { status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); + if (status < 0) return status; - } gser_string_defs[0].id = status; } @@ -937,12 +875,10 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) gser->transport = gserial_ports[port_num].transport; #ifdef CONFIG_MODEM_SUPPORT /* We support only two ports for now */ - if (port_num == 0) { + if (port_num == 0) gser->port.func.name = "modem"; - } else + else gser->port.func.name = "nmea"; - - gser->port.func.setup = gser_setup; gser->port.connect = gser_connect; gser->port.get_dtr = gser_get_dtr; @@ -954,28 +890,6 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) gser->port.send_break = gser_send_break; #endif - switch (p->func_type) { - case USB_FSER_FUNC_MODEM: - gser->port.func.name = "modem"; - gser->port.func.strings = modem_strings; - gser_interface_desc.iInterface = modem_string_defs[0].id; - break; - case USB_FSER_FUNC_MODEM_MDM: - gser->port.func.name = "modem_mdm"; - gser->port.func.strings = modem_strings; - gser_interface_desc.iInterface = modem_string_defs[1].id; - break; - case USB_FSER_FUNC_SERIAL: - gser->port.func.name = "serial"; - gser->port.func.strings = gser_strings; - gser_interface_desc.iInterface = gser_string_defs[0].id; - break; - case USB_FSER_FUNC_NONE: - default : - break; - } - - status = usb_add_function(c, &gser->port.func); if (status) kfree(gser); @@ -985,39 +899,37 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) /** * gserial_init_port - bind a gserial_port to its transport */ -static int gserial_init_port(int port_num, const char *name, char *serial_type) +static int gserial_init_port(int port_num, const char *name) { enum transport_type transport; - enum fserial_func_type func_type; if (port_num >= GSERIAL_NO_PORTS) return -ENODEV; - transport = serial_str_to_transport(name); - func_type = serial_str_to_func_type(serial_type); - - pr_info("%s, port:%d, transport:%s, type:%d\n", __func__, - port_num, transport_to_str(transport), func_type); - - + transport = str_to_xport(name); + pr_debug("%s, port:%d, transport:%s\n", __func__, + port_num, xport_to_str(transport)); gserial_ports[port_num].transport = transport; - gserial_ports[port_num].func_type = func_type; gserial_ports[port_num].port_num = port_num; switch (transport) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: + case USB_GADGET_XPORT_TTY: gserial_ports[port_num].client_port_num = no_tty_ports; no_tty_ports++; break; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: + case USB_GADGET_XPORT_SDIO: gserial_ports[port_num].client_port_num = no_sdio_ports; no_sdio_ports++; break; - 
case USB_GADGET_FSERIAL_TRANSPORT_SMD: + case USB_GADGET_XPORT_SMD: gserial_ports[port_num].client_port_num = no_smd_ports; no_smd_ports++; break; + case USB_GADGET_XPORT_HSIC: + /*client port number will be updated in gport_setup*/ + no_hsic_sports++; + break; default: pr_err("%s: Un-supported transport transport: %u\n", __func__, gserial_ports[port_num].transport); diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c new file mode 100644 index 00000000..43b1c544 --- /dev/null +++ b/drivers/usb/gadget/msm72k_udc.c @@ -0,0 +1,2793 @@ +/* + * Driver for HighSpeed USB Client Controller in MSM7K + * + * Copyright (C) 2008 Google, Inc. + * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved. + * Author: Mike Lockwood + * Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +static const char driver_name[] = "msm72k_udc"; + +/* #define DEBUG */ +/* #define VERBOSE */ + +#define MSM_USB_BASE ((unsigned) ui->addr) + +#define DRIVER_DESC "MSM 72K USB Peripheral Controller" +#define DRIVER_NAME "MSM72K_UDC" + +#define EPT_FLAG_IN 0x0001 + +#define SETUP_BUF_SIZE 8 + + +static const char *const ep_name[] = { + "ep0out", "ep1out", "ep2out", "ep3out", + "ep4out", "ep5out", "ep6out", "ep7out", + "ep8out", "ep9out", "ep10out", "ep11out", + "ep12out", "ep13out", "ep14out", "ep15out", + "ep0in", "ep1in", "ep2in", "ep3in", + "ep4in", "ep5in", "ep6in", "ep7in", + "ep8in", "ep9in", "ep10in", "ep11in", + "ep12in", "ep13in", "ep14in", "ep15in" +}; + +/*To release the wakelock from debugfs*/ +static int release_wlocks; + +struct msm_request { + struct usb_request req; + + /* saved copy of req.complete */ + void (*gadget_complete)(struct usb_ep *ep, + struct usb_request *req); + + + struct usb_info *ui; + struct msm_request *next; + struct msm_request *prev; + + unsigned busy:1; + unsigned live:1; + unsigned alloced:1; + + dma_addr_t dma; + dma_addr_t item_dma; + + struct ept_queue_item *item; +}; + +#define to_msm_request(r) container_of(r, struct msm_request, req) +#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep) +#define to_msm_otg(xceiv) container_of(xceiv, struct msm_otg, otg) +#define is_b_sess_vld() ((OTGSC_BSV & readl(USB_OTGSC)) ? 
1 : 0) +#define is_usb_online(ui) (ui->usb_state != USB_STATE_NOTATTACHED) + +struct msm_endpoint { + struct usb_ep ep; + struct usb_info *ui; + struct msm_request *req; /* head of pending requests */ + struct msm_request *last; + unsigned flags; + + /* bit number (0-31) in various status registers + ** as well as the index into the usb_info's array + ** of all endpoints + */ + unsigned char bit; + unsigned char num; + unsigned long dTD_update_fail_count; + unsigned long false_prime_fail_count; + unsigned actual_prime_fail_count; + + unsigned wedged:1; + /* pointers to DMA transfer list area */ + /* these are allocated from the usb_info dma space */ + struct ept_queue_head *head; + struct timer_list prime_timer; +}; + +/* PHY status check timer to monitor phy stuck up on reset */ +static struct timer_list phy_status_timer; + +static void ept_prime_timer_func(unsigned long data); +static void usb_do_work(struct work_struct *w); +static void usb_do_remote_wakeup(struct work_struct *w); + + +#define USB_STATE_IDLE 0 +#define USB_STATE_ONLINE 1 +#define USB_STATE_OFFLINE 2 + +#define USB_FLAG_START 0x0001 +#define USB_FLAG_VBUS_ONLINE 0x0002 +#define USB_FLAG_VBUS_OFFLINE 0x0004 +#define USB_FLAG_RESET 0x0008 +#define USB_FLAG_SUSPEND 0x0010 +#define USB_FLAG_CONFIGURED 0x0020 + +#define USB_CHG_DET_DELAY msecs_to_jiffies(1000) +#define REMOTE_WAKEUP_DELAY msecs_to_jiffies(1000) +#define PHY_STATUS_CHECK_DELAY (jiffies + msecs_to_jiffies(1000)) +#define EPT_PRIME_CHECK_DELAY (jiffies + msecs_to_jiffies(1000)) + +struct usb_info { + /* lock for register/queue/device state changes */ + spinlock_t lock; + + /* single request used for handling setup transactions */ + struct usb_request *setup_req; + + struct platform_device *pdev; + int irq; + void *addr; + + unsigned state; + unsigned flags; + + atomic_t configured; + atomic_t running; + + struct dma_pool *pool; + + /* dma page to back the queue heads and items */ + unsigned char *buf; + dma_addr_t dma; + + struct ept_queue_head *head; + + /* used for allocation */ + unsigned next_item; + unsigned next_ifc_num; + + /* endpoints are ordered based on their status bits, + ** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... 
IN15 + */ + struct msm_endpoint ept[32]; + + + /* max power requested by selected configuration */ + unsigned b_max_pow; + unsigned chg_current; + struct delayed_work chg_det; + struct delayed_work chg_stop; + struct msm_hsusb_gadget_platform_data *pdata; + struct work_struct phy_status_check; + + struct work_struct work; + unsigned phy_status; + unsigned phy_fail_count; + unsigned prime_fail_count; + unsigned long dTD_update_fail_count; + + struct usb_gadget gadget; + struct usb_gadget_driver *driver; + struct switch_dev sdev; + +#define ep0out ept[0] +#define ep0in ept[16] + + atomic_t ep0_dir; + atomic_t test_mode; + atomic_t offline_pending; + atomic_t softconnect; +#ifdef CONFIG_USB_OTG + u8 hnp_avail; +#endif + + atomic_t remote_wakeup; + atomic_t self_powered; + struct delayed_work rw_work; + + struct otg_transceiver *xceiv; + enum usb_device_state usb_state; + struct wake_lock wlock; +}; + +static const struct usb_ep_ops msm72k_ep_ops; +static struct usb_info *the_usb_info; + +static int msm72k_wakeup(struct usb_gadget *_gadget); +static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active); +static int msm72k_set_halt(struct usb_ep *_ep, int value); +static void flush_endpoint(struct msm_endpoint *ept); +static void usb_reset(struct usb_info *ui); +static int usb_ept_set_halt(struct usb_ep *_ep, int value); + +static void msm_hsusb_set_speed(struct usb_info *ui) +{ + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) { + case PORTSC_PSPD_FS: + dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_FULL\n"); + ui->gadget.speed = USB_SPEED_FULL; + break; + case PORTSC_PSPD_LS: + dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_LOW\n"); + ui->gadget.speed = USB_SPEED_LOW; + break; + case PORTSC_PSPD_HS: + dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_HIGH\n"); + ui->gadget.speed = USB_SPEED_HIGH; + break; + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void msm_hsusb_set_state(enum usb_device_state state) +{ + unsigned long flags; + + spin_lock_irqsave(&the_usb_info->lock, flags); + the_usb_info->usb_state = state; + spin_unlock_irqrestore(&the_usb_info->lock, flags); +} + +static enum usb_device_state msm_hsusb_get_state(void) +{ + unsigned long flags; + enum usb_device_state state; + + spin_lock_irqsave(&the_usb_info->lock, flags); + state = the_usb_info->usb_state; + spin_unlock_irqrestore(&the_usb_info->lock, flags); + + return state; +} + +static ssize_t print_switch_name(struct switch_dev *sdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", DRIVER_NAME); +} + +static ssize_t print_switch_state(struct switch_dev *sdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", + sdev->state ? "online" : "offline"); +} + +static inline enum chg_type usb_get_chg_type(struct usb_info *ui) +{ + if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) + return USB_CHG_TYPE__WALLCHARGER; + else + return USB_CHG_TYPE__SDP; +} + +#define USB_WALLCHARGER_CHG_CURRENT 1800 +static int usb_get_max_power(struct usb_info *ui) +{ + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long flags; + enum chg_type temp; + int suspended; + int configured; + unsigned bmaxpow; + + if (ui->gadget.is_a_peripheral) + return -EINVAL; + + temp = atomic_read(&otg->chg_type); + spin_lock_irqsave(&ui->lock, flags); + suspended = ui->usb_state == USB_STATE_SUSPENDED ? 
1 : 0; + configured = atomic_read(&ui->configured); + bmaxpow = ui->b_max_pow; + spin_unlock_irqrestore(&ui->lock, flags); + + if (temp == USB_CHG_TYPE__INVALID) + return -ENODEV; + + if (temp == USB_CHG_TYPE__WALLCHARGER) + return USB_WALLCHARGER_CHG_CURRENT; + + if (suspended || !configured) + return 0; + + return bmaxpow; +} + +static int usb_phy_stuck_check(struct usb_info *ui) +{ + /* + * write some value (0xAA) into scratch reg (0x16) and read it back, + * If the read value is same as written value, means PHY is normal + * otherwise, PHY seems to have stuck. + */ + + if (otg_io_write(ui->xceiv, 0xAA, 0x16) == -1) { + dev_dbg(&ui->pdev->dev, + "%s(): ulpi write timeout\n", __func__); + return -EIO; + } + + if (otg_io_read(ui->xceiv, 0x16) != 0xAA) { + dev_dbg(&ui->pdev->dev, + "%s(): read value is incorrect\n", __func__); + return -EIO; + } + + return 0; +} + +/* + * This function checks the phy status by reading/writing to the + * phy scratch register. If the phy is stuck resets the HW + * */ +static void usb_phy_stuck_recover(struct work_struct *w) +{ + struct usb_info *ui = the_usb_info; + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + if (ui->gadget.speed != USB_SPEED_UNKNOWN || + ui->usb_state == USB_STATE_NOTATTACHED || + ui->driver == NULL) { + spin_unlock_irqrestore(&ui->lock, flags); + return; + } + spin_unlock_irqrestore(&ui->lock, flags); + + disable_irq(otg->irq); + if (usb_phy_stuck_check(ui)) { +#ifdef CONFIG_USB_MSM_ACA + del_timer_sync(&otg->id_timer); +#endif + ui->phy_fail_count++; + dev_err(&ui->pdev->dev, + "%s():PHY stuck, resetting HW\n", __func__); + /* + * PHY seems to have stuck, + * reset the PHY and HW link to recover the PHY + */ + usb_reset(ui); +#ifdef CONFIG_USB_MSM_ACA + mod_timer(&otg->id_timer, jiffies + + msecs_to_jiffies(OTG_ID_POLL_MS)); +#endif + msm72k_pullup_internal(&ui->gadget, 1); + } + enable_irq(otg->irq); +} + +static void usb_phy_status_check_timer(unsigned long data) +{ + struct usb_info *ui = the_usb_info; + + schedule_work(&ui->phy_status_check); +} + +static void usb_chg_stop(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, chg_stop.work); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + enum chg_type temp; + + temp = atomic_read(&otg->chg_type); + + if (temp == USB_CHG_TYPE__SDP) + otg_set_power(ui->xceiv, 0); +} + +static void usb_chg_detect(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, chg_det.work); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + enum chg_type temp = USB_CHG_TYPE__INVALID; + unsigned long flags; + int maxpower; + + spin_lock_irqsave(&ui->lock, flags); + if (ui->usb_state == USB_STATE_NOTATTACHED) { + spin_unlock_irqrestore(&ui->lock, flags); + return; + } + + temp = usb_get_chg_type(ui); + spin_unlock_irqrestore(&ui->lock, flags); + + atomic_set(&otg->chg_type, temp); + maxpower = usb_get_max_power(ui); + if (maxpower > 0) + otg_set_power(ui->xceiv, maxpower); + + /* USB driver prevents idle and suspend power collapse(pc) + * while USB cable is connected. But when dedicated charger is + * connected, driver can vote for idle and suspend pc. + * OTG driver handles idle pc as part of above otg_set_power call + * when wallcharger is attached. 
To allow suspend pc, release the + * wakelock which will be re-acquired for any sub-sequent usb interrupts + * */ + if (temp == USB_CHG_TYPE__WALLCHARGER) { + pm_runtime_put_sync(&ui->pdev->dev); + wake_unlock(&ui->wlock); + } +} + +static int usb_ep_get_stall(struct msm_endpoint *ept) +{ + unsigned int n; + struct usb_info *ui = ept->ui; + + n = readl(USB_ENDPTCTRL(ept->num)); + if (ept->flags & EPT_FLAG_IN) + return (CTRL_TXS & n) ? 1 : 0; + else + return (CTRL_RXS & n) ? 1 : 0; +} + +static void init_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + + ept->ui = ui; + ept->bit = n; + ept->num = n & 15; + ept->ep.name = ep_name[n]; + ept->ep.ops = &msm72k_ep_ops; + + if (ept->bit > 15) { + /* IN endpoint */ + ept->head = ui->head + (ept->num << 1) + 1; + ept->flags = EPT_FLAG_IN; + } else { + /* OUT endpoint */ + ept->head = ui->head + (ept->num << 1); + ept->flags = 0; + } + setup_timer(&ept->prime_timer, ept_prime_timer_func, + (unsigned long) ept); + + } +} + +static void config_ept(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT; + + /* ep0 out needs interrupt-on-setup */ + if (ept->bit == 0) + cfg |= CONFIG_IOS; + + ept->head->config = cfg; + ept->head->next = TERMINATE; + + if (ept->ep.maxpacket) + dev_dbg(&ui->pdev->dev, + "ept #%d %s max:%d head:%p bit:%d\n", + ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->ep.maxpacket, ept->head, ept->bit); +} + +static void configure_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) + config_ept(ui->ept + n); +} + +struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept, + unsigned bufsize, gfp_t gfp_flags) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req; + + req = kzalloc(sizeof(*req), gfp_flags); + if (!req) + goto fail1; + + req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma); + if (!req->item) + goto fail2; + + if (bufsize) { + req->req.buf = kmalloc(bufsize, gfp_flags); + if (!req->req.buf) + goto fail3; + req->alloced = 1; + } + + return &req->req; + +fail3: + dma_pool_free(ui->pool, req->item, req->item_dma); +fail2: + kfree(req); +fail1: + return 0; +} + +static void usb_ept_enable(struct msm_endpoint *ept, int yes, + unsigned char ep_type) +{ + struct usb_info *ui = ept->ui; + int in = ept->flags & EPT_FLAG_IN; + unsigned n; + + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + if (yes) { + n = (n & (~CTRL_TXT_MASK)) | + (ep_type << CTRL_TXT_EP_TYPE_SHIFT); + n |= CTRL_TXE | CTRL_TXR; + } else + n &= (~CTRL_TXE); + } else { + if (yes) { + n = (n & (~CTRL_RXT_MASK)) | + (ep_type << CTRL_RXT_EP_TYPE_SHIFT); + n |= CTRL_RXE | CTRL_RXR; + } else + n &= ~(CTRL_RXE); + } + /* complete all the updates to ept->head before enabling endpoint*/ + mb(); + writel(n, USB_ENDPTCTRL(ept->num)); + + /* Ensure endpoint is enabled before returning */ + mb(); + + dev_dbg(&ui->pdev->dev, "ept %d %s %s\n", + ept->num, in ? "in" : "out", yes ? "enabled" : "disabled"); +} + +static void ept_prime_timer_func(unsigned long data) +{ + struct msm_endpoint *ept = (struct msm_endpoint *)data; + struct usb_info *ui = ept->ui; + unsigned n = 1 << ept->bit; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + + ept->false_prime_fail_count++; + if ((readl_relaxed(USB_ENDPTPRIME) & n)) { + /* + * ---- UNLIKELY --- + * May be hardware is taking long time to process the + * prime request. 
Or could be intermittent priming and + * previous dTD is not fired yet. + */ + mod_timer(&ept->prime_timer, EPT_PRIME_CHECK_DELAY); + goto out; + } + if (readl_relaxed(USB_ENDPTSTAT) & n) + goto out; + + /* clear speculative loads on item->info */ + rmb(); + if (ept->req && (ept->req->item->info & INFO_ACTIVE)) { + ui->prime_fail_count++; + ept->actual_prime_fail_count++; + pr_err("%s(): ept%d%s prime failed. ept: config: %x" + "active: %x next: %x info: %x\n", + __func__, ept->num, + ept->flags & EPT_FLAG_IN ? "in" : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + writel_relaxed(n, USB_ENDPTPRIME); + mod_timer(&ept->prime_timer, EPT_PRIME_CHECK_DELAY); + } +out: + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void usb_ept_start(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req = ept->req; + unsigned n = 1 << ept->bit; + + BUG_ON(req->live); + + while (req) { + req->live = 1; + /* prepare the transaction descriptor item for the hardware */ + req->item->info = + INFO_BYTES(req->req.length) | INFO_IOC | INFO_ACTIVE; + req->item->page0 = req->dma; + req->item->page1 = (req->dma + 0x1000) & 0xfffff000; + req->item->page2 = (req->dma + 0x2000) & 0xfffff000; + req->item->page3 = (req->dma + 0x3000) & 0xfffff000; + + if (req->next == NULL) { + req->item->next = TERMINATE; + break; + } + req->item->next = req->next->item_dma; + req = req->next; + } + + rmb(); + /* link the hw queue head to the request's transaction item */ + ept->head->next = ept->req->item_dma; + ept->head->info = 0; + + /* flush buffers before priming ept */ + mb(); + /* during high throughput testing it is observed that + * ept stat bit is not set even though all the data + * structures are updated properly and ept prime bit + * is set. To workaround the issue, kick a timer and + * make decision on re-prime. We can do a busy loop here + * but it leads to high cpu usage. + */ + writel_relaxed(n, USB_ENDPTPRIME); + mod_timer(&ept->prime_timer, EPT_PRIME_CHECK_DELAY); +} + +int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req) +{ + unsigned long flags; + struct msm_request *req = to_msm_request(_req); + struct msm_request *last; + struct usb_info *ui = ept->ui; + unsigned length = req->req.length; + + if (length > 0x4000) + return -EMSGSIZE; + + spin_lock_irqsave(&ui->lock, flags); + + if (req->busy) { + req->req.status = -EBUSY; + spin_unlock_irqrestore(&ui->lock, flags); + dev_err(&ui->pdev->dev, + "usb_ept_queue_xfer() tried to queue busy request\n"); + return -EBUSY; + } + + if (!atomic_read(&ui->configured) && (ept->num != 0)) { + req->req.status = -ESHUTDOWN; + spin_unlock_irqrestore(&ui->lock, flags); + if (printk_ratelimit()) + dev_err(&ui->pdev->dev, + "%s: called while offline\n", __func__); + return -ESHUTDOWN; + } + + if (ui->usb_state == USB_STATE_SUSPENDED) { + if (!atomic_read(&ui->remote_wakeup)) { + req->req.status = -EAGAIN; + spin_unlock_irqrestore(&ui->lock, flags); + if (printk_ratelimit()) + dev_err(&ui->pdev->dev, + "%s: cannot queue as bus is suspended " + "ept #%d %s max:%d head:%p bit:%d\n", + __func__, ept->num, + (ept->flags & EPT_FLAG_IN) ? 
"in" : "out", + ept->ep.maxpacket, ept->head, ept->bit); + + return -EAGAIN; + } + + wake_lock(&ui->wlock); + otg_set_suspend(ui->xceiv, 0); + schedule_delayed_work(&ui->rw_work, REMOTE_WAKEUP_DELAY); + } + + req->busy = 1; + req->live = 0; + req->next = 0; + req->req.status = -EBUSY; + + req->dma = dma_map_single(NULL, req->req.buf, length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + + /* Add the new request to the end of the queue */ + last = ept->last; + if (last) { + /* Already requests in the queue. add us to the + * end, but let the completion interrupt actually + * start things going, to avoid hw issues + */ + last->next = req; + req->prev = last; + + } else { + /* queue was empty -- kick the hardware */ + ept->req = req; + req->prev = NULL; + usb_ept_start(ept); + } + ept->last = req; + + spin_unlock_irqrestore(&ui->lock, flags); + return 0; +} + +/* --- endpoint 0 handling --- */ + +static void ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + + req->complete = r->gadget_complete; + r->gadget_complete = 0; + if (req->complete) + req->complete(&ui->ep0in.ep, req); +} + +static void ep0_status_complete(struct usb_ep *ep, struct usb_request *_req) +{ + struct usb_request *req = _req->context; + struct msm_request *r; + struct msm_endpoint *ept; + struct usb_info *ui; + + pr_debug("%s:\n", __func__); + if (!req) + return; + + r = to_msm_request(req); + ept = to_msm_endpoint(ep); + ui = ept->ui; + _req->context = 0; + + req->complete = r->gadget_complete; + req->zero = 0; + r->gadget_complete = 0; + if (req->complete) + req->complete(&ui->ep0in.ep, req); + +} + +static void ep0_status_phase(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + + pr_debug("%s:\n", __func__); + + req->length = 0; + req->complete = ep0_status_complete; + + /* status phase */ + if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) + usb_ept_queue_xfer(&ui->ep0out, req); + else + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0in_send_zero_leng_pkt(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct usb_request *req = ui->setup_req; + + pr_debug("%s:\n", __func__); + + req->length = 0; + req->complete = ep0_status_phase; + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0_queue_ack_complete(struct usb_ep *ep, + struct usb_request *_req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + struct usb_request *req = ui->setup_req; + + pr_debug("%s: _req:%p actual:%d length:%d zero:%d\n", + __func__, _req, _req->actual, + _req->length, _req->zero); + + /* queue up the receive of the ACK response from the host */ + if (_req->status == 0 && _req->actual == _req->length) { + req->context = _req; + if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) { + if (_req->zero && _req->length && + !(_req->length % ep->maxpacket)) { + ep0in_send_zero_leng_pkt(&ui->ep0in); + return; + } + } + ep0_status_phase(ep, req); + } else + ep0_complete(ep, _req); +} + +static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + unsigned int temp; + int test_mode = atomic_read(&ui->test_mode); + + if (!test_mode) + return; + + switch (test_mode) { + case J_TEST: + dev_info(&ui->pdev->dev, "usb electrical test mode: (J)\n"); + temp = 
readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC); + break; + + case K_TEST: + dev_info(&ui->pdev->dev, "usb electrical test mode: (K)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC); + break; + + case SE0_NAK_TEST: + dev_info(&ui->pdev->dev, + "usb electrical test mode: (SE0-NAK)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC); + break; + + case TST_PKT_TEST: + dev_info(&ui->pdev->dev, + "usb electrical test mode: (TEST_PKT)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC); + break; + } +} + +static void ep0_setup_ack(struct usb_info *ui) +{ + struct usb_request *req = ui->setup_req; + req->length = 0; + req->complete = ep0_setup_ack_complete; + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0_setup_stall(struct usb_info *ui) +{ + writel((1<<16) | (1<<0), USB_ENDPTCTRL(0)); +} + +static void ep0_setup_send(struct usb_info *ui, unsigned length) +{ + struct usb_request *req = ui->setup_req; + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = &ui->ep0in; + + req->length = length; + req->complete = ep0_queue_ack_complete; + r->gadget_complete = 0; + usb_ept_queue_xfer(ept, req); +} + +static void handle_setup(struct usb_info *ui) +{ + struct usb_ctrlrequest ctl; + struct usb_request *req = ui->setup_req; + int ret; +#ifdef CONFIG_USB_OTG + u8 hnp; + unsigned long flags; +#endif + /* USB hardware sometimes generate interrupt before + * 8 bytes of SETUP packet are written to system memory. + * This results in fetching wrong setup_data sometimes. + * TODO: Remove below workaround of adding 1us delay once + * it gets fixed in hardware. + */ + udelay(10); + + memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl)); + /* Ensure buffer is read before acknowledging to h/w */ + mb(); + + writel(EPT_RX(0), USB_ENDPTSETUPSTAT); + + if (ctl.bRequestType & USB_DIR_IN) + atomic_set(&ui->ep0_dir, USB_DIR_IN); + else + atomic_set(&ui->ep0_dir, USB_DIR_OUT); + + /* any pending ep0 transactions must be canceled */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + dev_dbg(&ui->pdev->dev, + "setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n", + ctl.bRequestType, ctl.bRequest, ctl.wValue, + ctl.wIndex, ctl.wLength); + + if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) == + (USB_DIR_IN | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_GET_STATUS) { + /* OTG supplement Rev 2.0 introduces another device + * GET_STATUS request for HNP polling with length = 1. 
+ */ + u8 len = 2; + switch (ctl.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_ENDPOINT: + { + struct msm_endpoint *ept; + unsigned num = + ctl.wIndex & USB_ENDPOINT_NUMBER_MASK; + u16 temp = 0; + + if (num == 0) { + memset(req->buf, 0, 2); + break; + } + if (ctl.wIndex & USB_ENDPOINT_DIR_MASK) + num += 16; + ept = &ui->ep0out + num; + temp = usb_ep_get_stall(ept); + temp = temp << USB_ENDPOINT_HALT; + memcpy(req->buf, &temp, 2); + break; + } + case USB_RECIP_DEVICE: + { + u16 temp = 0; + + if (ctl.wIndex == OTG_STATUS_SELECTOR) { +#ifdef CONFIG_USB_OTG + spin_lock_irqsave(&ui->lock, flags); + hnp = (ui->gadget.host_request << + HOST_REQUEST_FLAG); + ui->hnp_avail = 1; + spin_unlock_irqrestore(&ui->lock, + flags); + memcpy(req->buf, &hnp, 1); + len = 1; +#else + goto stall; +#endif + } else { + temp = (atomic_read(&ui->self_powered) + << USB_DEVICE_SELF_POWERED); + temp |= (atomic_read(&ui->remote_wakeup) + << USB_DEVICE_REMOTE_WAKEUP); + memcpy(req->buf, &temp, 2); + } + break; + } + case USB_RECIP_INTERFACE: + memset(req->buf, 0, 2); + break; + default: + goto stall; + } + ep0_setup_send(ui, len); + return; + } + } + if (ctl.bRequestType == + (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) { + if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) || + (ctl.bRequest == USB_REQ_SET_FEATURE)) { + if ((ctl.wValue == 0) && (ctl.wLength == 0)) { + unsigned num = ctl.wIndex & 0x0f; + + if (num != 0) { + struct msm_endpoint *ept; + + if (ctl.wIndex & 0x80) + num += 16; + ept = &ui->ep0out + num; + + if (ept->wedged) + goto ack; + if (ctl.bRequest == USB_REQ_SET_FEATURE) + usb_ept_set_halt(&ept->ep, 1); + else + usb_ept_set_halt(&ept->ep, 0); + } + goto ack; + } + } + } + if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) { + atomic_set(&ui->configured, !!ctl.wValue); + msm_hsusb_set_state(USB_STATE_CONFIGURED); + } else if (ctl.bRequest == USB_REQ_SET_ADDRESS) { + /* + * Gadget speed should be set when PCI interrupt + * occurs. But sometimes, PCI interrupt is not + * occuring after reset. Hence update the gadget + * speed here. + */ + if (ui->gadget.speed == USB_SPEED_UNKNOWN) { + dev_info(&ui->pdev->dev, + "PCI intr missed" + "set speed explictly\n"); + msm_hsusb_set_speed(ui); + } + msm_hsusb_set_state(USB_STATE_ADDRESS); + + /* write address delayed (will take effect + ** after the next IN txn) + */ + writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR); + goto ack; + } else if (ctl.bRequest == USB_REQ_SET_FEATURE) { + switch (ctl.wValue) { + case USB_DEVICE_TEST_MODE: + switch (ctl.wIndex) { + case J_TEST: + case K_TEST: + case SE0_NAK_TEST: + case TST_PKT_TEST: + atomic_set(&ui->test_mode, ctl.wIndex); + goto ack; + } + goto stall; + case USB_DEVICE_REMOTE_WAKEUP: + atomic_set(&ui->remote_wakeup, 1); + goto ack; +#ifdef CONFIG_USB_OTG + case USB_DEVICE_B_HNP_ENABLE: + ui->gadget.b_hnp_enable = 1; + goto ack; + case USB_DEVICE_A_HNP_SUPPORT: + case USB_DEVICE_A_ALT_HNP_SUPPORT: + /* B-devices compliant to OTG spec + * Rev 2.0 are not required to + * suppport these features. 
+ */ + goto stall; +#endif + } + } else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) && + (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) { + atomic_set(&ui->remote_wakeup, 0); + goto ack; + } + } + + /* delegate if we get here */ + if (ui->driver) { + ret = ui->driver->setup(&ui->gadget, &ctl); + if (ret >= 0) + return; + } + +stall: + /* stall ep0 on error */ + ep0_setup_stall(ui); + return; + +ack: + ep0_setup_ack(ui); +} + +static void handle_endpoint(struct usb_info *ui, unsigned bit) +{ + struct msm_endpoint *ept = ui->ept + bit; + struct msm_request *req; + unsigned long flags; + unsigned info; + + /* + INFO("handle_endpoint() %d %s req=%p(%08x)\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->req, ept->req ? ept->req->item_dma : 0); + */ + + /* expire all requests that are no longer active */ + spin_lock_irqsave(&ui->lock, flags); + while ((req = ept->req)) { + /* if we've processed all live requests, time to + * restart the hardware on the next non-live request + */ + if (!req->live) { + usb_ept_start(ept); + break; + } + + /* clean speculative fetches on req->item->info */ + dma_coherent_post_ops(); + info = req->item->info; + /* if the transaction is still in-flight, stop here */ + if (info & INFO_ACTIVE) + break; + + del_timer(&ept->prime_timer); + /* advance ept queue to the next request */ + ept->req = req->next; + if (ept->req == 0) + ept->last = 0; + + dma_unmap_single(NULL, req->dma, req->req.length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) { + /* XXX pass on more specific error code */ + req->req.status = -EIO; + req->req.actual = 0; + dev_err(&ui->pdev->dev, + "ept %d %s error. info=%08x\n", + ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + info); + } else { + req->req.status = 0; + req->req.actual = + req->req.length - ((info >> 16) & 0x7FFF); + } + req->busy = 0; + req->live = 0; + + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void flush_endpoint_hw(struct usb_info *ui, unsigned bits) +{ + /* flush endpoint, canceling transactions + ** - this can take a "large amount of time" (per databook) + ** - the flush can fail in some cases, thus we check STAT + ** and repeat if we're still operating + ** (does the fact that this doesn't use the tripwire matter?!) + */ + do { + writel(bits, USB_ENDPTFLUSH); + while (readl(USB_ENDPTFLUSH) & bits) + udelay(100); + } while (readl(USB_ENDPTSTAT) & bits); +} + +static void flush_endpoint_sw(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req, *next_req = NULL; + unsigned long flags; + + /* inactive endpoints have nothing to do here */ + if (ept->ep.maxpacket == 0) + return; + + /* put the queue head in a sane state */ + ept->head->info = 0; + ept->head->next = TERMINATE; + + /* cancel any pending requests */ + spin_lock_irqsave(&ui->lock, flags); + req = ept->req; + ept->req = 0; + ept->last = 0; + while (req != 0) { + req->busy = 0; + req->live = 0; + req->req.status = -ESHUTDOWN; + req->req.actual = 0; + + /* Gadget driver may free the request in completion + * handler. So keep a copy of next req pointer + * before calling completion handler. 
+ */ + next_req = req->next; + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + req = next_req; + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void flush_endpoint(struct msm_endpoint *ept) +{ + del_timer(&ept->prime_timer); + flush_endpoint_hw(ept->ui, (1 << ept->bit)); + flush_endpoint_sw(ept); +} + +static irqreturn_t usb_interrupt(int irq, void *data) +{ + struct usb_info *ui = data; + unsigned n; + unsigned long flags; + + n = readl(USB_USBSTS); + writel(n, USB_USBSTS); + + /* somehow we got an IRQ while in the reset sequence: ignore it */ + if (!atomic_read(&ui->running)) + return IRQ_HANDLED; + + if (n & STS_PCI) { + msm_hsusb_set_speed(ui); + if (atomic_read(&ui->configured)) { + wake_lock(&ui->wlock); + + spin_lock_irqsave(&ui->lock, flags); + ui->usb_state = USB_STATE_CONFIGURED; + ui->flags = USB_FLAG_CONFIGURED; + spin_unlock_irqrestore(&ui->lock, flags); + + ui->driver->resume(&ui->gadget); + schedule_work(&ui->work); + } else { + msm_hsusb_set_state(USB_STATE_DEFAULT); + } + +#ifdef CONFIG_USB_OTG + /* notify otg to clear A_BIDL_ADIS timer */ + if (ui->gadget.is_a_peripheral) + otg_set_suspend(ui->xceiv, 0); +#endif + } + + if (n & STS_URI) { + dev_dbg(&ui->pdev->dev, "reset\n"); + spin_lock_irqsave(&ui->lock, flags); + ui->gadget.speed = USB_SPEED_UNKNOWN; + spin_unlock_irqrestore(&ui->lock, flags); +#ifdef CONFIG_USB_OTG + /* notify otg to clear A_BIDL_ADIS timer */ + if (ui->gadget.is_a_peripheral) + otg_set_suspend(ui->xceiv, 0); + spin_lock_irqsave(&ui->lock, flags); + /* Host request is persistent across reset */ + ui->gadget.b_hnp_enable = 0; + ui->hnp_avail = 0; + spin_unlock_irqrestore(&ui->lock, flags); +#endif + msm_hsusb_set_state(USB_STATE_DEFAULT); + atomic_set(&ui->remote_wakeup, 0); + if (!ui->gadget.is_a_peripheral) + schedule_delayed_work(&ui->chg_stop, 0); + + writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT); + writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE); + writel(0xffffffff, USB_ENDPTFLUSH); + writel(0, USB_ENDPTCTRL(1)); + + wake_lock(&ui->wlock); + if (atomic_read(&ui->configured)) { + /* marking us offline will cause ept queue attempts + ** to fail + */ + atomic_set(&ui->configured, 0); + /* Defer sending offline uevent to userspace */ + atomic_set(&ui->offline_pending, 1); + + /* XXX: we can't seem to detect going offline, + * XXX: so deconfigure on reset for the time being + */ + dev_dbg(&ui->pdev->dev, + "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + /* cancel pending ep0 transactions */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + } + /* Start phy stuck timer */ + if (ui->pdata && ui->pdata->is_phy_status_timer_on) + mod_timer(&phy_status_timer, PHY_STATUS_CHECK_DELAY); + } + + if (n & STS_SLI) { + dev_dbg(&ui->pdev->dev, "suspend\n"); + + spin_lock_irqsave(&ui->lock, flags); + ui->usb_state = USB_STATE_SUSPENDED; + ui->flags = USB_FLAG_SUSPEND; + spin_unlock_irqrestore(&ui->lock, flags); + + ui->driver->suspend(&ui->gadget); + schedule_work(&ui->work); +#ifdef CONFIG_USB_OTG + /* notify otg for + * 1. kicking A_BIDL_ADIS timer in case of A-peripheral + * 2. 
disabling pull-up and kicking B_ASE0_RST timer + */ + if (ui->gadget.b_hnp_enable || ui->gadget.is_a_peripheral) + otg_set_suspend(ui->xceiv, 1); +#endif + } + + if (n & STS_UI) { + n = readl(USB_ENDPTSETUPSTAT); + if (n & EPT_RX(0)) + handle_setup(ui); + + n = readl(USB_ENDPTCOMPLETE); + writel(n, USB_ENDPTCOMPLETE); + while (n) { + unsigned bit = __ffs(n); + handle_endpoint(ui, bit); + n = n & (~(1 << bit)); + } + } + return IRQ_HANDLED; +} + +static void usb_prepare(struct usb_info *ui) +{ + spin_lock_init(&ui->lock); + + memset(ui->buf, 0, 4096); + ui->head = (void *) (ui->buf + 0); + + /* only important for reset/reinit */ + memset(ui->ept, 0, sizeof(ui->ept)); + ui->next_item = 0; + ui->next_ifc_num = 0; + + init_endpoints(ui); + + ui->ep0in.ep.maxpacket = 64; + ui->ep0out.ep.maxpacket = 64; + + ui->setup_req = + usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL); + + INIT_WORK(&ui->work, usb_do_work); + INIT_DELAYED_WORK(&ui->chg_det, usb_chg_detect); + INIT_DELAYED_WORK(&ui->chg_stop, usb_chg_stop); + INIT_DELAYED_WORK(&ui->rw_work, usb_do_remote_wakeup); + if (ui->pdata && ui->pdata->is_phy_status_timer_on) + INIT_WORK(&ui->phy_status_check, usb_phy_stuck_recover); +} + +static void usb_reset(struct usb_info *ui) +{ + struct msm_otg *otg = to_msm_otg(ui->xceiv); + + dev_dbg(&ui->pdev->dev, "reset controller\n"); + + atomic_set(&ui->running, 0); + + /* + * PHY reset takes minimum 100 msec. Hence reset only link + * during HNP. Reset PHY and link in B-peripheral mode. + */ + if (ui->gadget.is_a_peripheral) + otg->reset(ui->xceiv, 0); + else + otg->reset(ui->xceiv, 1); + + /* set usb controller interrupt threshold to zero*/ + writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0), + USB_USBCMD); + + writel(ui->dma, USB_ENDPOINTLISTADDR); + + configure_endpoints(ui); + + /* marking us offline will cause ept queue attempts to fail */ + atomic_set(&ui->configured, 0); + + if (ui->driver) { + dev_dbg(&ui->pdev->dev, "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + + /* cancel pending ep0 transactions */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + /* enable interrupts */ + writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR); + + /* Ensure that h/w RESET is completed before returning */ + mb(); + + atomic_set(&ui->running, 1); +} + +static void usb_start(struct usb_info *ui) +{ + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_START; + schedule_work(&ui->work); + spin_unlock_irqrestore(&ui->lock, flags); +} + +static int usb_free(struct usb_info *ui, int ret) +{ + dev_dbg(&ui->pdev->dev, "usb_free(%d)\n", ret); + + if (ui->xceiv) + otg_put_transceiver(ui->xceiv); + + if (ui->irq) + free_irq(ui->irq, 0); + if (ui->pool) + dma_pool_destroy(ui->pool); + if (ui->dma) + dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma); + kfree(ui); + return ret; +} + +static void usb_do_work_check_vbus(struct usb_info *ui) +{ + unsigned long iflags; + + spin_lock_irqsave(&ui->lock, iflags); + if (is_usb_online(ui)) + ui->flags |= USB_FLAG_VBUS_ONLINE; + else + ui->flags |= USB_FLAG_VBUS_OFFLINE; + spin_unlock_irqrestore(&ui->lock, iflags); +} + +static void usb_do_work(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, work); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long iflags; + unsigned flags, _vbus; + + for (;;) { + spin_lock_irqsave(&ui->lock, iflags); + flags = ui->flags; + ui->flags = 0; + _vbus = is_usb_online(ui); + 
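+ /* flags and VBUS state were sampled and cleared under the lock;
+  * the state machine below acts on that snapshot without holding it */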
spin_unlock_irqrestore(&ui->lock, iflags); + + /* give up if we have nothing to do */ + if (flags == 0) + break; + + switch (ui->state) { + case USB_STATE_IDLE: + if (flags & USB_FLAG_START) { + int ret; + + if (!_vbus) { + ui->state = USB_STATE_OFFLINE; + break; + } + + pm_runtime_get_noresume(&ui->pdev->dev); + pm_runtime_resume(&ui->pdev->dev); + dev_dbg(&ui->pdev->dev, + "msm72k_udc: IDLE -> ONLINE\n"); + usb_reset(ui); + ret = request_irq(otg->irq, usb_interrupt, + IRQF_SHARED, + ui->pdev->name, ui); + /* FIXME: should we call BUG_ON when + * requst irq fails + */ + if (ret) { + dev_err(&ui->pdev->dev, + "hsusb: peripheral: request irq" + " failed:(%d)", ret); + break; + } + ui->irq = otg->irq; + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + + if (!atomic_read(&ui->softconnect)) + break; + + msm72k_pullup_internal(&ui->gadget, 1); + + if (!ui->gadget.is_a_peripheral) + schedule_delayed_work( + &ui->chg_det, + USB_CHG_DET_DELAY); + + } + break; + case USB_STATE_ONLINE: + if (atomic_read(&ui->offline_pending)) { + switch_set_state(&ui->sdev, 0); + atomic_set(&ui->offline_pending, 0); + } + + /* If at any point when we were online, we received + * the signal to go offline, we must honor it + */ + if (flags & USB_FLAG_VBUS_OFFLINE) { + + ui->chg_current = 0; + /* wait incase chg_detect is running */ + if (!ui->gadget.is_a_peripheral) + cancel_delayed_work_sync(&ui->chg_det); + + dev_dbg(&ui->pdev->dev, + "msm72k_udc: ONLINE -> OFFLINE\n"); + + atomic_set(&ui->running, 0); + atomic_set(&ui->remote_wakeup, 0); + atomic_set(&ui->configured, 0); + + if (ui->driver) { + dev_dbg(&ui->pdev->dev, + "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + /* cancel pending ep0 transactions */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + /* synchronize with irq context */ + spin_lock_irqsave(&ui->lock, iflags); +#ifdef CONFIG_USB_OTG + ui->gadget.host_request = 0; + ui->gadget.b_hnp_enable = 0; + ui->hnp_avail = 0; +#endif + msm72k_pullup_internal(&ui->gadget, 0); + spin_unlock_irqrestore(&ui->lock, iflags); + + + /* if charger is initialized to known type + * we must let modem know about charger + * disconnection + */ + otg_set_power(ui->xceiv, 0); + + if (ui->irq) { + free_irq(ui->irq, ui); + ui->irq = 0; + } + + + switch_set_state(&ui->sdev, 0); + + ui->state = USB_STATE_OFFLINE; + usb_do_work_check_vbus(ui); + pm_runtime_put_noidle(&ui->pdev->dev); + pm_runtime_suspend(&ui->pdev->dev); + wake_unlock(&ui->wlock); + break; + } + if (flags & USB_FLAG_SUSPEND) { + int maxpower = usb_get_max_power(ui); + + if (maxpower < 0) + break; + + otg_set_power(ui->xceiv, 0); + /* To support TCXO during bus suspend + * This might be dummy check since bus suspend + * is not implemented as of now + * */ + if (release_wlocks) + wake_unlock(&ui->wlock); + + /* TBD: Initiate LPM at usb bus suspend */ + break; + } + if (flags & USB_FLAG_CONFIGURED) { + int maxpower = usb_get_max_power(ui); + + /* We may come here even when no configuration + * is selected. Send online/offline event + * accordingly. 
+ */ + switch_set_state(&ui->sdev, + atomic_read(&ui->configured)); + + if (maxpower < 0) + break; + + ui->chg_current = maxpower; + otg_set_power(ui->xceiv, maxpower); + break; + } + if (flags & USB_FLAG_RESET) { + dev_dbg(&ui->pdev->dev, + "msm72k_udc: ONLINE -> RESET\n"); + msm72k_pullup_internal(&ui->gadget, 0); + usb_reset(ui); + msm72k_pullup_internal(&ui->gadget, 1); + dev_dbg(&ui->pdev->dev, + "msm72k_udc: RESET -> ONLINE\n"); + break; + } + break; + case USB_STATE_OFFLINE: + /* If we were signaled to go online and vbus is still + * present when we received the signal, go online. + */ + if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) { + int ret; + + pm_runtime_get_noresume(&ui->pdev->dev); + pm_runtime_resume(&ui->pdev->dev); + dev_dbg(&ui->pdev->dev, + "msm72k_udc: OFFLINE -> ONLINE\n"); + + usb_reset(ui); + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + ret = request_irq(otg->irq, usb_interrupt, + IRQF_SHARED, + ui->pdev->name, ui); + /* FIXME: should we call BUG_ON when + * requst irq fails + */ + if (ret) { + dev_err(&ui->pdev->dev, + "hsusb: peripheral: request irq" + " failed:(%d)", ret); + break; + } + ui->irq = otg->irq; + enable_irq_wake(otg->irq); + + if (!atomic_read(&ui->softconnect)) + break; + msm72k_pullup_internal(&ui->gadget, 1); + + if (!ui->gadget.is_a_peripheral) + schedule_delayed_work( + &ui->chg_det, + USB_CHG_DET_DELAY); + } + break; + } + } +} + +/* FIXME - the callers of this function should use a gadget API instead. + * This is called from htc_battery.c and board-halibut.c + * WARNING - this can get called before this driver is initialized. + */ +void msm_hsusb_set_vbus_state(int online) +{ + unsigned long flags; + struct usb_info *ui = the_usb_info; + + if (!ui) { + pr_err("%s called before driver initialized\n", __func__); + return; + } + + spin_lock_irqsave(&ui->lock, flags); + + if (is_usb_online(ui) == online) + goto out; + + if (online) { + ui->usb_state = USB_STATE_POWERED; + ui->flags |= USB_FLAG_VBUS_ONLINE; + } else { + ui->gadget.speed = USB_SPEED_UNKNOWN; + ui->usb_state = USB_STATE_NOTATTACHED; + ui->flags |= USB_FLAG_VBUS_OFFLINE; + } + if (in_interrupt()) { + schedule_work(&ui->work); + } else { + spin_unlock_irqrestore(&ui->lock, flags); + usb_do_work(&ui->work); + return; + } +out: + spin_unlock_irqrestore(&ui->lock, flags); +} + +#if defined(CONFIG_DEBUG_FS) + +void usb_function_reenumerate(void) +{ + struct usb_info *ui = the_usb_info; + + /* disable and re-enable the D+ pullup */ + dev_dbg(&ui->pdev->dev, "disable pullup\n"); + writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD); + + msleep(10); + + dev_dbg(&ui->pdev->dev, "enable pullup\n"); + writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD); +} + +static char debug_buffer[PAGE_SIZE]; + +static ssize_t debug_read_status(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + char *buf = debug_buffer; + unsigned long flags; + struct msm_endpoint *ept; + struct msm_request *req; + int n; + int i = 0; + + spin_lock_irqsave(&ui->lock, flags); + + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: setup=%08x prime=%08x stat=%08x done=%08x\n", + readl(USB_ENDPTSETUPSTAT), + readl(USB_ENDPTPRIME), + readl(USB_ENDPTSTAT), + readl(USB_ENDPTCOMPLETE)); + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: cmd=%08x sts=%08x intr=%08x port=%08x\n\n", + readl(USB_USBCMD), + readl(USB_USBSTS), + readl(USB_USBINTR), + readl(USB_PORTSC)); + + + for (n = 0; n < 32; n++) { + ept = ui->ept + n; + if (ept->ep.maxpacket == 0) + continue; + 
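+ /* dump this endpoint's hardware queue head and every request still queued on it */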
+ i += scnprintf(buf + i, PAGE_SIZE - i, + "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + + for (req = ept->req; req; req = req->next) + i += scnprintf(buf + i, PAGE_SIZE - i, + " req @%08x next=%08x info=%08x page0=%08x %c %c\n", + req->item_dma, req->item->next, + req->item->info, req->item->page0, + req->busy ? 'B' : ' ', + req->live ? 'L' : ' '); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, + "phy failure count: %d\n", ui->phy_fail_count); + + spin_unlock_irqrestore(&ui->lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static ssize_t debug_write_reset(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_RESET; + schedule_work(&ui->work); + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} + +static ssize_t debug_write_cycle(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + usb_function_reenumerate(); + return count; +} + +static int debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +const struct file_operations debug_stat_ops = { + .open = debug_open, + .read = debug_read_status, +}; + +const struct file_operations debug_reset_ops = { + .open = debug_open, + .write = debug_write_reset, +}; + +const struct file_operations debug_cycle_ops = { + .open = debug_open, + .write = debug_write_cycle, +}; + +static ssize_t debug_read_release_wlocks(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char kbuf[10]; + size_t c = 0; + + memset(kbuf, 0, 10); + + c = scnprintf(kbuf, 10, "%d", release_wlocks); + + if (copy_to_user(ubuf, kbuf, c)) + return -EFAULT; + + return c; +} +static ssize_t debug_write_release_wlocks(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + char kbuf[10]; + long temp; + + memset(kbuf, 0, 10); + + if (copy_from_user(kbuf, buf, count > 10 ? 10 : count)) + return -EFAULT; + + if (strict_strtol(kbuf, 10, &temp)) + return -EINVAL; + + if (temp) + release_wlocks = 1; + else + release_wlocks = 0; + + return count; +} +static int debug_wake_lock_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +const struct file_operations debug_wlocks_ops = { + .open = debug_wake_lock_open, + .read = debug_read_release_wlocks, + .write = debug_write_release_wlocks, +}; + +static ssize_t debug_reprime_ep(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + struct msm_endpoint *ept; + char kbuf[10]; + unsigned int ep_num, dir; + unsigned long flags; + unsigned n, i; + + memset(kbuf, 0, 10); + + if (copy_from_user(kbuf, ubuf, count > 10 ? 10 : count)) + return -EFAULT; + + if (sscanf(kbuf, "%u %u", &ep_num, &dir) != 2) + return -EINVAL; + + if (dir) + i = ep_num + 16; + else + i = ep_num; + + spin_lock_irqsave(&ui->lock, flags); + ept = ui->ept + i; + n = 1 << ept->bit; + + if ((readl_relaxed(USB_ENDPTPRIME) & n)) + goto out; + + if (readl_relaxed(USB_ENDPTSTAT) & n) + goto out; + + /* clear speculative loads on item->info */ + rmb(); + if (ept->req && (ept->req->item->info & INFO_ACTIVE)) { + pr_err("%s(): ept%d%s prime failed. 
ept: config: %x" + "active: %x next: %x info: %x\n", + __func__, ept->num, + ept->flags & EPT_FLAG_IN ? "in" : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + writel_relaxed(n, USB_ENDPTPRIME); + } +out: + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} + +static char buffer[512]; +static ssize_t debug_prime_fail_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + char *buf = buffer; + unsigned long flags; + struct msm_endpoint *ept; + int n; + int i = 0; + + spin_lock_irqsave(&ui->lock, flags); + for (n = 0; n < 32; n++) { + ept = ui->ept + n; + if (ept->ep.maxpacket == 0) + continue; + + i += scnprintf(buf + i, PAGE_SIZE - i, + "ept%d %s false_prime_count=%lu prime_fail_count=%d dtd_fail_count=%lu\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out", + ept->false_prime_fail_count, + ept->actual_prime_fail_count, + ept->dTD_update_fail_count); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, + "dTD_update_fail count: %lu\n", + ui->dTD_update_fail_count); + + i += scnprintf(buf + i, PAGE_SIZE - i, + "prime_fail count: %d\n", ui->prime_fail_count); + + spin_unlock_irqrestore(&ui->lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static int debug_prime_fail_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +const struct file_operations prime_fail_ops = { + .open = debug_prime_fail_open, + .read = debug_prime_fail_read, + .write = debug_reprime_ep, +}; + +static void usb_debugfs_init(struct usb_info *ui) +{ + struct dentry *dent; + dent = debugfs_create_dir(dev_name(&ui->pdev->dev), 0); + if (IS_ERR(dent)) + return; + + debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops); + debugfs_create_file("reset", 0222, dent, ui, &debug_reset_ops); + debugfs_create_file("cycle", 0222, dent, ui, &debug_cycle_ops); + debugfs_create_file("release_wlocks", 0666, dent, ui, + &debug_wlocks_ops); + debugfs_create_file("prime_fail_countt", 0666, dent, ui, + &prime_fail_ops); +} +#else +static void usb_debugfs_init(struct usb_info *ui) {} +#endif + +static int +msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + unsigned char ep_type = + desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + + _ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize); + config_ept(ept); + ept->wedged = 0; + usb_ept_enable(ept, 1, ep_type); + return 0; +} + +static int msm72k_disable(struct usb_ep *_ep) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + + usb_ept_enable(ept, 0, 0); + flush_endpoint(ept); + return 0; +} + +static struct usb_request * +msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) +{ + return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags); +} + +static void +msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_request *req = to_msm_request(_req); + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + + /* request should not be busy */ + BUG_ON(req->busy); + if (req->alloced) + kfree(req->req.buf); + dma_pool_free(ui->pool, req->item, req->item_dma); + kfree(req); +} + +static int +msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct usb_info *ui = ep->ui; + + if (ep == &ui->ep0in) { + struct msm_request *r = to_msm_request(req); + if (!req->length) + goto 
ep_queue_done; + r->gadget_complete = req->complete; + /* ep0_queue_ack_complete queue a receive for ACK before + ** calling req->complete + */ + req->complete = ep0_queue_ack_complete; + if (atomic_read(&ui->ep0_dir) == USB_DIR_OUT) + ep = &ui->ep0out; + goto ep_queue_done; + } + +ep_queue_done: + return usb_ept_queue_xfer(ep, req); +} + +static int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct msm_request *req = to_msm_request(_req); + struct usb_info *ui = ep->ui; + + struct msm_request *temp_req; + unsigned long flags; + + if (!(ui && req && ep->req)) + return -EINVAL; + + spin_lock_irqsave(&ui->lock, flags); + if (!req->busy) { + dev_dbg(&ui->pdev->dev, "%s: !req->busy\n", __func__); + spin_unlock_irqrestore(&ui->lock, flags); + return -EINVAL; + } + del_timer(&ep->prime_timer); + /* Stop the transfer */ + do { + writel((1 << ep->bit), USB_ENDPTFLUSH); + while (readl(USB_ENDPTFLUSH) & (1 << ep->bit)) + udelay(100); + } while (readl(USB_ENDPTSTAT) & (1 << ep->bit)); + + req->req.status = 0; + req->busy = 0; + + if (ep->req == req) { + ep->req = req->next; + ep->head->next = req->item->next; + } else { + req->prev->next = req->next; + if (req->next) + req->next->prev = req->prev; + req->prev->item->next = req->item->next; + } + + if (!req->next) + ep->last = req->prev; + + /* initialize request to default */ + req->item->next = TERMINATE; + req->item->info = 0; + req->live = 0; + dma_unmap_single(NULL, req->dma, req->req.length, + (ep->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (req->req.complete) { + req->req.status = -ECONNRESET; + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ep->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + + if (!req->live) { + /* Reprime the endpoint for the remaining transfers */ + for (temp_req = ep->req ; temp_req ; temp_req = temp_req->next) + temp_req->live = 0; + if (ep->req) + usb_ept_start(ep); + spin_unlock_irqrestore(&ui->lock, flags); + return 0; + } + spin_unlock_irqrestore(&ui->lock, flags); + return 0; +} + +static int +usb_ept_set_halt(struct usb_ep *_ep, int value) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + unsigned int in = ept->flags & EPT_FLAG_IN; + unsigned int n; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + if (value) + n |= CTRL_TXS; + else { + n &= ~CTRL_TXS; + n |= CTRL_TXR; + } + } else { + if (value) + n |= CTRL_RXS; + else { + n &= ~CTRL_RXS; + n |= CTRL_RXR; + } + } + writel(n, USB_ENDPTCTRL(ept->num)); + if (!value) + ept->wedged = 0; + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static int +msm72k_set_halt(struct usb_ep *_ep, int value) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + unsigned int in = ept->flags & EPT_FLAG_IN; + + if (value && in && ept->req) + return -EAGAIN; + + usb_ept_set_halt(_ep, value); + + return 0; +} + +static int +msm72k_fifo_status(struct usb_ep *_ep) +{ + return -EOPNOTSUPP; +} + +static void +msm72k_fifo_flush(struct usb_ep *_ep) +{ + flush_endpoint(to_msm_endpoint(_ep)); +} +static int msm72k_set_wedge(struct usb_ep *_ep) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + + if (ept->num == 0) + return -EINVAL; + + ept->wedged = 1; + + return msm72k_set_halt(_ep, 1); +} + +static const struct usb_ep_ops msm72k_ep_ops = { + .enable = msm72k_enable, + .disable = msm72k_disable, + + .alloc_request = msm72k_alloc_request, + 
.free_request = msm72k_free_request, + + .queue = msm72k_queue, + .dequeue = msm72k_dequeue, + + .set_halt = msm72k_set_halt, + .set_wedge = msm72k_set_wedge, + .fifo_status = msm72k_fifo_status, + .fifo_flush = msm72k_fifo_flush, +}; + +static int msm72k_get_frame(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + + /* frame number is in bits 13:3 */ + return (readl(USB_FRINDEX) >> 3) & 0x000007FF; +} + +/* VBUS reporting logically comes from a transceiver */ +static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + + if (is_active || atomic_read(&otg->chg_type) + == USB_CHG_TYPE__WALLCHARGER) + wake_lock(&ui->wlock); + + msm_hsusb_set_vbus_state(is_active); + return 0; +} + +/* SW workarounds +Issue #1 - USB Spoof Disconnect Failure +Symptom - Writing 0 to run/stop bit of USBCMD doesn't cause disconnect +SW workaround - Making opmode non-driving and SuspendM set in function + register of SMSC phy +*/ +/* drivers may have software control over D+ pullup */ +static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + + if (is_active) { + spin_lock_irqsave(&ui->lock, flags); + if (is_usb_online(ui) && ui->driver) + writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD); + spin_unlock_irqrestore(&ui->lock, flags); + } else { + writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD); + /* S/W workaround, Issue#1 */ + otg_io_write(ui->xceiv, 0x48, 0x04); + } + + /* Ensure pull-up operation is completed before returning */ + mb(); + + return 0; +} + +static int msm72k_pullup(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long flags; + + atomic_set(&ui->softconnect, is_active); + + spin_lock_irqsave(&ui->lock, flags); + if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL || + atomic_read(&otg->chg_type) == USB_CHG_TYPE__WALLCHARGER) { + spin_unlock_irqrestore(&ui->lock, flags); + return 0; + } + spin_unlock_irqrestore(&ui->lock, flags); + + msm72k_pullup_internal(_gadget, is_active); + + if (is_active && !ui->gadget.is_a_peripheral) + schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY); + + return 0; +} + +static int msm72k_wakeup(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + + if (!atomic_read(&ui->remote_wakeup)) { + dev_err(&ui->pdev->dev, + "%s: remote wakeup not supported\n", __func__); + return -ENOTSUPP; + } + + if (!atomic_read(&ui->configured)) { + dev_err(&ui->pdev->dev, + "%s: device is not configured\n", __func__); + return -ENODEV; + } + otg_set_suspend(ui->xceiv, 0); + + disable_irq(otg->irq); + + if (!is_usb_active()) + writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC); + + /* Ensure that USB port is resumed before enabling the IRQ */ + mb(); + + enable_irq(otg->irq); + + return 0; +} + +/* when Gadget is configured, it will indicate how much power + * can be pulled from vbus, as specified in configuiration descriptor + */ +static int msm72k_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + + + spin_lock_irqsave(&ui->lock, flags); 
+ ui->b_max_pow = mA; + ui->flags = USB_FLAG_CONFIGURED; + spin_unlock_irqrestore(&ui->lock, flags); + + schedule_work(&ui->work); + + return 0; +} + +static int msm72k_set_selfpowered(struct usb_gadget *_gadget, int set) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ui->lock, flags); + if (set) { + if (ui->pdata && ui->pdata->self_powered) + atomic_set(&ui->self_powered, 1); + else + ret = -EOPNOTSUPP; + } else { + /* We can always work as a bus powered device */ + atomic_set(&ui->self_powered, 0); + } + spin_unlock_irqrestore(&ui->lock, flags); + + return ret; + +} + +static const struct usb_gadget_ops msm72k_ops = { + .get_frame = msm72k_get_frame, + .vbus_session = msm72k_udc_vbus_session, + .vbus_draw = msm72k_udc_vbus_draw, + .pullup = msm72k_pullup, + .wakeup = msm72k_wakeup, + .set_selfpowered = msm72k_set_selfpowered, +}; + +static void usb_do_remote_wakeup(struct work_struct *w) +{ + struct usb_info *ui = the_usb_info; + + msm72k_wakeup(&ui->gadget); +} + +static ssize_t usb_remote_wakeup(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + + msm72k_wakeup(&ui->gadget); + + return count; +} + +static ssize_t show_usb_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + size_t i; + char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED", + "USB_STATE_POWERED", "USB_STATE_UNAUTHENTICATED", + "USB_STATE_RECONNECTING", "USB_STATE_DEFAULT", + "USB_STATE_ADDRESS", "USB_STATE_CONFIGURED", + "USB_STATE_SUSPENDED" + }; + + i = scnprintf(buf, PAGE_SIZE, "%s\n", state[msm_hsusb_get_state()]); + return i; +} + +static ssize_t show_usb_speed(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct usb_info *ui = the_usb_info; + size_t i; + char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW", + "USB_SPEED_FULL", "USB_SPEED_HIGH"}; + + i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->gadget.speed]); + return i; +} + +static ssize_t store_usb_chg_current(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + unsigned long mA; + + if (ui->gadget.is_a_peripheral) + return -EINVAL; + + if (strict_strtoul(buf, 10, &mA)) + return -EINVAL; + + ui->chg_current = mA; + otg_set_power(ui->xceiv, mA); + + return count; +} + +static ssize_t show_usb_chg_current(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + size_t count; + + count = snprintf(buf, PAGE_SIZE, "%d", ui->chg_current); + + return count; +} + +static ssize_t show_usb_chg_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + struct msm_otg *otg = to_msm_otg(ui->xceiv); + size_t count; + char *chg_type[] = {"STD DOWNSTREAM PORT", + "CARKIT", + "DEDICATED CHARGER", + "INVALID"}; + + count = snprintf(buf, PAGE_SIZE, "%s", + chg_type[atomic_read(&otg->chg_type)]); + + return count; +} +static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup); +static DEVICE_ATTR(usb_state, S_IRUSR, show_usb_state, 0); +static DEVICE_ATTR(usb_speed, S_IRUSR, show_usb_speed, 0); +static DEVICE_ATTR(chg_type, S_IRUSR, show_usb_chg_type, 0); +static DEVICE_ATTR(chg_current, S_IWUSR | S_IRUSR, + show_usb_chg_current, store_usb_chg_current); + +#ifdef CONFIG_USB_OTG +static ssize_t store_host_req(struct device *dev, + struct device_attribute *attr, const char *buf, size_t 
count) +{ + struct usb_info *ui = the_usb_info; + unsigned long val, flags; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + + dev_dbg(&ui->pdev->dev, "%s host request\n", + val ? "set" : "clear"); + + spin_lock_irqsave(&ui->lock, flags); + if (ui->hnp_avail) + ui->gadget.host_request = !!val; + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} +static DEVICE_ATTR(host_request, S_IWUSR, NULL, store_host_req); + +/* How do we notify user space about HNP availability? + * As we are compliant to Rev 2.0, Host will not set a_hnp_support. + * Introduce hnp_avail flag and set when HNP polling request arrives. + * The expectation is that user space checks hnp availability before + * requesting host role via above sysfs node. + */ +static ssize_t show_host_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + size_t count; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + count = snprintf(buf, PAGE_SIZE, "%d\n", ui->hnp_avail); + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} +static DEVICE_ATTR(host_avail, S_IRUSR, show_host_avail, NULL); + +static struct attribute *otg_attrs[] = { + &dev_attr_host_request.attr, + &dev_attr_host_avail.attr, + NULL, +}; + +static struct attribute_group otg_attr_grp = { + .name = "otg", + .attrs = otg_attrs, +}; +#endif + +static int msm72k_probe(struct platform_device *pdev) +{ + struct usb_info *ui; + struct msm_otg *otg; + int retval; + + dev_dbg(&pdev->dev, "msm72k_probe\n"); + ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); + if (!ui) + return -ENOMEM; + + ui->pdev = pdev; + ui->pdata = pdev->dev.platform_data; + + ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL); + if (!ui->buf) + return usb_free(ui, -ENOMEM); + + ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0); + if (!ui->pool) + return usb_free(ui, -ENOMEM); + + ui->xceiv = otg_get_transceiver(); + if (!ui->xceiv) + return usb_free(ui, -ENODEV); + + otg = to_msm_otg(ui->xceiv); + ui->addr = otg->regs; + + ui->gadget.ops = &msm72k_ops; + ui->gadget.is_dualspeed = 1; + device_initialize(&ui->gadget.dev); + dev_set_name(&ui->gadget.dev, "gadget"); + ui->gadget.dev.parent = &pdev->dev; + ui->gadget.dev.dma_mask = pdev->dev.dma_mask; + +#ifdef CONFIG_USB_OTG + ui->gadget.is_otg = 1; +#endif + + ui->sdev.name = DRIVER_NAME; + ui->sdev.print_name = print_switch_name; + ui->sdev.print_state = print_switch_state; + + retval = switch_dev_register(&ui->sdev); + if (retval) + return usb_free(ui, retval); + + the_usb_info = ui; + + wake_lock_init(&ui->wlock, + WAKE_LOCK_SUSPEND, "usb_bus_active"); + + usb_debugfs_init(ui); + + usb_prepare(ui); + +#ifdef CONFIG_USB_OTG + retval = sysfs_create_group(&pdev->dev.kobj, &otg_attr_grp); + if (retval) { + dev_err(&ui->pdev->dev, + "failed to create otg sysfs directory:" + "err:(%d)\n", retval); + } +#endif + + retval = otg_set_peripheral(ui->xceiv, &ui->gadget); + if (retval) { + dev_err(&ui->pdev->dev, + "%s: Cannot bind the transceiver, retval:(%d)\n", + __func__, retval); + switch_dev_unregister(&ui->sdev); + wake_lock_destroy(&ui->wlock); + return usb_free(ui, retval); + } + + pm_runtime_enable(&pdev->dev); + + /* Setup phy stuck timer */ + if (ui->pdata && ui->pdata->is_phy_status_timer_on) + setup_timer(&phy_status_timer, usb_phy_status_check_timer, 0); + return 0; +} + +int usb_gadget_probe_driver(struct usb_gadget_driver *driver, + int (*bind)(struct usb_gadget *)) +{ + struct usb_info *ui = the_usb_info; + int retval, n; + + 
if (!driver + || driver->speed < USB_SPEED_FULL + || !bind + || !driver->disconnect + || !driver->setup) + return -EINVAL; + if (!ui) + return -ENODEV; + if (ui->driver) + return -EBUSY; + + /* first hook up the driver ... */ + ui->driver = driver; + ui->gadget.dev.driver = &driver->driver; + ui->gadget.name = driver_name; + INIT_LIST_HEAD(&ui->gadget.ep_list); + ui->gadget.ep0 = &ui->ep0in.ep; + INIT_LIST_HEAD(&ui->gadget.ep0->ep_list); + ui->gadget.speed = USB_SPEED_UNKNOWN; + atomic_set(&ui->softconnect, 1); + + for (n = 1; n < 16; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + for (n = 17; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + + retval = device_add(&ui->gadget.dev); + if (retval) + goto fail; + + retval = bind(&ui->gadget); + if (retval) { + dev_err(&ui->pdev->dev, "bind to driver %s --> error %d\n", + driver->driver.name, retval); + device_del(&ui->gadget.dev); + goto fail; + } + + retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup); + if (retval != 0) + dev_err(&ui->pdev->dev, "failed to create sysfs entry:" + "(wakeup) error: (%d)\n", retval); + retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_state); + if (retval != 0) + dev_err(&ui->pdev->dev, "failed to create sysfs entry:" + " (usb_state) error: (%d)\n", retval); + + retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_speed); + if (retval != 0) + dev_err(&ui->pdev->dev, "failed to create sysfs entry:" + " (usb_speed) error: (%d)\n", retval); + + retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_type); + if (retval != 0) + dev_err(&ui->pdev->dev, + "failed to create sysfs entry(chg_type): err:(%d)\n", + retval); + retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_current); + if (retval != 0) + dev_err(&ui->pdev->dev, + "failed to create sysfs entry(chg_current):" + "err:(%d)\n", retval); + + dev_dbg(&ui->pdev->dev, "registered gadget driver '%s'\n", + driver->driver.name); + usb_start(ui); + + return 0; + +fail: + ui->driver = NULL; + ui->gadget.dev.driver = NULL; + return retval; +} +EXPORT_SYMBOL(usb_gadget_probe_driver); + +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + struct usb_info *dev = the_usb_info; + + if (!dev) + return -ENODEV; + if (!driver || driver != dev->driver || !driver->unbind) + return -EINVAL; + + msm72k_pullup_internal(&dev->gadget, 0); + if (dev->irq) { + free_irq(dev->irq, dev); + dev->irq = 0; + } + dev->state = USB_STATE_IDLE; + atomic_set(&dev->configured, 0); + switch_set_state(&dev->sdev, 0); + /* cancel pending ep0 transactions */ + flush_endpoint(&dev->ep0out); + flush_endpoint(&dev->ep0in); + + device_remove_file(&dev->gadget.dev, &dev_attr_wakeup); + device_remove_file(&dev->gadget.dev, &dev_attr_usb_state); + device_remove_file(&dev->gadget.dev, &dev_attr_usb_speed); + device_remove_file(&dev->gadget.dev, &dev_attr_chg_type); + device_remove_file(&dev->gadget.dev, &dev_attr_chg_current); + driver->disconnect(&dev->gadget); + driver->unbind(&dev->gadget); + dev->gadget.dev.driver = NULL; + dev->driver = NULL; + + device_del(&dev->gadget.dev); + + dev_dbg(&dev->pdev->dev, + "unregistered gadget driver '%s'\n", driver->driver.name); + return 0; +} +EXPORT_SYMBOL(usb_gadget_unregister_driver); + + +static int msm72k_udc_runtime_suspend(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: suspending...\n"); + return 0; +} + +static int 
msm72k_udc_runtime_resume(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: resuming...\n"); + return 0; +} + +static int msm72k_udc_runtime_idle(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: idling...\n"); + return 0; +} + +static struct dev_pm_ops msm72k_udc_dev_pm_ops = { + .runtime_suspend = msm72k_udc_runtime_suspend, + .runtime_resume = msm72k_udc_runtime_resume, + .runtime_idle = msm72k_udc_runtime_idle +}; + +static struct platform_driver usb_driver = { + .probe = msm72k_probe, + .driver = { .name = "msm_hsusb", + .pm = &msm72k_udc_dev_pm_ops, }, +}; + +static int __init init(void) +{ + return platform_driver_register(&usb_driver); +} +module_init(init); + +static void __exit cleanup(void) +{ + platform_driver_unregister(&usb_driver); +} +module_exit(cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Mike Lockwood, Brian Swetland"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 88a464cc..271ef946 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -1602,7 +1602,7 @@ cleanup(void) if (status) ERROR(dev, "usb_gadget_unregister_driver %x\n", status); - unregister_chrdev_region(g_printer_devno, 1); + unregister_chrdev_region(g_printer_devno, 2); class_destroy(usb_gadget_class); mutex_unlock(&usb_printer_gadget.lock_printer_io); } diff --git a/drivers/usb/gadget/qcom_maemo.c b/drivers/usb/gadget/qcom_maemo.c new file mode 100644 index 00000000..39686c4e --- /dev/null +++ b/drivers/usb/gadget/qcom_maemo.c @@ -0,0 +1,304 @@ +/* + * Qualcomm Maemo Composite driver + * + * Copyright (C) 2008 David Brownell + * Copyright (C) 2008 Nokia Corporation + * Copyright (C) 2009 Samsung Electronics + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program from the Code Aurora Forum is free software; you can + * redistribute it and/or modify it under the GNU General Public License + * version 2 and only version 2 as published by the Free Software Foundation. + * The original work available from [git.kernel.org ] is subject to the + * notice below. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include + + +#define DRIVER_DESC "Qcom Maemo Composite Gadget" +#define VENDOR_ID 0x05c6 +#define PRODUCT_ID 0x902E + +/* + * kbuild is not very cooperative with respect to linking separately + * compiled library objects into one module. So for now we won't use + * separate compilation ... ensuring init/exit sections work to shrink + * the runtime footprint, and giving us at least some parts of what + * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
+ */ + +#include "composite.c" +#include "usbstring.c" +#include "config.c" +#include "epautoconf.c" + +#define USB_ETH + +#define USB_ETH_RNDIS +#ifdef USB_ETH_RNDIS +# include "f_rndis.c" +# include "rndis.c" +#endif + + +#include "u_serial.c" +#include "f_serial.c" + +#include "u_ether.c" + +#undef DBG /* u_ether.c has broken idea about macros */ +#undef VDBG /* so clean up after it */ +#undef ERROR +#undef INFO + +#include "f_mass_storage.c" +#include "f_diag.c" +#include "f_rmnet.c" + +/*-------------------------------------------------------------------------*/ +/* string IDs are assigned dynamically */ + +#define STRING_MANUFACTURER_IDX 0 +#define STRING_PRODUCT_IDX 1 +#define STRING_SERIAL_IDX 2 + +/* String Table */ +static struct usb_string strings_dev[] = { + /* These dummy values should be overridden by platform data */ + [STRING_MANUFACTURER_IDX].s = "Qualcomm Incorporated", + [STRING_PRODUCT_IDX].s = "Usb composition", + [STRING_SERIAL_IDX].s = "0123456789ABCDEF", + { } /* end of list */ +}; + +static struct usb_gadget_strings stringtab_dev = { + .language = 0x0409, /* en-us */ + .strings = strings_dev, +}; + +static struct usb_gadget_strings *dev_strings[] = { + &stringtab_dev, + NULL, +}; + +static struct usb_device_descriptor device_desc = { + .bLength = sizeof(device_desc), + .bDescriptorType = USB_DT_DEVICE, + .bcdUSB = __constant_cpu_to_le16(0x0200), + .bDeviceClass = USB_CLASS_PER_INTERFACE, + .bDeviceSubClass = 0, + .bDeviceProtocol = 0, + .idVendor = __constant_cpu_to_le16(VENDOR_ID), + .idProduct = __constant_cpu_to_le16(PRODUCT_ID), + .bcdDevice = __constant_cpu_to_le16(0xffff), + .bNumConfigurations = 1, +}; + +static u8 hostaddr[ETH_ALEN]; +static struct usb_diag_ch *diag_ch; +static struct usb_diag_platform_data usb_diag_pdata = { + .ch_name = DIAG_LEGACY, +}; + +/****************************** Configurations ******************************/ +static struct fsg_module_parameters mod_data = { + .stall = 0 +}; +FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); + +static struct fsg_common *fsg_common; +static int maemo_setup_config(struct usb_configuration *c, + const struct usb_ctrlrequest *ctrl); + +static int maemo_do_config(struct usb_configuration *c) +{ + int ret; + + ret = rndis_bind_config(c, hostaddr); + if (ret < 0) + return ret; + + ret = diag_function_add(c); + if (ret < 0) + return ret; + + ret = gser_bind_config(c, 0); + if (ret < 0) + return ret; + + ret = gser_bind_config(c, 1); + if (ret < 0) + return ret; + + ret = rmnet_function_add(c); + if (ret < 0) + return ret; + + ret = fsg_add(c->cdev, c, fsg_common); + if (ret < 0) + return ret; + + return 0; +} + +static struct usb_configuration maemo_config_driver = { + .label = "Qcom Maemo Gadget", + .bind = maemo_do_config, + .setup = maemo_setup_config, + .bConfigurationValue = 1, + .bMaxPower = 0xFA, +}; +static int maemo_setup_config(struct usb_configuration *c, + const struct usb_ctrlrequest *ctrl) +{ + int i; + int ret = -EOPNOTSUPP; + + for (i = 0; i < maemo_config_driver.next_interface_id; i++) { + if (maemo_config_driver.interface[i]->setup) { + ret = maemo_config_driver.interface[i]->setup( + maemo_config_driver.interface[i], ctrl); + if (ret >= 0) + return ret; + } + } + + return ret; +} + +static int maemo_bind(struct usb_composite_dev *cdev) +{ + struct usb_gadget *gadget = cdev->gadget; + int status, gcnum; + + /* set up diag channel */ + diag_ch = diag_setup(&usb_diag_pdata); + if (IS_ERR(diag_ch)) + return PTR_ERR(diag_ch); + + /* set up network link layer */ + status = 
gether_setup(cdev->gadget, hostaddr); + if (status < 0) + goto diag_clean; + + /* set up serial link layer */ + status = gserial_setup(cdev->gadget, 2); + if (status < 0) + goto fail0; + + /* set up mass storage function */ + fsg_common = fsg_common_from_params(0, cdev, &mod_data); + if (IS_ERR(fsg_common)) { + status = PTR_ERR(fsg_common); + goto fail1; + } + + gcnum = usb_gadget_controller_number(gadget); + if (gcnum >= 0) + device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); + else { + /* gadget zero is so simple (for now, no altsettings) that + * it SHOULD NOT have problems with bulk-capable hardware. + * so just warn about unrcognized controllers -- don't panic. + * + * things like configuration and altsetting numbering + * can need hardware-specific attention though. + */ + WARNING(cdev, "controller '%s' not recognized\n", + gadget->name); + device_desc.bcdDevice = __constant_cpu_to_le16(0x9999); + } + + /* Allocate string descriptor numbers ... note that string + * contents can be overridden by the composite_dev glue. + */ + + status = usb_string_id(cdev); + if (status < 0) + goto fail2; + strings_dev[STRING_MANUFACTURER_IDX].id = status; + device_desc.iManufacturer = status; + + status = usb_string_id(cdev); + if (status < 0) + goto fail2; + strings_dev[STRING_PRODUCT_IDX].id = status; + device_desc.iProduct = status; + + if (!usb_gadget_set_selfpowered(gadget)) + maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_SELFPOWER; + + if (gadget->ops->wakeup) + maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; + + /* register our first configuration */ + status = usb_add_config(cdev, &maemo_config_driver); + if (status < 0) + goto fail2; + + usb_gadget_set_selfpowered(gadget); + dev_info(&gadget->dev, DRIVER_DESC "\n"); + fsg_common_put(fsg_common); + return 0; + +fail2: + fsg_common_put(fsg_common); +fail1: + gserial_cleanup(); +fail0: + gether_cleanup(); +diag_clean: + diag_cleanup(diag_ch); + + return status; +} + +static int __exit maemo_unbind(struct usb_composite_dev *cdev) +{ + gserial_cleanup(); + gether_cleanup(); + diag_cleanup(diag_ch); + return 0; +} + +static struct usb_composite_driver qcom_maemo_driver = { + .name = "Qcom Maemo Gadget", + .dev = &device_desc, + .strings = dev_strings, + .bind = maemo_bind, + .unbind = __exit_p(maemo_unbind), +}; + +static int __init qcom_maemo_usb_init(void) +{ + return usb_composite_register(&qcom_maemo_driver); +} +module_init(qcom_maemo_usb_init); + +static void __exit qcom_maemo_usb_cleanup(void) +{ + usb_composite_unregister(&qcom_maemo_driver); +} +module_exit(qcom_maemo_usb_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("1.0"); diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 791cb0fa..2b994a5c 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -605,12 +605,12 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf) resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION); resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS); resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3); - resp->MaxPacketsPerTransfer = cpu_to_le32(1); - resp->MaxTransferSize = cpu_to_le32( - params->dev->mtu + resp->MaxPacketsPerTransfer = cpu_to_le32(TX_SKB_HOLD_THRESHOLD); + resp->MaxTransferSize = cpu_to_le32(TX_SKB_HOLD_THRESHOLD * + (params->dev->mtu + sizeof(struct ethhdr) + sizeof(struct rndis_packet_msg_type) - + 22); + + 22)); resp->PacketAlignmentFactor = cpu_to_le32(0); resp->AFListOffset = cpu_to_le32(0); resp->AFListSize = 
cpu_to_le32(0); @@ -1166,15 +1166,11 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ -static bool rndis_initialized; int rndis_init(void) { u8 i; - if (rndis_initialized) - return 0; - for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { #ifdef CONFIG_USB_GADGET_DEBUG_FILES char name [20]; @@ -1201,7 +1197,6 @@ int rndis_init(void) INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue)); } - rndis_initialized = true; return 0; } @@ -1210,13 +1205,7 @@ void rndis_exit(void) #ifdef CONFIG_USB_GADGET_DEBUG_FILES u8 i; char name[20]; -#endif - if (!rndis_initialized) - return; - rndis_initialized = false; - -#ifdef CONFIG_USB_GADGET_DEBUG_FILES for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { sprintf(name, NAME_TEMPLATE, i); remove_proc_entry(name, NULL); diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index 680d0326..15059670 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -274,7 +274,12 @@ static struct fsg_lun *fsg_lun_from_dev(struct device *dev) #define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ /* Number of buffers for CBW, DATA and CSW */ -#define FSG_NUM_BUFFERS 8 +#ifdef CONFIG_USB_CSW_HACK +#define FSG_NUM_BUFFERS 4 +#else +#define FSG_NUM_BUFFERS 2 +#endif + /* Default size of buffer length. */ #define FSG_BUFLEN ((u32)16384) diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c index 70cbd2f4..f48e7f7f 100644 --- a/drivers/usb/gadget/u_bam.c +++ b/drivers/usb/gadget/u_bam.c @@ -23,12 +23,18 @@ #include #include +#include +#include + #include "u_rmnet.h" #define BAM_N_PORTS 1 +#define BAM2BAM_N_PORTS 1 static struct workqueue_struct *gbam_wq; static int n_bam_ports; +static int n_bam2bam_ports; +static unsigned n_tx_req_queued; static unsigned bam_ch_ids[] = { 8 }; static const char *bam_ch_names[] = { "bam_dmux_ch_8" }; @@ -45,6 +51,8 @@ static const char *bam_ch_names[] = { "bam_dmux_ch_8" }; #define BAM_MUX_TX_Q_SIZE 200 #define BAM_MUX_RX_REQ_SIZE (2048 - BAM_MUX_HDR) +#define DL_INTR_THRESHOLD 20 + unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD; module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); @@ -66,8 +74,16 @@ module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR); unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE; module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR); +unsigned int dl_intr_threshold = DL_INTR_THRESHOLD; +module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR); + #define BAM_CH_OPENED BIT(0) #define BAM_CH_READY BIT(1) +#define SPS_PARAMS_PIPE_ID_MASK (0x1F) +#define SPS_PARAMS_SPS_MODE BIT(5) +#define SPS_PARAMS_TBE BIT(6) +#define MSM_VENDOR_ID BIT(16) + struct bam_ch_info { unsigned long flags; unsigned id; @@ -80,6 +96,14 @@ struct bam_ch_info { struct gbam_port *port; struct work_struct write_tobam_w; + struct work_struct write_tohost_w; + + struct usb_request *rx_req; + struct usb_request *tx_req; + + u8 src_pipe_idx; + u8 dst_pipe_idx; + u8 connection_idx; /* stats */ unsigned int pending_with_bam; @@ -93,9 +117,11 @@ struct bam_ch_info { struct gbam_port { unsigned port_num; - spinlock_t port_lock; + spinlock_t port_lock_ul; + spinlock_t port_lock_dl; struct grmnet *port_usb; + struct grmnet *gr; struct bam_ch_info data_ch; @@ -108,7 +134,10 @@ static struct bam_portmaster { struct platform_driver pdrv; } bam_ports[BAM_N_PORTS]; +struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS]; static void gbam_start_rx(struct gbam_port *port); 
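
The u_bam.c additions above (dl_intr_threshold, n_tx_req_queued and the DL_INTR_THRESHOLD default of 20) implement downlink interrupt moderation: only every Nth IN request queued toward the host keeps its completion interrupt, and the rest are queued with no_interrupt set. The actual use sits in the gbam_write_data_tohost() hunk that follows. Below is a minimal stand-alone C sketch of that decision logic only; it is not part of the patch, and its names are hypothetical stand-ins for the fields the driver sets on each usb_request.

/*
 * Editorial sketch, not part of the patch. Models the dl_intr_threshold
 * moderation used by gbam_write_data_tohost(): every Nth queued IN
 * request keeps its completion interrupt (no_interrupt = 0), the rest
 * suppress it (no_interrupt = 1). All names here are hypothetical.
 */
#include <stdio.h>

#define DL_INTR_THRESHOLD 20

static unsigned int n_tx_req_queued;

/* Decide the no_interrupt flag for the next queued TX (IN) request. */
static int tx_no_interrupt(void)
{
	n_tx_req_queued++;
	if (n_tx_req_queued == DL_INTR_THRESHOLD) {
		n_tx_req_queued = 0;
		return 0;	/* keep the interrupt on every Nth request */
	}
	return 1;		/* suppress the completion interrupt */
}

int main(void)
{
	unsigned int i;

	for (i = 1; i <= 2 * DL_INTR_THRESHOLD; i++)
		printf("req %2u: no_interrupt=%d\n", i, tx_no_interrupt());
	return 0;
}

With the default threshold of 20, this reduces IN-completion interrupts by roughly a factor of 20 under sustained downlink traffic, at the cost of reclaiming completed requests slightly later (they are gathered when the Nth request finally interrupts).
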
+static void gbam_start_endless_rx(struct gbam_port *port); +static void gbam_start_endless_tx(struct gbam_port *port); /*---------------misc functions---------------- */ static void gbam_free_requests(struct usb_ep *ep, struct list_head *head) @@ -157,9 +186,9 @@ static void gbam_write_data_tohost(struct gbam_port *port) struct usb_request *req; struct usb_ep *ep; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } @@ -168,7 +197,7 @@ static void gbam_write_data_tohost(struct gbam_port *port) while (!list_empty(&d->tx_idle)) { skb = __skb_dequeue(&d->tx_skb_q); if (!skb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } req = list_first_entry(&d->tx_idle, @@ -177,12 +206,19 @@ static void gbam_write_data_tohost(struct gbam_port *port) req->context = skb; req->buf = skb->data; req->length = skb->len; + n_tx_req_queued++; + if (n_tx_req_queued == dl_intr_threshold) { + req->no_interrupt = 0; + n_tx_req_queued = 0; + } else { + req->no_interrupt = 1; + } list_del(&req->list); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_dl); ret = usb_ep_queue(ep, req, GFP_ATOMIC); - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_dl); if (ret) { pr_err("%s: usb epIn failed\n", __func__); list_add(&req->list, &d->tx_idle); @@ -191,7 +227,18 @@ static void gbam_write_data_tohost(struct gbam_port *port) } d->to_host++; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); +} + +static void gbam_write_data_tohost_w(struct work_struct *w) +{ + struct bam_ch_info *d; + struct gbam_port *port; + + d = container_of(w, struct bam_ch_info, write_tohost_w); + port = d->port; + + gbam_write_data_tohost(port); } void gbam_data_recv_cb(void *p, struct sk_buff *skb) @@ -206,9 +253,9 @@ void gbam_data_recv_cb(void *p, struct sk_buff *skb) pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__, port, port->port_num, d, skb->len); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } @@ -218,13 +265,13 @@ void gbam_data_recv_cb(void *p, struct sk_buff *skb) if (printk_ratelimit()) pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n", __func__, d->tohost_drp_cnt); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } __skb_queue_tail(&d->tx_skb_q, skb); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); gbam_write_data_tohost(port); } @@ -240,7 +287,7 @@ void gbam_data_write_done(void *p, struct sk_buff *skb) dev_kfree_skb_any(skb); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); d->pending_with_bam--; @@ -248,7 +295,7 @@ void gbam_data_write_done(void *p, struct sk_buff *skb) port, d, d->to_modem, d->pending_with_bam, port->port_num); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); queue_work(gbam_wq, &d->write_tobam_w); } @@ -265,18 +312,17 @@ static void gbam_data_write_tobam(struct work_struct *w) d = container_of(w, struct bam_ch_info, write_tobam_w); port = d->port; - 
spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } while (d->pending_with_bam < BAM_PENDING_LIMIT) { skb = __skb_dequeue(&d->rx_skb_q); - if (!skb) { - spin_unlock_irqrestore(&port->port_lock, flags); - return; - } + if (!skb) + break; + d->pending_with_bam++; d->to_modem++; @@ -284,9 +330,9 @@ static void gbam_data_write_tobam(struct work_struct *w) port, d, d->to_modem, d->pending_with_bam, port->port_num); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = msm_bam_dmux_write(d->id, skb); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { pr_debug("%s: write error:%d\n", __func__, ret); d->pending_with_bam--; @@ -299,7 +345,7 @@ static void gbam_data_write_tobam(struct work_struct *w) qlen = d->rx_skb_q.qlen; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD) gbam_start_rx(port); @@ -331,12 +377,12 @@ static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req) if (!port) return; - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_dl); d = &port->data_ch; list_add_tail(&req->list, &d->tx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_dl); - gbam_write_data_tohost(port); + queue_work(gbam_wq, &d->write_tohost_w); } static void @@ -369,7 +415,7 @@ gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) break; } - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_ul); if (queue) { __skb_queue_tail(&d->rx_skb_q, skb); queue_work(gbam_wq, &d->write_tobam_w); @@ -382,16 +428,16 @@ gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) { list_add_tail(&req->list, &d->rx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); return; } - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); if (!skb) { - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); return; } skb_reserve(skb, BAM_MUX_HDR); @@ -408,12 +454,26 @@ gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) pr_err("%s: data rx enqueue err %d\n", __func__, status); - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); } } +static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req) +{ + int status = req->status; + + pr_debug("%s status: %d\n", __func__, status); +} + +static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req) +{ + int status = req->status; + + pr_debug("%s status: %d\n", __func__, status); +} + static void gbam_start_rx(struct gbam_port *port) { struct usb_request *req; @@ -423,9 +483,9 @@ static void gbam_start_rx(struct gbam_port *port) int ret; struct sk_buff *skb; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } @@ -450,9 +510,9 @@ static void 
gbam_start_rx(struct gbam_port *port) req->length = bam_mux_rx_req_size; req->context = skb; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = usb_ep_queue(ep, req, GFP_ATOMIC); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { dev_kfree_skb_any(skb); @@ -466,7 +526,27 @@ static void gbam_start_rx(struct gbam_port *port) break; } } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); +} + +static void gbam_start_endless_rx(struct gbam_port *port) +{ + struct bam_ch_info *d = &port->data_ch; + int status; + + status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC); + if (status) + pr_err("%s: error enqueuing transfer, %d\n", __func__, status); +} + +static void gbam_start_endless_tx(struct gbam_port *port) +{ + struct bam_ch_info *d = &port->data_ch; + int status; + + status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC); + if (status) + pr_err("%s: error enqueuing transfer, %d\n", __func__, status); } static void gbam_start_io(struct gbam_port *port) @@ -478,9 +558,9 @@ static void gbam_start_io(struct gbam_port *port) pr_debug("%s: port:%p\n", __func__, port); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } @@ -493,6 +573,8 @@ static void gbam_start_io(struct gbam_port *port) return; } + spin_unlock_irqrestore(&port->port_lock_ul, flags); + spin_lock_irqsave(&port->port_lock_dl, flags); ep = port->port_usb->in; ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size, gbam_epin_complete, GFP_ATOMIC); @@ -502,7 +584,7 @@ static void gbam_start_io(struct gbam_port *port) return; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); /* queue out requests */ gbam_start_rx(port); @@ -520,6 +602,34 @@ static void gbam_notify(void *p, int event, unsigned long data) } } +static void gbam_free_buffers(struct gbam_port *port) +{ + struct sk_buff *skb; + unsigned long flags; + struct bam_ch_info *d; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + + if (!port || !port->port_usb) + goto free_buf_out; + + d = &port->data_ch; + + gbam_free_requests(port->port_usb->in, &d->tx_idle); + gbam_free_requests(port->port_usb->out, &d->rx_idle); + + while ((skb = __skb_dequeue(&d->tx_skb_q))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&d->rx_skb_q))) + dev_kfree_skb_any(skb); + +free_buf_out: + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); +} + static void gbam_disconnect_work(struct work_struct *w) { struct gbam_port *port = @@ -533,6 +643,24 @@ static void gbam_disconnect_work(struct work_struct *w) clear_bit(BAM_CH_OPENED, &d->flags); } +static void gbam2bam_disconnect_work(struct work_struct *w) +{ + struct gbam_port *port = + container_of(w, struct gbam_port, disconnect_w); + unsigned long flags; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + port->port_usb = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + /* disable endpoints */ + usb_ep_disable(port->gr->out); + usb_ep_disable(port->gr->in); + +} + static void gbam_connect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct 
gbam_port, connect_w); @@ -540,12 +668,15 @@ static void gbam_connect_work(struct work_struct *w) int ret; unsigned long flags; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); if (!test_bit(BAM_CH_READY, &d->flags)) return; @@ -563,30 +694,70 @@ static void gbam_connect_work(struct work_struct *w) pr_debug("%s: done\n", __func__); } -static void gbam_free_buffers(struct gbam_port *port) +static void gbam2bam_connect_work(struct work_struct *w) { - struct sk_buff *skb; - unsigned long flags; - struct bam_ch_info *d; + struct gbam_port *port = container_of(w, struct gbam_port, connect_w); + struct bam_ch_info *d = &port->data_ch; + u32 sps_params; + int ret; + unsigned long flags; - spin_lock_irqsave(&port->port_lock, flags); + ret = usb_ep_enable(port->gr->in, port->gr->in_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", + __func__, port->gr->in); + return; + } + port->gr->in->driver_data = port; - if (!port || !port->port_usb) - goto free_buf_out; + ret = usb_ep_enable(port->gr->out, port->gr->out_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", + __func__, port->gr->out); + port->gr->in->driver_data = 0; + return; + } + port->gr->out->driver_data = port; + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + port->port_usb = port->gr; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx, + &d->dst_pipe_idx); + if (ret) { + pr_err("%s: usb_bam_connect failed: err:%d\n", + __func__, ret); + return; + } - d = &port->data_ch; + d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL); + if (!d->rx_req) + return; - gbam_free_requests(port->port_usb->in, &d->tx_idle); - gbam_free_requests(port->port_usb->out, &d->rx_idle); + d->rx_req->context = port; + d->rx_req->complete = gbam_endless_rx_complete; + d->rx_req->length = 0; + sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx | + MSM_VENDOR_ID) & ~SPS_PARAMS_TBE; + d->rx_req->udc_priv = sps_params; + d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL); + if (!d->tx_req) + return; - while ((skb = __skb_dequeue(&d->tx_skb_q))) - dev_kfree_skb_any(skb); + d->tx_req->context = port; + d->tx_req->complete = gbam_endless_tx_complete; + d->tx_req->length = 0; + sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx | + MSM_VENDOR_ID) & ~SPS_PARAMS_TBE; + d->tx_req->udc_priv = sps_params; - while ((skb = __skb_dequeue(&d->rx_skb_q))) - dev_kfree_skb_any(skb); + /* queue in & out requests */ + gbam_start_endless_rx(port); + gbam_start_endless_tx(port); -free_buf_out: - spin_unlock_irqrestore(&port->port_lock, flags); + pr_debug("%s: done\n", __func__); } /* BAM data channel ready, allow attempt to open */ @@ -608,10 +779,12 @@ static int gbam_data_ch_probe(struct platform_device *pdev) set_bit(BAM_CH_READY, &d->flags); /* if usb is online, try opening bam_ch */ - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); if (port->port_usb) queue_work(gbam_wq, &port->connect_w); - 
spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); break; } @@ -638,12 +811,14 @@ static int gbam_data_ch_remove(struct platform_device *pdev) port = bam_ports[i].port; d = &port->data_ch; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); if (port->port_usb) { ep_in = port->port_usb->in; ep_out = port->port_usb->out; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); if (ep_in) usb_ep_fifo_flush(ep_in); @@ -654,6 +829,9 @@ static int gbam_data_ch_remove(struct platform_device *pdev) msm_bam_dmux_close(d->id); + /* bam dmux will free all pending skbs */ + d->pending_with_bam = 0; + clear_bit(BAM_CH_READY, &d->flags); clear_bit(BAM_CH_OPENED, &d->flags); } @@ -673,6 +851,13 @@ static void gbam_port_free(int portno) } } +static void gbam2bam_port_free(int portno) +{ + struct gbam_port *port = bam2bam_ports[portno]; + + kfree(port); +} + static int gbam_port_alloc(int portno) { struct gbam_port *port; @@ -686,7 +871,8 @@ static int gbam_port_alloc(int portno) port->port_num = portno; /* port initialization */ - spin_lock_init(&port->port_lock); + spin_lock_init(&port->port_lock_ul); + spin_lock_init(&port->port_lock_dl); INIT_WORK(&port->connect_w, gbam_connect_work); INIT_WORK(&port->disconnect_w, gbam_disconnect_work); @@ -696,6 +882,7 @@ static int gbam_port_alloc(int portno) INIT_LIST_HEAD(&d->tx_idle); INIT_LIST_HEAD(&d->rx_idle); INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam); + INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w); skb_queue_head_init(&d->tx_skb_q); skb_queue_head_init(&d->rx_skb_q); d->id = bam_ch_ids[portno]; @@ -709,6 +896,33 @@ static int gbam_port_alloc(int portno) pdrv->driver.owner = THIS_MODULE; platform_driver_register(pdrv); + pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); + + return 0; +} + +static int gbam2bam_port_alloc(int portno) +{ + struct gbam_port *port; + struct bam_ch_info *d; + + port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->port_num = portno; + + /* port initialization */ + spin_lock_init(&port->port_lock_ul); + spin_lock_init(&port->port_lock_dl); + + INIT_WORK(&port->connect_w, gbam2bam_connect_work); + INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work); + + /* data ch */ + d = &port->data_ch; + d->port = port; + bam2bam_ports[portno] = port; pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); @@ -736,7 +950,8 @@ static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, port = bam_ports[i].port; if (!port) continue; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); d = &port->data_ch; @@ -759,7 +974,8 @@ static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, test_bit(BAM_CH_OPENED, &d->flags), test_bit(BAM_CH_READY, &d->flags)); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); @@ -782,7 +998,8 @@ static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, if (!port) continue; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); d = &port->data_ch; @@ -792,7 +1009,8 
@@ static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, d->tohost_drp_cnt = 0; d->tomodem_drp_cnt = 0; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); } return count; } @@ -820,7 +1038,7 @@ static void gbam_debugfs_init(void) static void gam_debugfs_init(void) { } #endif -void gbam_disconnect(struct grmnet *gr, u8 port_num) +void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; unsigned long flags; @@ -828,8 +1046,17 @@ void gbam_disconnect(struct grmnet *gr, u8 port_num) pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); - if (port_num >= n_bam_ports) { - pr_err("%s: invalid portno#%d\n", __func__, port_num); + if (trans == USB_GADGET_XPORT_BAM && + port_num >= n_bam_ports) { + pr_err("%s: invalid bam portno#%d\n", + __func__, port_num); + return; + } + + if (trans == USB_GADGET_XPORT_BAM2BAM && + port_num >= n_bam2bam_ports) { + pr_err("%s: invalid bam2bam portno#%d\n", + __func__, port_num); return; } @@ -837,24 +1064,34 @@ void gbam_disconnect(struct grmnet *gr, u8 port_num) pr_err("%s: grmnet port is null\n", __func__); return; } + if (trans == USB_GADGET_XPORT_BAM) + port = bam_ports[port_num].port; + else + port = bam2bam_ports[port_num]; - port = bam_ports[port_num].port; d = &port->data_ch; + port->gr = gr; - gbam_free_buffers(port); + if (trans == USB_GADGET_XPORT_BAM) { + gbam_free_buffers(port); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); port->port_usb = 0; - spin_unlock_irqrestore(&port->port_lock, flags); + n_tx_req_queued = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); - /* disable endpoints */ - usb_ep_disable(gr->out); - usb_ep_disable(gr->in); + /* disable endpoints */ + usb_ep_disable(gr->out); + usb_ep_disable(gr->in); + } queue_work(gbam_wq, &port->disconnect_w); } -int gbam_connect(struct grmnet *gr, u8 port_num) +int gbam_connect(struct grmnet *gr, u8 port_num, + enum transport_type trans, u8 connection_idx) { struct gbam_port *port; struct bam_ch_info *d; @@ -863,7 +1100,12 @@ int gbam_connect(struct grmnet *gr, u8 port_num) pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); - if (port_num >= n_bam_ports) { + if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return -ENODEV; + } + + if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } @@ -873,52 +1115,66 @@ int gbam_connect(struct grmnet *gr, u8 port_num) return -ENODEV; } - port = bam_ports[port_num].port; + if (trans == USB_GADGET_XPORT_BAM) + port = bam_ports[port_num].port; + else + port = bam2bam_ports[port_num]; + d = &port->data_ch; - ret = usb_ep_enable(gr->in, gr->in_desc); - if (ret) { - pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", - __func__, gr->in); - return ret; - } - gr->in->driver_data = port; + if (trans == USB_GADGET_XPORT_BAM) { + ret = usb_ep_enable(gr->in, gr->in_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", + __func__, gr->in); + return ret; + } + gr->in->driver_data = port; - ret = usb_ep_enable(gr->out, gr->out_desc); - if (ret) { - pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", - __func__, gr->out); - gr->in->driver_data = 0; - return ret; - } - gr->out->driver_data = port; + ret 
= usb_ep_enable(gr->out, gr->out_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", + __func__, gr->out); + gr->in->driver_data = 0; + return ret; + } + gr->out->driver_data = port; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); port->port_usb = gr; - d->to_host = 0; - d->to_modem = 0; - d->pending_with_bam = 0; - d->tohost_drp_cnt = 0; - d->tomodem_drp_cnt = 0; - spin_unlock_irqrestore(&port->port_lock, flags); + d->to_host = 0; + d->to_modem = 0; + d->pending_with_bam = 0; + d->tohost_drp_cnt = 0; + d->tomodem_drp_cnt = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + } + if (trans == USB_GADGET_XPORT_BAM2BAM) { + port->gr = gr; + d->connection_idx = connection_idx; + } queue_work(gbam_wq, &port->connect_w); return 0; } -int gbam_setup(unsigned int count) +int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port) { int i; int ret; - pr_debug("%s: requested ports:%d\n", __func__, count); + pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n", + __func__, no_bam_port, no_bam2bam_port); - if (!count || count > BAM_N_PORTS) { - pr_err("%s: Invalid num of ports count:%d\n", - __func__, count); + if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS + || no_bam2bam_port > BAM2BAM_N_PORTS) { + pr_err("%s: Invalid num of ports count:%d,%d\n", + __func__, no_bam_port, no_bam2bam_port); return -EINVAL; } @@ -929,7 +1185,7 @@ int gbam_setup(unsigned int count) return -ENOMEM; } - for (i = 0; i < count; i++) { + for (i = 0; i < no_bam_port; i++) { n_bam_ports++; ret = gbam_port_alloc(i); if (ret) { @@ -939,13 +1195,23 @@ int gbam_setup(unsigned int count) } } + for (i = 0; i < no_bam2bam_port; i++) { + n_bam2bam_ports++; + ret = gbam2bam_port_alloc(i); + if (ret) { + n_bam2bam_ports--; + pr_err("%s: Unable to alloc port:%d\n", __func__, i); + goto free_bam_ports; + } + } gbam_debugfs_init(); - return 0; + free_bam_ports: for (i = 0; i < n_bam_ports; i++) gbam_port_free(i); - + for (i = 0; i < n_bam2bam_ports; i++) + gbam2bam_port_free(i); destroy_workqueue(gbam_wq); return ret; diff --git a/drivers/usb/gadget/u_ctrl_hsic.c b/drivers/usb/gadget/u_ctrl_hsic.c new file mode 100644 index 00000000..fdfab968 --- /dev/null +++ b/drivers/usb/gadget/u_ctrl_hsic.c @@ -0,0 +1,617 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* from cdc-acm.h */ +#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */ +#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */ +#define ACM_CTRL_OVERRUN (1 << 6) +#define ACM_CTRL_PARITY (1 << 5) +#define ACM_CTRL_FRAMING (1 << 4) +#define ACM_CTRL_RI (1 << 3) +#define ACM_CTRL_BRK (1 << 2) +#define ACM_CTRL_DSR (1 << 1) +#define ACM_CTRL_DCD (1 << 0) + + +static unsigned int no_ctrl_ports; + +static const char *ctrl_bridge_names[] = { + "dun_ctrl_hsic0", + "rmnet_ctrl_hsic0" +}; + +#define CTRL_BRIDGE_NAME_MAX_LEN 20 +#define READ_BUF_LEN 1024 + +#define CH_OPENED 0 +#define CH_READY 1 + +struct gctrl_port { + /* port */ + unsigned port_num; + + /* gadget */ + spinlock_t port_lock; + void *port_usb; + + /* work queue*/ + struct workqueue_struct *wq; + struct work_struct connect_w; + struct work_struct disconnect_w; + + enum gadget_type gtype; + + /*ctrl pkt response cb*/ + int (*send_cpkt_response)(void *g, void *buf, size_t len); + + struct bridge brdg; + + /* bridge status */ + unsigned long bridge_sts; + + /* control bits */ + unsigned cbits_tomodem; + unsigned cbits_tohost; + + /* counters */ + unsigned long to_modem; + unsigned long to_host; + unsigned long drp_cpkt_cnt; +}; + +static struct { + struct gctrl_port *port; + struct platform_driver pdrv; +} gctrl_ports[NUM_PORTS]; + +static int ghsic_ctrl_receive(void *dev, void *buf, size_t actual) +{ + struct gctrl_port *port = dev; + int retval = 0; + + pr_debug_ratelimited("%s: read complete bytes read: %d\n", + __func__, actual); + + /* send it to USB here */ + if (port && port->send_cpkt_response) { + retval = port->send_cpkt_response(port->port_usb, buf, actual); + port->to_host++; + } + + return retval; +} + +static int +ghsic_send_cpkt_tomodem(u8 portno, void *buf, size_t len) +{ + void *cbuf; + struct gctrl_port *port; + + if (portno >= no_ctrl_ports) { + pr_err("%s: Invalid portno#%d\n", __func__, portno); + return -ENODEV; + } + + port = gctrl_ports[portno].port; + if (!port) { + pr_err("%s: port is null\n", __func__); + return -ENODEV; + } + + cbuf = kmalloc(len, GFP_ATOMIC); + if (!cbuf) + return -ENOMEM; + + memcpy(cbuf, buf, len); + + /* drop cpkt if ch is not open */ + if (!test_bit(CH_OPENED, &port->bridge_sts)) { + port->drp_cpkt_cnt++; + kfree(cbuf); + return 0; + } + + pr_debug("%s: ctrl_pkt:%d bytes\n", __func__, len); + + ctrl_bridge_write(port->brdg.ch_id, cbuf, len); + + port->to_modem++; + + return 0; +} + +static void +ghsic_send_cbits_tomodem(void *gptr, u8 portno, int cbits) +{ + struct gctrl_port *port; + + if (portno >= no_ctrl_ports || !gptr) { + pr_err("%s: Invalid portno#%d\n", __func__, portno); + return; + } + + port = gctrl_ports[portno].port; + if (!port) { + pr_err("%s: port is null\n", __func__); + return; + } + + if (cbits == port->cbits_tomodem) + return; + + port->cbits_tomodem = cbits; + + if (!test_bit(CH_OPENED, &port->bridge_sts)) + return; + + pr_debug("%s: ctrl_tomodem:%d\n", __func__, cbits); + + ctrl_bridge_set_cbits(port->brdg.ch_id, cbits); +} + +static void ghsic_ctrl_connect_w(struct work_struct *w) +{ + struct gserial *gser = NULL; + struct grmnet *gr = NULL; + struct gctrl_port *port = + container_of(w, struct gctrl_port, connect_w); + unsigned long flags; + int retval; + unsigned cbits; + + if (!port || !test_bit(CH_READY, &port->bridge_sts)) + return; + + pr_debug("%s: port:%p\n", __func__, port); + + retval = ctrl_bridge_open(&port->brdg); + 
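+	/*
+	 * ctrl_bridge_open() hands this port's bridge struct to the HSIC
+	 * control bridge; a non-zero return below means the bridge could
+	 * not be opened and the connect work simply gives up.
+	 */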
if (retval) { + pr_err("%s: ctrl bridge open failed :%d\n", __func__, retval); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + ctrl_bridge_close(port->brdg.ch_id); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + set_bit(CH_OPENED, &port->bridge_sts); + spin_unlock_irqrestore(&port->port_lock, flags); + + cbits = ctrl_bridge_get_cbits_tohost(port->brdg.ch_id); + + if (port->gtype == USB_GADGET_SERIAL && (cbits & ACM_CTRL_DCD)) { + gser = port->port_usb; + if (gser && gser->connect) + gser->connect(gser); + return; + } + + if (port->gtype == USB_GADGET_RMNET) { + gr = port->port_usb; + if (gr && gr->connect) + gr->connect(gr); + } +} + +int ghsic_ctrl_connect(void *gptr, int port_num) +{ + struct gctrl_port *port; + struct gserial *gser; + struct grmnet *gr; + unsigned long flags; + + pr_debug("%s: port#%d\n", __func__, port_num); + + if (port_num > no_ctrl_ports || !gptr) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return -ENODEV; + } + + port = gctrl_ports[port_num].port; + if (!port) { + pr_err("%s: port is null\n", __func__); + return -ENODEV; + } + + spin_lock_irqsave(&port->port_lock, flags); + if (port->gtype == USB_GADGET_SERIAL) { + gser = gptr; + gser->notify_modem = ghsic_send_cbits_tomodem; + } + + if (port->gtype == USB_GADGET_RMNET) { + gr = gptr; + port->send_cpkt_response = gr->send_cpkt_response; + gr->send_encap_cmd = ghsic_send_cpkt_tomodem; + gr->notify_modem = ghsic_send_cbits_tomodem; + } + + port->port_usb = gptr; + port->to_host = 0; + port->to_modem = 0; + port->drp_cpkt_cnt = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->connect_w); + + return 0; +} + +static void gctrl_disconnect_w(struct work_struct *w) +{ + struct gctrl_port *port = + container_of(w, struct gctrl_port, disconnect_w); + + if (!test_bit(CH_OPENED, &port->bridge_sts)) + return; + + /* send the dtr zero */ + ctrl_bridge_close(port->brdg.ch_id); + clear_bit(CH_OPENED, &port->bridge_sts); +} + +void ghsic_ctrl_disconnect(void *gptr, int port_num) +{ + struct gctrl_port *port; + struct gserial *gser = NULL; + struct grmnet *gr = NULL; + unsigned long flags; + + pr_debug("%s: port#%d\n", __func__, port_num); + + port = gctrl_ports[port_num].port; + + if (port_num > no_ctrl_ports) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return; + } + + if (!gptr || !port) { + pr_err("%s: grmnet port is null\n", __func__); + return; + } + + if (port->gtype == USB_GADGET_SERIAL) + gser = gptr; + else + gr = gptr; + + spin_lock_irqsave(&port->port_lock, flags); + if (gr) { + gr->send_encap_cmd = 0; + gr->notify_modem = 0; + } + + if (gser) + gser->notify_modem = 0; + port->cbits_tomodem = 0; + port->port_usb = 0; + port->send_cpkt_response = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->disconnect_w); +} + +static void ghsic_ctrl_status(void *ctxt, unsigned int ctrl_bits) +{ + struct gctrl_port *port = ctxt; + struct gserial *gser; + + pr_debug("%s - input control lines: dcd%c dsr%c break%c " + "ring%c framing%c parity%c overrun%c\n", __func__, + ctrl_bits & ACM_CTRL_DCD ? '+' : '-', + ctrl_bits & ACM_CTRL_DSR ? '+' : '-', + ctrl_bits & ACM_CTRL_BRK ? '+' : '-', + ctrl_bits & ACM_CTRL_RI ? '+' : '-', + ctrl_bits & ACM_CTRL_FRAMING ? '+' : '-', + ctrl_bits & ACM_CTRL_PARITY ? '+' : '-', + ctrl_bits & ACM_CTRL_OVERRUN ? 
'+' : '-'); + + port->cbits_tohost = ctrl_bits; + gser = port->port_usb; + if (gser && gser->send_modem_ctrl_bits) + gser->send_modem_ctrl_bits(gser, ctrl_bits); +} + +static int ghsic_ctrl_probe(struct platform_device *pdev) +{ + struct gctrl_port *port; + unsigned long flags; + + pr_debug("%s: name:%s\n", __func__, pdev->name); + + if (pdev->id >= no_ctrl_ports) { + pr_err("%s: invalid port: %d\n", __func__, pdev->id); + return -EINVAL; + } + + port = gctrl_ports[pdev->id].port; + set_bit(CH_READY, &port->bridge_sts); + + /* if usb is online, start read */ + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) + queue_work(port->wq, &port->connect_w); + spin_unlock_irqrestore(&port->port_lock, flags); + + return 0; +} + +static int ghsic_ctrl_remove(struct platform_device *pdev) +{ + struct gctrl_port *port; + struct gserial *gser = NULL; + struct grmnet *gr = NULL; + unsigned long flags; + + pr_debug("%s: name:%s\n", __func__, pdev->name); + + if (pdev->id >= no_ctrl_ports) { + pr_err("%s: invalid port: %d\n", __func__, pdev->id); + return -EINVAL; + } + + port = gctrl_ports[pdev->id].port; + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + goto not_ready; + } + + if (port->gtype == USB_GADGET_SERIAL) + gser = port->port_usb; + else + gr = port->port_usb; + + port->cbits_tohost = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + if (gr && gr->disconnect) + gr->disconnect(gr); + + if (gser && gser->disconnect) + gser->disconnect(gser); + + ctrl_bridge_close(port->brdg.ch_id); + + clear_bit(CH_OPENED, &port->bridge_sts); +not_ready: + clear_bit(CH_READY, &port->bridge_sts); + + return 0; +} + +static void ghsic_ctrl_port_free(int portno) +{ + struct gctrl_port *port = gctrl_ports[portno].port; + struct platform_driver *pdrv = &gctrl_ports[portno].pdrv; + + destroy_workqueue(port->wq); + kfree(port); + + if (pdrv) + platform_driver_unregister(pdrv); +} + +static int gctrl_port_alloc(int portno, enum gadget_type gtype) +{ + struct gctrl_port *port; + struct platform_driver *pdrv; + + port = kzalloc(sizeof(struct gctrl_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->wq = create_singlethread_workqueue(ctrl_bridge_names[portno]); + if (!port->wq) { + pr_err("%s: Unable to create workqueue:%s\n", + __func__, ctrl_bridge_names[portno]); + return -ENOMEM; + } + + port->port_num = portno; + port->gtype = gtype; + + spin_lock_init(&port->port_lock); + + INIT_WORK(&port->connect_w, ghsic_ctrl_connect_w); + INIT_WORK(&port->disconnect_w, gctrl_disconnect_w); + + port->brdg.ch_id = portno; + port->brdg.ctx = port; + port->brdg.ops.send_pkt = ghsic_ctrl_receive; + if (port->gtype == USB_GADGET_SERIAL) + port->brdg.ops.send_cbits = ghsic_ctrl_status; + gctrl_ports[portno].port = port; + + pdrv = &gctrl_ports[portno].pdrv; + pdrv->probe = ghsic_ctrl_probe; + pdrv->remove = ghsic_ctrl_remove; + pdrv->driver.name = ctrl_bridge_names[portno]; + pdrv->driver.owner = THIS_MODULE; + + platform_driver_register(pdrv); + + pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); + + return 0; +} + +int ghsic_ctrl_setup(unsigned int num_ports, enum gadget_type gtype) +{ + int first_port_id = no_ctrl_ports; + int total_num_ports = num_ports + no_ctrl_ports; + int i; + int ret = 0; + + if (!num_ports || total_num_ports > NUM_PORTS) { + pr_err("%s: Invalid num of ports count:%d\n", + __func__, num_ports); + return -EINVAL; + } + + pr_debug("%s: requested ports:%d\n", __func__, num_ports); + + for (i = 
first_port_id; i < (first_port_id + num_ports); i++) { + + /*probe can be called while port_alloc,so update no_ctrl_ports*/ + no_ctrl_ports++; + ret = gctrl_port_alloc(i, gtype); + if (ret) { + no_ctrl_ports--; + pr_err("%s: Unable to alloc port:%d\n", __func__, i); + goto free_ports; + } + } + + return first_port_id; + +free_ports: + for (i = first_port_id; i < no_ctrl_ports; i++) + ghsic_ctrl_port_free(i); + no_ctrl_ports = first_port_id; + return ret; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t gctrl_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct gctrl_port *port; + struct platform_driver *pdrv; + char *buf; + unsigned long flags; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < no_ctrl_ports; i++) { + port = gctrl_ports[i].port; + if (!port) + continue; + pdrv = &gctrl_ports[i].pdrv; + spin_lock_irqsave(&port->port_lock, flags); + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName: %s\n" + "#PORT:%d port: %p\n" + "to_usbhost: %lu\n" + "to_modem: %lu\n" + "cpkt_drp_cnt: %lu\n" + "DTR: %s\n" + "ch_open: %d\n" + "ch_ready: %d\n", + pdrv->driver.name, + i, port, + port->to_host, port->to_modem, + port->drp_cpkt_cnt, + port->cbits_tomodem ? "HIGH" : "LOW", + test_bit(CH_OPENED, &port->bridge_sts), + test_bit(CH_READY, &port->bridge_sts)); + + spin_unlock_irqrestore(&port->port_lock, flags); + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t gctrl_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct gctrl_port *port; + int i; + unsigned long flags; + + for (i = 0; i < no_ctrl_ports; i++) { + port = gctrl_ports[i].port; + if (!port) + continue; + + spin_lock_irqsave(&port->port_lock, flags); + port->to_host = 0; + port->to_modem = 0; + port->drp_cpkt_cnt = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + } + return count; +} + +const struct file_operations gctrl_stats_ops = { + .read = gctrl_read_stats, + .write = gctrl_reset_stats, +}; + +struct dentry *gctrl_dent; +struct dentry *gctrl_dfile; +static void gctrl_debugfs_init(void) +{ + gctrl_dent = debugfs_create_dir("ghsic_ctrl_xport", 0); + if (IS_ERR(gctrl_dent)) + return; + + gctrl_dfile = + debugfs_create_file("status", 0444, gctrl_dent, 0, + &gctrl_stats_ops); + if (!gctrl_dfile || IS_ERR(gctrl_dfile)) + debugfs_remove(gctrl_dent); +} + +static void gctrl_debugfs_exit(void) +{ + debugfs_remove(gctrl_dfile); + debugfs_remove(gctrl_dent); +} + +#else +static void gctrl_debugfs_init(void) { } +static void gctrl_debugfs_exit(void) { } +#endif + +static int __init gctrl_init(void) +{ + gctrl_debugfs_init(); + + return 0; +} +module_init(gctrl_init); + +static void __exit gctrl_exit(void) +{ + gctrl_debugfs_exit(); +} +module_exit(gctrl_exit); +MODULE_DESCRIPTION("hsic control xport driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/gadget/u_data_hsic.c b/drivers/usb/gadget/u_data_hsic.c new file mode 100644 index 00000000..6f5e7b3c --- /dev/null +++ b/drivers/usb/gadget/u_data_hsic.c @@ -0,0 +1,962 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned int no_data_ports; + +static const char *data_bridge_names[] = { + "dun_data_hsic0", + "rmnet_data_hsic0" +}; + +#define DATA_BRIDGE_NAME_MAX_LEN 20 + +#define GHSIC_DATA_RMNET_RX_Q_SIZE 50 +#define GHSIC_DATA_RMNET_TX_Q_SIZE 300 +#define GHSIC_DATA_SERIAL_RX_Q_SIZE 2 +#define GHSIC_DATA_SERIAL_TX_Q_SIZE 2 +#define GHSIC_DATA_RX_REQ_SIZE 2048 + +static unsigned int ghsic_data_rmnet_tx_q_size = GHSIC_DATA_RMNET_TX_Q_SIZE; +module_param(ghsic_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_rmnet_rx_q_size = GHSIC_DATA_RMNET_RX_Q_SIZE; +module_param(ghsic_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_serial_tx_q_size = GHSIC_DATA_SERIAL_TX_Q_SIZE; +module_param(ghsic_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_serial_rx_q_size = GHSIC_DATA_SERIAL_RX_Q_SIZE; +module_param(ghsic_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_rx_req_size = GHSIC_DATA_RX_REQ_SIZE; +module_param(ghsic_data_rx_req_size, uint, S_IRUGO | S_IWUSR); + +/*flow ctrl*/ +#define GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD 500 +#define GHSIC_DATA_FLOW_CTRL_DISABLE 300 +#define GHSIC_DATA_FLOW_CTRL_SUPPORT 1 +#define GHSIC_DATA_PENDLIMIT_WITH_BRIDGE 500 + +static unsigned int ghsic_data_fctrl_support = GHSIC_DATA_FLOW_CTRL_SUPPORT; +module_param(ghsic_data_fctrl_support, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_fctrl_en_thld = + GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD; +module_param(ghsic_data_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_fctrl_dis_thld = GHSIC_DATA_FLOW_CTRL_DISABLE; +module_param(ghsic_data_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_pend_limit_with_bridge = + GHSIC_DATA_PENDLIMIT_WITH_BRIDGE; +module_param(ghsic_data_pend_limit_with_bridge, uint, S_IRUGO | S_IWUSR); + +#define CH_OPENED 0 +#define CH_READY 1 + +struct gdata_port { + /* port */ + unsigned port_num; + + /* gadget */ + spinlock_t port_lock; + void *port_usb; + struct usb_ep *in; + struct usb_ep *out; + + enum gadget_type gtype; + + /* data transfer queues */ + unsigned int tx_q_size; + struct list_head tx_idle; + struct sk_buff_head tx_skb_q; + + unsigned int rx_q_size; + struct list_head rx_idle; + struct sk_buff_head rx_skb_q; + + /* work */ + struct workqueue_struct *wq; + struct work_struct connect_w; + struct work_struct disconnect_w; + struct work_struct write_tomdm_w; + struct work_struct write_tohost_w; + + struct bridge brdg; + + /*bridge status*/ + unsigned long bridge_sts; + + /*counters*/ + unsigned long to_modem; + unsigned long to_host; + unsigned int rx_throttled_cnt; + unsigned int rx_unthrottled_cnt; + unsigned int tx_throttled_cnt; + unsigned int tx_unthrottled_cnt; + unsigned int tomodem_drp_cnt; + unsigned int unthrottled_pnd_skbs; +}; + +static struct { + struct gdata_port *port; + struct platform_driver pdrv; +} gdata_ports[NUM_PORTS]; + +static void ghsic_data_start_rx(struct gdata_port *port); + +static void ghsic_data_free_requests(struct usb_ep *ep, struct list_head *head) +{ + struct usb_request *req; + + while 
(!list_empty(head)) { + req = list_entry(head->next, struct usb_request, list); + list_del(&req->list); + usb_ep_free_request(ep, req); + } +} + +static int ghsic_data_alloc_requests(struct usb_ep *ep, struct list_head *head, + int num, + void (*cb)(struct usb_ep *ep, struct usb_request *), + gfp_t flags) +{ + int i; + struct usb_request *req; + + pr_debug("%s: ep:%s head:%p num:%d cb:%p", __func__, + ep->name, head, num, cb); + + for (i = 0; i < num; i++) { + req = usb_ep_alloc_request(ep, flags); + if (!req) { + pr_debug("%s: req allocated:%d\n", __func__, i); + return list_empty(head) ? -ENOMEM : 0; + } + req->complete = cb; + list_add(&req->list, head); + } + + return 0; +} + +static void ghsic_data_unthrottle_tx(void *ctx) +{ + struct gdata_port *port = ctx; + unsigned long flags; + + if (!port) + return; + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) { + port->tx_unthrottled_cnt++; + queue_work(port->wq, &port->write_tomdm_w); + pr_debug("%s: port num =%d unthrottled\n", __func__, + port->port_num); + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static void ghsic_data_write_tohost(struct work_struct *w) +{ + unsigned long flags; + struct sk_buff *skb; + int ret; + struct usb_request *req; + struct usb_ep *ep; + struct gdata_port *port; + + port = container_of(w, struct gdata_port, write_tohost_w); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->in; + + while (!list_empty(&port->tx_idle)) { + skb = __skb_dequeue(&port->tx_skb_q); + if (!skb) + break; + + req = list_first_entry(&port->tx_idle, struct usb_request, + list); + req->context = skb; + req->buf = skb->data; + req->length = skb->len; + + list_del(&req->list); + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_ep_queue(ep, req, GFP_KERNEL); + spin_lock_irqsave(&port->port_lock, flags); + if (ret) { + pr_err("%s: usb epIn failed\n", __func__); + list_add(&req->list, &port->tx_idle); + dev_kfree_skb_any(skb); + break; + } + port->to_host++; + if (ghsic_data_fctrl_support && + port->tx_skb_q.qlen <= ghsic_data_fctrl_dis_thld && + test_and_clear_bit(RX_THROTTLED, &port->brdg.flags)) { + port->rx_unthrottled_cnt++; + port->unthrottled_pnd_skbs = port->tx_skb_q.qlen; + pr_debug_ratelimited("%s: disable flow ctrl:" + " tx skbq len: %u\n", + __func__, port->tx_skb_q.qlen); + data_bridge_unthrottle_rx(port->brdg.ch_id); + } + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static int ghsic_data_receive(void *p, void *data, size_t len) +{ + struct gdata_port *port = p; + unsigned long flags; + struct sk_buff *skb = data; + + if (!port) { + dev_kfree_skb_any(skb); + return -EINVAL; + } + + pr_debug("%s: p:%p#%d skb_len:%d\n", __func__, + port, port->port_num, skb->len); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + dev_kfree_skb_any(skb); + return -ENOTCONN; + } + + __skb_queue_tail(&port->tx_skb_q, skb); + + if (ghsic_data_fctrl_support && + port->tx_skb_q.qlen >= ghsic_data_fctrl_en_thld) { + set_bit(RX_THROTTLED, &port->brdg.flags); + port->rx_throttled_cnt++; + pr_debug_ratelimited("%s: flow ctrl enabled: tx skbq len: %u\n", + __func__, port->tx_skb_q.qlen); + spin_unlock_irqrestore(&port->port_lock, flags); + queue_work(port->wq, &port->write_tohost_w); + return -EBUSY; + } + + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->write_tohost_w); + + return 0; +} 
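/*
 * A minimal, self-contained sketch of the high/low watermark flow control
 * used by ghsic_data_receive()/ghsic_data_write_tohost() above: the bridge
 * RX path is throttled once the USB TX backlog reaches
 * ghsic_data_fctrl_en_thld and is unthrottled again once the writer drains
 * it to ghsic_data_fctrl_dis_thld. The wm_*_example names below are
 * hypothetical and only illustrate the pattern, they are not part of the
 * driver.
 */
#include <stdbool.h>

struct wm_queue_example {
	unsigned int qlen;	/* current backlog (tx_skb_q.qlen above) */
	unsigned int en_thld;	/* throttle when qlen >= en_thld */
	unsigned int dis_thld;	/* unthrottle when qlen <= dis_thld */
	bool throttled;		/* mirrors the RX_THROTTLED flag */
};

/* producer side: returns true if the feeder should back off (-EBUSY above) */
static bool wm_enqueue_example(struct wm_queue_example *q)
{
	q->qlen++;
	if (!q->throttled && q->qlen >= q->en_thld)
		q->throttled = true;
	return q->throttled;
}

/* consumer side: returns true if the feeder may be unthrottled again */
static bool wm_dequeue_example(struct wm_queue_example *q)
{
	if (q->qlen)
		q->qlen--;
	if (q->throttled && q->qlen <= q->dis_thld) {
		q->throttled = false;
		return true;
	}
	return false;
}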
+ +static void ghsic_data_write_tomdm(struct work_struct *w) +{ + struct gdata_port *port; + struct sk_buff *skb; + unsigned long flags; + int ret; + + port = container_of(w, struct gdata_port, write_tomdm_w); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + if (test_bit(TX_THROTTLED, &port->brdg.flags)) { + spin_unlock_irqrestore(&port->port_lock, flags); + goto start_rx; + } + + while ((skb = __skb_dequeue(&port->rx_skb_q))) { + pr_debug("%s: port:%p tom:%lu pno:%d\n", __func__, + port, port->to_modem, port->port_num); + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = data_bridge_write(port->brdg.ch_id, skb); + spin_lock_irqsave(&port->port_lock, flags); + if (ret < 0) { + if (ret == -EBUSY) { + /*flow control*/ + port->tx_throttled_cnt++; + break; + } + pr_err_ratelimited("%s: write error:%d\n", + __func__, ret); + port->tomodem_drp_cnt++; + dev_kfree_skb_any(skb); + break; + } + port->to_modem++; + } + spin_unlock_irqrestore(&port->port_lock, flags); +start_rx: + ghsic_data_start_rx(port); +} + +static void ghsic_data_epin_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gdata_port *port = ep->driver_data; + struct sk_buff *skb = req->context; + int status = req->status; + + switch (status) { + case 0: + /* successful completion */ + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + dev_kfree_skb_any(skb); + req->buf = 0; + usb_ep_free_request(ep, req); + return; + default: + pr_err("%s: data tx ep error %d\n", __func__, status); + break; + } + + dev_kfree_skb_any(skb); + + spin_lock(&port->port_lock); + list_add_tail(&req->list, &port->tx_idle); + spin_unlock(&port->port_lock); + + queue_work(port->wq, &port->write_tohost_w); +} + +static void +ghsic_data_epout_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gdata_port *port = ep->driver_data; + struct sk_buff *skb = req->context; + int status = req->status; + int queue = 0; + + switch (status) { + case 0: + skb_put(skb, req->actual); + queue = 1; + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* cable disconnection */ + dev_kfree_skb_any(skb); + req->buf = 0; + usb_ep_free_request(ep, req); + return; + default: + pr_err_ratelimited("%s: %s response error %d, %d/%d\n", + __func__, ep->name, status, + req->actual, req->length); + dev_kfree_skb_any(skb); + break; + } + + spin_lock(&port->port_lock); + if (queue) { + __skb_queue_tail(&port->rx_skb_q, skb); + list_add_tail(&req->list, &port->rx_idle); + queue_work(port->wq, &port->write_tomdm_w); + } + spin_unlock(&port->port_lock); +} + +static void ghsic_data_start_rx(struct gdata_port *port) +{ + struct usb_request *req; + struct usb_ep *ep; + unsigned long flags; + int ret; + struct sk_buff *skb; + + pr_debug("%s: port:%p\n", __func__, port); + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->out; + + while (port->port_usb && !list_empty(&port->rx_idle)) { + if (port->rx_skb_q.qlen > ghsic_data_pend_limit_with_bridge) + break; + + req = list_first_entry(&port->rx_idle, + struct usb_request, list); + + skb = alloc_skb(ghsic_data_rx_req_size, GFP_ATOMIC); + if (!skb) + break; + + list_del(&req->list); + req->buf = skb->data; + req->length = ghsic_data_rx_req_size; + req->context = skb; + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_ep_queue(ep, req, GFP_KERNEL); + spin_lock_irqsave(&port->port_lock, flags); + if 
(ret) { + dev_kfree_skb_any(skb); + + pr_err_ratelimited("%s: rx queue failed\n", __func__); + + if (port->port_usb) + list_add(&req->list, &port->rx_idle); + else + usb_ep_free_request(ep, req); + break; + } + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static void ghsic_data_start_io(struct gdata_port *port) +{ + unsigned long flags; + struct usb_ep *ep; + int ret; + + pr_debug("%s: port:%p\n", __func__, port); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->out; + ret = ghsic_data_alloc_requests(ep, &port->rx_idle, + port->rx_q_size, ghsic_data_epout_complete, GFP_ATOMIC); + if (ret) { + pr_err("%s: rx req allocation failed\n", __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->in; + ret = ghsic_data_alloc_requests(ep, &port->tx_idle, + port->tx_q_size, ghsic_data_epin_complete, GFP_ATOMIC); + if (ret) { + pr_err("%s: tx req allocation failed\n", __func__); + ghsic_data_free_requests(ep, &port->rx_idle); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + spin_unlock_irqrestore(&port->port_lock, flags); + + /* queue out requests */ + ghsic_data_start_rx(port); +} + +static void ghsic_data_connect_w(struct work_struct *w) +{ + struct gdata_port *port = + container_of(w, struct gdata_port, connect_w); + int ret; + + if (!port || !test_bit(CH_READY, &port->bridge_sts)) + return; + + pr_debug("%s: port:%p\n", __func__, port); + + ret = data_bridge_open(&port->brdg); + if (ret) { + pr_err("%s: unable open bridge ch:%d err:%d\n", + __func__, port->brdg.ch_id, ret); + return; + } + + set_bit(CH_OPENED, &port->bridge_sts); + + ghsic_data_start_io(port); +} + +static void ghsic_data_disconnect_w(struct work_struct *w) +{ + struct gdata_port *port = + container_of(w, struct gdata_port, disconnect_w); + + if (!test_bit(CH_OPENED, &port->bridge_sts)) + return; + + data_bridge_close(port->brdg.ch_id); + clear_bit(CH_OPENED, &port->bridge_sts); +} + +static void ghsic_data_free_buffers(struct gdata_port *port) +{ + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&port->port_lock, flags); + + if (!port || !port->port_usb) + goto free_buf_out; + + ghsic_data_free_requests(port->in, &port->tx_idle); + ghsic_data_free_requests(port->out, &port->rx_idle); + + while ((skb = __skb_dequeue(&port->tx_skb_q))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&port->rx_skb_q))) + dev_kfree_skb_any(skb); + +free_buf_out: + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static int ghsic_data_probe(struct platform_device *pdev) +{ + struct gdata_port *port; + unsigned long flags; + + pr_debug("%s: name:%s no_data_ports= %d\n", + __func__, pdev->name, no_data_ports); + + if (pdev->id >= no_data_ports) { + pr_err("%s: invalid port: %d\n", __func__, pdev->id); + return -EINVAL; + } + + port = gdata_ports[pdev->id].port; + set_bit(CH_READY, &port->bridge_sts); + + spin_lock_irqsave(&port->port_lock, flags); + /* if usb is online, try opening bridge */ + if (port->port_usb) + queue_work(port->wq, &port->connect_w); + spin_unlock_irqrestore(&port->port_lock, flags); + + return 0; +} + +/* mdm disconnect */ +static int ghsic_data_remove(struct platform_device *pdev) +{ + struct gdata_port *port; + struct usb_ep *ep_in = NULL; + struct usb_ep *ep_out = NULL; + unsigned long flags; + + pr_debug("%s: name:%s\n", __func__, pdev->name); + + if (pdev->id >= no_data_ports) { + pr_err("%s: invalid port: %d\n", __func__, 
pdev->id); + return -EINVAL; + } + + port = gdata_ports[pdev->id].port; + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) { + ep_in = port->in; + ep_out = port->out; + } + spin_unlock_irqrestore(&port->port_lock, flags); + + if (ep_in) + usb_ep_fifo_flush(ep_in); + if (ep_out) + usb_ep_fifo_flush(ep_out); + + ghsic_data_free_buffers(port); + + data_bridge_close(port->brdg.ch_id); + + clear_bit(CH_READY, &port->bridge_sts); + clear_bit(CH_OPENED, &port->bridge_sts); + + return 0; +} + +static void ghsic_data_port_free(int portno) +{ + struct gdata_port *port = gdata_ports[portno].port; + struct platform_driver *pdrv = &gdata_ports[portno].pdrv; + + destroy_workqueue(port->wq); + kfree(port); + + if (pdrv) + platform_driver_unregister(pdrv); +} + +static int ghsic_data_port_alloc(unsigned port_num, enum gadget_type gtype) +{ + struct gdata_port *port; + struct platform_driver *pdrv; + + port = kzalloc(sizeof(struct gdata_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->wq = create_singlethread_workqueue(data_bridge_names[port_num]); + if (!port->wq) { + pr_err("%s: Unable to create workqueue:%s\n", + __func__, data_bridge_names[port_num]); + kfree(port); + return -ENOMEM; + } + port->port_num = port_num; + + /* port initialization */ + spin_lock_init(&port->port_lock); + + INIT_WORK(&port->connect_w, ghsic_data_connect_w); + INIT_WORK(&port->disconnect_w, ghsic_data_disconnect_w); + INIT_WORK(&port->write_tohost_w, ghsic_data_write_tohost); + INIT_WORK(&port->write_tomdm_w, ghsic_data_write_tomdm); + + INIT_LIST_HEAD(&port->tx_idle); + INIT_LIST_HEAD(&port->rx_idle); + + skb_queue_head_init(&port->tx_skb_q); + skb_queue_head_init(&port->rx_skb_q); + + port->gtype = gtype; + port->brdg.ch_id = port_num; + port->brdg.ctx = port; + port->brdg.ops.send_pkt = ghsic_data_receive; + port->brdg.ops.unthrottle_tx = ghsic_data_unthrottle_tx; + gdata_ports[port_num].port = port; + + pdrv = &gdata_ports[port_num].pdrv; + pdrv->probe = ghsic_data_probe; + pdrv->remove = ghsic_data_remove; + pdrv->driver.name = data_bridge_names[port_num]; + pdrv->driver.owner = THIS_MODULE; + + platform_driver_register(pdrv); + + pr_debug("%s: port:%p portno:%d\n", __func__, port, port_num); + + return 0; +} + +void ghsic_data_disconnect(void *gptr, int port_num) +{ + struct gdata_port *port; + unsigned long flags; + + pr_debug("%s: port#%d\n", __func__, port_num); + + port = gdata_ports[port_num].port; + + if (port_num > no_data_ports) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return; + } + + if (!gptr || !port) { + pr_err("%s: port is null\n", __func__); + return; + } + + ghsic_data_free_buffers(port); + + /* disable endpoints */ + if (port->in) + usb_ep_disable(port->out); + + if (port->out) + usb_ep_disable(port->in); + + spin_lock_irqsave(&port->port_lock, flags); + port->port_usb = 0; + port->in = NULL; + port->out = NULL; + clear_bit(TX_THROTTLED, &port->brdg.flags); + clear_bit(RX_THROTTLED, &port->brdg.flags); + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->disconnect_w); +} + +int ghsic_data_connect(void *gptr, int port_num) +{ + struct gdata_port *port; + struct gserial *gser; + struct grmnet *gr; + struct usb_endpoint_descriptor *in_desc; + struct usb_endpoint_descriptor *out_desc; + unsigned long flags; + int ret = 0; + + pr_debug("%s: port#%d\n", __func__, port_num); + + port = gdata_ports[port_num].port; + + if (port_num > no_data_ports) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return -ENODEV; + } + + 
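+	/* nothing to tear down if the gadget pointer is gone or the port
+	 * was never allocated
+	 */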
if (!gptr || !port) { + pr_err("%s: port is null\n", __func__); + return -ENODEV; + } + + if (port->gtype == USB_GADGET_SERIAL) { + gser = gptr; + port->in = gser->in; + port->out = gser->out; + port->tx_q_size = ghsic_data_serial_tx_q_size; + port->rx_q_size = ghsic_data_serial_rx_q_size; + gser->in->driver_data = port; + gser->out->driver_data = port; + in_desc = gser->in_desc; + out_desc = gser->out_desc; + } else { + gr = gptr; + port->in = gr->in; + port->out = gr->out; + port->tx_q_size = ghsic_data_rmnet_tx_q_size; + port->rx_q_size = ghsic_data_rmnet_rx_q_size; + gr->in->driver_data = port; + gr->out->driver_data = port; + in_desc = gr->in_desc; + out_desc = gr->out_desc; + } + + ret = usb_ep_enable(port->in, in_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", + __func__, port->in); + goto fail; + } + + ret = usb_ep_enable(port->out, out_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", + __func__, port->out); + usb_ep_disable(port->in); + goto fail; + } + spin_lock_irqsave(&port->port_lock, flags); + port->port_usb = gptr; + port->to_host = 0; + port->to_modem = 0; + port->tomodem_drp_cnt = 0; + port->rx_throttled_cnt = 0; + port->rx_unthrottled_cnt = 0; + port->tx_throttled_cnt = 0; + port->tx_unthrottled_cnt = 0; + port->unthrottled_pnd_skbs = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->connect_w); +fail: + return ret; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t ghsic_data_read_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + struct gdata_port *port; + struct platform_driver *pdrv; + char *buf; + unsigned long flags; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < no_data_ports; i++) { + port = gdata_ports[i].port; + if (!port) + continue; + pdrv = &gdata_ports[i].pdrv; + spin_lock_irqsave(&port->port_lock, flags); + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName: %s\n" + "#PORT:%d port#: %p\n" + "dpkts_to_usbhost: %lu\n" + "dpkts_to_modem: %lu\n" + "tomodem_drp_cnt: %u\n" + "tx_buf_len: %u\n" + "rx_buf_len: %u\n" + "rx thld cnt %u\n" + "rx unthld cnt %u\n" + "tx thld cnt %u\n" + "tx unthld cnt %u\n" + "uthld pnd skbs %u\n" + "RX_THROTTLED %d\n" + "TX_THROTTLED %d\n" + "data_ch_open: %d\n" + "data_ch_ready: %d\n", + pdrv->driver.name, + i, port, + port->to_host, port->to_modem, + port->tomodem_drp_cnt, + port->tx_skb_q.qlen, + port->rx_skb_q.qlen, + port->rx_throttled_cnt, + port->rx_unthrottled_cnt, + port->tx_throttled_cnt, + port->tx_unthrottled_cnt, + port->unthrottled_pnd_skbs, + test_bit(RX_THROTTLED, &port->brdg.flags), + test_bit(TX_THROTTLED, &port->brdg.flags), + test_bit(CH_OPENED, &port->bridge_sts), + test_bit(CH_READY, &port->bridge_sts)); + + spin_unlock_irqrestore(&port->port_lock, flags); + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t ghsic_data_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct gdata_port *port; + int i; + unsigned long flags; + + for (i = 0; i < no_data_ports; i++) { + port = gdata_ports[i].port; + if (!port) + continue; + + spin_lock_irqsave(&port->port_lock, flags); + port->to_host = 0; + port->to_modem = 0; + port->tomodem_drp_cnt = 0; + port->rx_throttled_cnt = 0; + port->rx_unthrottled_cnt = 0; + port->tx_throttled_cnt = 0; + port->tx_unthrottled_cnt 
= 0; + port->unthrottled_pnd_skbs = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + } + return count; +} + +const struct file_operations ghsic_stats_ops = { + .read = ghsic_data_read_stats, + .write = ghsic_data_reset_stats, +}; + +static struct dentry *gdata_dent; +static struct dentry *gdata_dfile; + +static void ghsic_data_debugfs_init(void) +{ + gdata_dent = debugfs_create_dir("ghsic_data_xport", 0); + if (IS_ERR(gdata_dent)) + return; + + gdata_dfile = debugfs_create_file("status", 0444, gdata_dent, 0, + &ghsic_stats_ops); + if (!gdata_dfile || IS_ERR(gdata_dfile)) + debugfs_remove(gdata_dent); +} + +static void ghsic_data_debugfs_exit(void) +{ + debugfs_remove(gdata_dfile); + debugfs_remove(gdata_dent); +} + +#else +static void ghsic_data_debugfs_init(void) { } +static void ghsic_data_debugfs_exit(void) { } + +#endif + +int ghsic_data_setup(unsigned num_ports, enum gadget_type gtype) +{ + int first_port_id = no_data_ports; + int total_num_ports = num_ports + no_data_ports; + int ret = 0; + int i; + + if (!num_ports || total_num_ports > NUM_PORTS) { + pr_err("%s: Invalid num of ports count:%d\n", + __func__, num_ports); + return -EINVAL; + } + pr_debug("%s: count: %d\n", __func__, num_ports); + + for (i = first_port_id; i < (num_ports + first_port_id); i++) { + + /*probe can be called while port_alloc,so update no_data_ports*/ + no_data_ports++; + ret = ghsic_data_port_alloc(i, gtype); + if (ret) { + no_data_ports--; + pr_err("%s: Unable to alloc port:%d\n", __func__, i); + goto free_ports; + } + } + + /*return the starting index*/ + return first_port_id; + +free_ports: + for (i = first_port_id; i < no_data_ports; i++) + ghsic_data_port_free(i); + no_data_ports = first_port_id; + + return ret; +} + +static int __init ghsic_data_init(void) +{ + ghsic_data_debugfs_init(); + + return 0; +} +module_init(ghsic_data_init); + +static void __exit ghsic_data_exit(void) +{ + ghsic_data_debugfs_exit(); +} +module_exit(ghsic_data_exit); +MODULE_DESCRIPTION("hsic data xport driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 5cf444ee..ea77ce4f 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -67,7 +67,12 @@ struct eth_dev { spinlock_t req_lock; /* guard {rx,tx}_reqs */ struct list_head tx_reqs, rx_reqs; - atomic_t tx_qlen; + unsigned tx_qlen; +/* Minimum number of TX USB request queued to UDC */ +#define TX_REQ_THRESHOLD 5 + int no_tx_req_used; + int tx_skb_hold_count; + u32 tx_req_bufsize; struct sk_buff_head rx_frames; @@ -340,8 +345,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req) DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: - if (skb) - dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb); goto clean; /* data overrun */ @@ -466,6 +470,11 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context; struct eth_dev *dev = ep->driver_data; + struct net_device *net = dev->net; + struct usb_request *new_req; + struct usb_ep *in; + int length; + int retval; switch (req->status) { default: @@ -476,16 +485,74 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req) case -ESHUTDOWN: /* disconnect etc */ break; case 0: - dev->net->stats.tx_bytes += skb->len; + if (!req->zero) + dev->net->stats.tx_bytes += req->length-1; + else + dev->net->stats.tx_bytes += req->length; } dev->net->stats.tx_packets++; spin_lock(&dev->req_lock); - list_add(&req->list, &dev->tx_reqs); - 
spin_unlock(&dev->req_lock); - dev_kfree_skb_any(skb); + list_add_tail(&req->list, &dev->tx_reqs); + + if (dev->port_usb->multi_pkt_xfer) { + dev->no_tx_req_used--; + req->length = 0; + in = dev->port_usb->in_ep; + + if (!list_empty(&dev->tx_reqs)) { + new_req = container_of(dev->tx_reqs.next, + struct usb_request, list); + list_del(&new_req->list); + spin_unlock(&dev->req_lock); + if (new_req->length > 0) { + length = new_req->length; + + /* NCM requires no zlp if transfer is + * dwNtbInMaxSize */ + if (dev->port_usb->is_fixed && + length == dev->port_usb->fixed_in_len && + (length % in->maxpacket) == 0) + new_req->zero = 0; + else + new_req->zero = 1; + + /* use zlp framing on tx for strict CDC-Ether + * conformance, though any robust network rx + * path ignores extra padding. and some hardware + * doesn't like to write zlps. + */ + if (new_req->zero && !dev->zlp && + (length % in->maxpacket) == 0) { + new_req->zero = 0; + length++; + } + + new_req->length = length; + retval = usb_ep_queue(in, new_req, GFP_ATOMIC); + switch (retval) { + default: + DBG(dev, "tx queue err %d\n", retval); + break; + case 0: + spin_lock(&dev->req_lock); + dev->no_tx_req_used++; + spin_unlock(&dev->req_lock); + net->trans_start = jiffies; + } + } else { + spin_lock(&dev->req_lock); + list_add(&new_req->list, &dev->tx_reqs); + spin_unlock(&dev->req_lock); + } + } else { + spin_unlock(&dev->req_lock); + } + } else { + spin_unlock(&dev->req_lock); + dev_kfree_skb_any(skb); + } - atomic_dec(&dev->tx_qlen); if (netif_carrier_ok(dev->net)) netif_wake_queue(dev->net); } @@ -495,6 +562,26 @@ static inline int is_promisc(u16 cdc_filter) return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; } +static void alloc_tx_buffer(struct eth_dev *dev) +{ + struct list_head *act; + struct usb_request *req; + + dev->tx_req_bufsize = (TX_SKB_HOLD_THRESHOLD * + (dev->net->mtu + + sizeof(struct ethhdr) + /* size of rndis_packet_msg_type */ + + 44 + + 22)); + + list_for_each(act, &dev->tx_reqs) { + req = container_of(act, struct usb_request, list); + if (!req->buf) + req->buf = kmalloc(dev->tx_req_bufsize, + GFP_ATOMIC); + } +} + static netdev_tx_t eth_start_xmit(struct sk_buff *skb, struct net_device *net) { @@ -521,6 +608,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + /* Allocate memory for tx_reqs to support multi packet transfer */ + if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize) + alloc_tx_buffer(dev); + /* apply outgoing CDC or RNDIS filters */ if (!is_promisc(cdc_filter)) { u8 *dest = skb->data; @@ -575,11 +666,39 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&dev->lock, flags); if (!skb) goto drop; + } + + spin_lock_irqsave(&dev->req_lock, flags); + dev->tx_skb_hold_count++; + spin_unlock_irqrestore(&dev->req_lock, flags); + + if (dev->port_usb->multi_pkt_xfer) { + memcpy(req->buf + req->length, skb->data, skb->len); + req->length = req->length + skb->len; + length = req->length; + dev_kfree_skb_any(skb); + spin_lock_irqsave(&dev->req_lock, flags); + if (dev->tx_skb_hold_count < TX_SKB_HOLD_THRESHOLD) { + if (dev->no_tx_req_used > TX_REQ_THRESHOLD) { + list_add(&req->list, &dev->tx_reqs); + spin_unlock_irqrestore(&dev->req_lock, flags); + goto success; + } + } + + dev->no_tx_req_used++; + spin_unlock_irqrestore(&dev->req_lock, flags); + + spin_lock_irqsave(&dev->lock, flags); + dev->tx_skb_hold_count = 0; + spin_unlock_irqrestore(&dev->lock, flags); + } else { length = skb->len; + req->buf = skb->data; + req->context = skb; } - req->buf = 
skb->data; - req->context = skb; + req->complete = tx_complete; /* NCM requires no zlp if transfer is dwNtbInMaxSize */ @@ -594,16 +713,26 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, * though any robust network rx path ignores extra padding. * and some hardware doesn't like to write zlps. */ - if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) + if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) { + req->zero = 0; length++; + } req->length = length; /* throttle highspeed IRQ rate back slightly */ - if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH) - ? ((atomic_read(&dev->tx_qlen) % qmult) != 0) - : 0; + if (gadget_is_dualspeed(dev->gadget) && + (dev->gadget->speed == USB_SPEED_HIGH)) { + dev->tx_qlen++; + if (dev->tx_qlen == (qmult/2)) { + req->no_interrupt = 0; + dev->tx_qlen = 0; + } else { + req->no_interrupt = 1; + } + } else { + req->no_interrupt = 0; + } retval = usb_ep_queue(in, req, GFP_ATOMIC); switch (retval) { @@ -612,11 +741,11 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, break; case 0: net->trans_start = jiffies; - atomic_inc(&dev->tx_qlen); } if (retval) { - dev_kfree_skb_any(skb); + if (!dev->port_usb->multi_pkt_xfer) + dev_kfree_skb_any(skb); drop: dev->net->stats.tx_dropped++; spin_lock_irqsave(&dev->req_lock, flags); @@ -625,6 +754,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, list_add(&req->list, &dev->tx_reqs); spin_unlock_irqrestore(&dev->req_lock, flags); } +success: return NETDEV_TX_OK; } @@ -638,7 +768,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) rx_fill(dev, gfp_flags); /* and open the tx floodgates */ - atomic_set(&dev->tx_qlen, 0); + dev->tx_qlen = 0; netif_wake_queue(dev->net); } @@ -693,7 +823,7 @@ static int eth_stop(struct net_device *net) usb_ep_disable(link->in_ep); usb_ep_disable(link->out_ep); if (netif_carrier_ok(net)) { - ERROR(dev, "host still using in/out endpoints\n"); + DBG(dev, "host still using in/out endpoints\n"); usb_ep_enable(link->in_ep, link->in); usb_ep_enable(link->out_ep, link->out); } @@ -790,10 +920,8 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN], struct net_device *net; int status; - if (the_dev) { - memcpy(ethaddr, the_dev->host_mac, ETH_ALEN); - return 0; - } + if (the_dev) + return -EBUSY; net = alloc_etherdev(sizeof *dev); if (!net) @@ -921,6 +1049,9 @@ struct net_device *gether_connect(struct gether *link) dev->wrap = link->wrap; spin_lock(&dev->lock); + dev->tx_skb_hold_count = 0; + dev->no_tx_req_used = 0; + dev->tx_req_bufsize = 0; dev->port_usb = link; link->ioport = dev; if (netif_running(dev->net)) { @@ -986,6 +1117,8 @@ void gether_disconnect(struct gether *link) list_del(&req->list); spin_unlock(&dev->req_lock); + if (link->multi_pkt_xfer) + kfree(req->buf); usb_ep_free_request(link->in_ep, req); spin_lock(&dev->req_lock); } diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h index 64b65f92..09b6cccb 100644 --- a/drivers/usb/gadget/u_ether.h +++ b/drivers/usb/gadget/u_ether.h @@ -66,6 +66,9 @@ struct gether { bool is_fixed; u32 fixed_out_len; u32 fixed_in_len; +/* Max number of SKB packets to be used to create Multi Packet RNDIS */ +#define TX_SKB_HOLD_THRESHOLD 3 + bool multi_pkt_xfer; struct sk_buff *(*wrap)(struct gether *port, struct sk_buff *skb); int (*unwrap)(struct gether *port, diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h index 3c21316d..fd1e124f 100644 --- a/drivers/usb/gadget/u_rmnet.h +++ 
b/drivers/usb/gadget/u_rmnet.h @@ -35,28 +35,23 @@ struct grmnet { /* to usb host, aka laptop, windows pc etc. Will * be filled by usb driver of rmnet functionality */ - int (*send_cpkt_response)(struct grmnet *g, - struct rmnet_ctrl_pkt *pkt); + int (*send_cpkt_response)(void *g, void *buf, size_t len); /* to modem, and to be filled by driver implementing * control function */ - int (*send_cpkt_request)(struct grmnet *g, - u8 port_num, - struct rmnet_ctrl_pkt *pkt); + int (*send_encap_cmd)(u8 port_num, void *buf, size_t len); - void (*send_cbits_tomodem)(struct grmnet *g, - u8 port_num, - int cbits); + void (*notify_modem)(void *g, u8 port_num, int cbits); void (*disconnect)(struct grmnet *g); void (*connect)(struct grmnet *g); }; -int gbam_setup(unsigned int count); -int gbam_connect(struct grmnet *, u8 port_num); -void gbam_disconnect(struct grmnet *, u8 port_num); - +int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port); +int gbam_connect(struct grmnet *gr, u8 port_num, + enum transport_type trans, u8 connection_idx); +void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans); int gsmd_ctrl_connect(struct grmnet *gr, int port_num); void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num); int gsmd_ctrl_setup(unsigned int count); diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c index 68b7bb8a..d42ac935 100644 --- a/drivers/usb/gadget/u_rmnet_ctrl_smd.c +++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -59,7 +59,7 @@ struct rmnet_ctrl_port { struct grmnet *port_usb; spinlock_t port_lock; - struct work_struct connect_w; + struct delayed_work connect_w; }; static struct rmnet_ctrl_ports { @@ -83,6 +83,7 @@ static struct rmnet_ctrl_pkt *alloc_rmnet_ctrl_pkt(unsigned len, gfp_t flags) kfree(pkt); return ERR_PTR(-ENOMEM); } + pkt->len = len; return pkt; @@ -103,35 +104,37 @@ static void grmnet_ctrl_smd_read_w(struct work_struct *w) struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w); struct rmnet_ctrl_port *port = c->port; int sz; - struct rmnet_ctrl_pkt *cpkt; + size_t len; + void *buf; unsigned long flags; - while (1) { + spin_lock_irqsave(&port->port_lock, flags); + while (c->ch) { sz = smd_cur_packet_size(c->ch); - if (sz == 0) + if (sz <= 0) break; if (smd_read_avail(c->ch) < sz) break; - cpkt = alloc_rmnet_ctrl_pkt(sz, GFP_KERNEL); - if (IS_ERR(cpkt)) { - pr_err("%s: unable to allocate rmnet control pkt\n", - __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + + buf = kmalloc(sz, GFP_KERNEL); + if (!buf) return; - } - cpkt->len = smd_read(c->ch, cpkt->buf, sz); + + len = smd_read(c->ch, buf, sz); /* send it to USB here */ spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb && port->port_usb->send_cpkt_response) { - port->port_usb->send_cpkt_response( - port->port_usb, - cpkt); + port->port_usb->send_cpkt_response(port->port_usb, + buf, len); c->to_host++; } - spin_unlock_irqrestore(&port->port_lock, flags); + kfree(buf); } + spin_unlock_irqrestore(&port->port_lock, flags); } static void grmnet_ctrl_smd_write_w(struct work_struct *w) @@ -143,7 +146,7 @@ static void grmnet_ctrl_smd_write_w(struct work_struct *w) int ret; spin_lock_irqsave(&port->port_lock, flags); - while (1) { + 
while (c->ch) { if (list_empty(&c->tx_q)) break; @@ -157,8 +160,7 @@ static void grmnet_ctrl_smd_write_w(struct work_struct *w) ret = smd_write(c->ch, cpkt->buf, cpkt->len); spin_lock_irqsave(&port->port_lock, flags); if (ret != cpkt->len) { - pr_err("%s: smd_write failed err:%d\n", - __func__, ret); + pr_err("%s: smd_write failed err:%d\n", __func__, ret); free_rmnet_ctrl_pkt(cpkt); break; } @@ -169,24 +171,29 @@ static void grmnet_ctrl_smd_write_w(struct work_struct *w) } static int -grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno, - struct rmnet_ctrl_pkt *cpkt) +grmnet_ctrl_smd_send_cpkt_tomodem(u8 portno, + void *buf, size_t len) { unsigned long flags; struct rmnet_ctrl_port *port; struct smd_ch_info *c; + struct rmnet_ctrl_pkt *cpkt; if (portno >= n_rmnet_ctrl_ports) { pr_err("%s: Invalid portno#%d\n", __func__, portno); return -ENODEV; } - if (!gr) { - pr_err("%s: grmnet is null\n", __func__); - return -ENODEV; + port = ctrl_smd_ports[portno].port; + + cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC); + if (IS_ERR(cpkt)) { + pr_err("%s: Unable to allocate ctrl pkt\n", __func__); + return -ENOMEM; } - port = ctrl_smd_ports[portno].port; + memcpy(cpkt->buf, buf, len); + cpkt->len = len; spin_lock_irqsave(&port->port_lock, flags); c = &port->ctrl_ch; @@ -207,7 +214,7 @@ grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno, #define RMNET_CTRL_DTR 0x01 static void -gsmd_ctrl_send_cbits_tomodem(struct grmnet *gr, u8 portno, int cbits) +gsmd_ctrl_send_cbits_tomodem(void *gptr, u8 portno, int cbits) { struct rmnet_ctrl_port *port; struct smd_ch_info *c; @@ -220,7 +227,7 @@ gsmd_ctrl_send_cbits_tomodem(struct grmnet *gr, u8 portno, int cbits) return; } - if (!gr) { + if (!gptr) { pr_err("%s: grmnet is null\n", __func__); return; } @@ -316,7 +323,7 @@ static void grmnet_ctrl_smd_notify(void *p, unsigned event) static void grmnet_ctrl_smd_connect_w(struct work_struct *w) { struct rmnet_ctrl_port *port = - container_of(w, struct rmnet_ctrl_port, connect_w); + container_of(w, struct rmnet_ctrl_port, connect_w.work); struct smd_ch_info *c = &port->ctrl_ch; unsigned long flags; int ret; @@ -328,17 +335,22 @@ static void grmnet_ctrl_smd_connect_w(struct work_struct *w) ret = smd_open(c->name, &c->ch, port, grmnet_ctrl_smd_notify); if (ret) { - pr_err("%s: Unable to open smd ch:%s err:%d\n", - __func__, c->name, ret); + if (ret == -EAGAIN) { + /* port not ready - retry */ + pr_debug("%s: SMD port not ready - rescheduling:%s err:%d\n", + __func__, c->name, ret); + queue_delayed_work(grmnet_ctrl_wq, &port->connect_w, + msecs_to_jiffies(250)); + } else { + pr_err("%s: unable to open smd port:%s err:%d\n", + __func__, c->name, ret); + } return; } spin_lock_irqsave(&port->port_lock, flags); - if (port->port_usb) { - pr_warning("%s: SMD notify closing before open\n", __func__); - c->cbits_tomodem |= TIOCM_RTS; + if (port->port_usb) smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem); - } spin_unlock_irqrestore(&port->port_lock, flags); } @@ -365,11 +377,11 @@ int gsmd_ctrl_connect(struct grmnet *gr, int port_num) spin_lock_irqsave(&port->port_lock, flags); port->port_usb = gr; - gr->send_cpkt_request = grmnet_ctrl_smd_send_cpkt_tomodem; - gr->send_cbits_tomodem = gsmd_ctrl_send_cbits_tomodem; + gr->send_encap_cmd = grmnet_ctrl_smd_send_cpkt_tomodem; + gr->notify_modem = gsmd_ctrl_send_cbits_tomodem; spin_unlock_irqrestore(&port->port_lock, flags); - queue_work(grmnet_ctrl_wq, &port->connect_w); + queue_delayed_work(grmnet_ctrl_wq, &port->connect_w, 0); return 0; } @@ -398,8 +410,8 
@@ void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num) spin_lock_irqsave(&port->port_lock, flags); port->port_usb = 0; - gr->send_cpkt_request = 0; - gr->send_cbits_tomodem = 0; + gr->send_encap_cmd = 0; + gr->notify_modem = 0; c->cbits_tomodem = 0; while (!list_empty(&c->tx_q)) { @@ -411,10 +423,13 @@ void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num) spin_unlock_irqrestore(&port->port_lock, flags); - if (test_bit(CH_OPENED, &c->flags)) { - /* this should send the dtr zero */ + if (test_and_clear_bit(CH_OPENED, &c->flags)) + /* send dtr zero */ + smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem); + + if (c->ch) { smd_close(c->ch); - clear_bit(CH_OPENED, &c->flags); + c->ch = NULL; } } @@ -438,7 +453,8 @@ static int grmnet_ctrl_smd_ch_probe(struct platform_device *pdev) /* if usb is online, try opening smd_ch */ spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) - queue_work(grmnet_ctrl_wq, &port->connect_w); + queue_delayed_work(grmnet_ctrl_wq, + &port->connect_w, 0); spin_unlock_irqrestore(&port->port_lock, flags); break; @@ -463,7 +479,10 @@ static int grmnet_ctrl_smd_ch_remove(struct platform_device *pdev) if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) { clear_bit(CH_READY, &c->flags); clear_bit(CH_OPENED, &c->flags); - smd_close(c->ch); + if (c->ch) { + smd_close(c->ch); + c->ch = NULL; + } break; } } @@ -496,7 +515,7 @@ static int grmnet_ctrl_smd_port_alloc(int portno) port->port_num = portno; spin_lock_init(&port->port_lock); - INIT_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w); + INIT_DELAYED_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w); c = &port->ctrl_ch; c->name = rmnet_ctrl_names[portno]; @@ -543,12 +562,13 @@ int gsmd_ctrl_setup(unsigned int count) } for (i = 0; i < count; i++) { + n_rmnet_ctrl_ports++; ret = grmnet_ctrl_smd_port_alloc(i); if (ret) { pr_err("%s: Unable to alloc port:%d\n", __func__, i); + n_rmnet_ctrl_ports--; goto free_ctrl_smd_ports; } - n_rmnet_ctrl_ports++; } return 0; @@ -601,8 +621,8 @@ static ssize_t gsmd_ctrl_read_stats(struct file *file, char __user *ubuf, c->cbits_tomodem ? "HIGH" : "LOW", test_bit(CH_OPENED, &c->flags), test_bit(CH_READY, &c->flags), - smd_read_avail(c->ch), - smd_write_avail(c->ch)); + c->ch ? smd_read_avail(c->ch) : 0, + c->ch ? smd_write_avail(c->ch) : 0); spin_unlock_irqrestore(&port->port_lock, flags); } diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c index 056920ae..8ff4c4bd 100644 --- a/drivers/usb/gadget/u_sdio.c +++ b/drivers/usb/gadget/u_sdio.c @@ -232,7 +232,7 @@ int gsdio_write(struct gsdio_port *port, struct usb_request *req) { unsigned avail; char *packet; - unsigned size; + unsigned size = req->actual; unsigned n; int ret = 0; @@ -248,8 +248,6 @@ int gsdio_write(struct gsdio_port *port, struct usb_request *req) return -ENODEV; } - size = req->actual; - packet = req->buf; pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n", __func__, port, port->port_num, req, req->actual, port->n_read); @@ -536,20 +534,6 @@ void gsdio_tx_pull(struct work_struct *w) goto tx_pull_end; } - /* Do not send data if DTR is not set */ - if (!(port->cbits_to_modem & TIOCM_DTR)) { - pr_info("%s: DTR low. 
flush %d bytes.", __func__, avail); - /* check if usb is still active */ - if (!port->port_usb) { - gsdio_free_req(in, req); - } else { - list_add(&req->list, pool); - port->wp_len++; - } - goto tx_pull_end; - } - - req->length = avail; spin_unlock_irq(&port->port_lock); @@ -655,10 +639,11 @@ void gsdio_ctrl_wq(struct work_struct *w) port->cbits_to_modem, ~(port->cbits_to_modem)); } -void gsdio_ctrl_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits) +void gsdio_ctrl_notify_modem(void *gptr, u8 portno, int ctrl_bits) { struct gsdio_port *port; int temp; + struct gserial *gser = gptr; if (portno >= n_sdio_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); @@ -1146,8 +1131,8 @@ int gsdio_setup(struct usb_gadget *g, unsigned count) for (i = 0; i < count; i++) { mutex_init(&sdio_ports[i].lock); - n_sdio_ports++; ret = gsdio_port_alloc(i, &coding, sport_info + i); + n_sdio_ports++; if (ret) { n_sdio_ports--; pr_err("%s: sdio logical port allocation failed\n", diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index bde6a3f3..c9da4bd7 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c @@ -55,7 +55,7 @@ * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. */ -#define PREFIX "ttyHSUSB" +#define PREFIX "ttyGS" /* * gserial is the lifecycle interface, used by USB functions @@ -371,16 +371,11 @@ __acquires(&port->port_lock) */ { struct list_head *pool = &port->write_pool; - struct usb_ep *in; + struct usb_ep *in = port->port_usb->in; int status = 0; static long prev_len; bool do_tty_wake = false; - if (port->port_usb) - in = port->port_usb->in; - else - return 0; - while (!list_empty(pool)) { struct usb_request *req; int len; @@ -391,7 +386,9 @@ __acquires(&port->port_lock) req = list_entry(pool->next, struct usb_request, list); len = gs_send_packet(port, req->buf, TX_BUF_SIZE); if (len == 0) { - /* Queue zero length packet */ + /* Queue zero length packet explicitly to make it + * work with UDCs which don't support req->zero flag + */ if (prev_len && (prev_len % in->maxpacket == 0)) { req->length = 0; list_del(&req->list); @@ -416,7 +413,6 @@ __acquires(&port->port_lock) req->length = len; list_del(&req->list); - req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0); pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", port->port_num, len, *((u8 *)req->buf), @@ -468,14 +464,9 @@ __acquires(&port->port_lock) */ { struct list_head *pool = &port->read_pool; - struct usb_ep *out; + struct usb_ep *out = port->port_usb->out; unsigned started = 0; - if (port->port_usb) - out = port->port_usb->out; - else - return 0; - while (!list_empty(pool)) { struct usb_request *req; int status; @@ -736,15 +727,10 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, static int gs_start_io(struct gs_port *port) { struct list_head *head = &port->read_pool; - struct usb_ep *ep; + struct usb_ep *ep = port->port_usb->out; int status; unsigned started; - if (port->port_usb) - ep = port->port_usb->out; - else - return -EIO; - /* Allocate RX and TX I/O buffers. We can't easily do this much * earlier (with GFP_KERNEL) because the requests are coupled to * endpoints, as are the packet sizes we'll be using. 
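
The gs_start_tx() change above stops relying on req->zero and instead queues an explicit zero-length request whenever the previous transfer filled an exact multiple of the IN endpoint's maxpacket, so the end of the transfer is still signalled on UDCs that do not honour req->zero. A minimal sketch of that termination rule follows; the helper name and its simplified bookkeeping are illustrative, not part of u_serial.c.

#include <linux/usb/gadget.h>

/*
 * Sketch only: decide whether a zero-length packet must follow the last
 * IN transfer. "prev_len" is the length of the previously queued request.
 */
static int queue_zlp_if_needed(struct usb_ep *in, struct usb_request *req,
                               unsigned prev_len)
{
        if (prev_len && (prev_len % in->maxpacket) == 0) {
                req->length = 0;        /* explicit ZLP terminates the transfer */
                return usb_ep_queue(in, req, GFP_ATOMIC);
        }
        return 0;                       /* last packet was short: nothing to do */
}
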
Different @@ -1361,9 +1347,6 @@ int gserial_setup(struct usb_gadget *g, unsigned count) if (count == 0 || count > N_PORTS) return -EINVAL; - if (gs_tty_driver) - return 0; - gs_tty_driver = alloc_tty_driver(count); if (!gs_tty_driver) return -ENOMEM; @@ -1388,10 +1371,6 @@ int gserial_setup(struct usb_gadget *g, unsigned count) gs_tty_driver->init_termios.c_ispeed = 9600; gs_tty_driver->init_termios.c_ospeed = 9600; - gs_tty_driver->init_termios.c_lflag = 0; - gs_tty_driver->init_termios.c_iflag = 0; - gs_tty_driver->init_termios.c_oflag = 0; - coding.dwDTERate = cpu_to_le32(9600); coding.bCharFormat = 8; coding.bParityType = USB_CDC_NO_PARITY; diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h index fea53d8c..c9370068 100644 --- a/drivers/usb/gadget/u_serial.h +++ b/drivers/usb/gadget/u_serial.h @@ -55,7 +55,7 @@ struct gserial { int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits); /* notification changes to modem */ - void (*notify_modem)(struct gserial *gser, u8 portno, int ctrl_bits); + void (*notify_modem)(void *gser, u8 portno, int ctrl_bits); }; /* utilities to allocate/free request and buffer */ diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c index fe55363a..3de82b37 100644 --- a/drivers/usb/gadget/u_smd.c +++ b/drivers/usb/gadget/u_smd.c @@ -71,7 +71,7 @@ struct gsmd_port { struct gserial *port_usb; struct smd_port_info *pi; - struct work_struct connect_work; + struct delayed_work connect_work; /* At present, smd does not notify * control bit change info from modem @@ -209,6 +209,7 @@ static void gsmd_start_rx(struct gsmd_port *port) static void gsmd_rx_push(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, push); + struct smd_port_info *pi = port->pi; struct list_head *q; pr_debug("%s: port:%p port#%d", __func__, port, port->port_num); @@ -216,10 +217,9 @@ static void gsmd_rx_push(struct work_struct *w) spin_lock_irq(&port->port_lock); q = &port->read_queue; - while (!list_empty(q)) { + while (pi->ch && !list_empty(q)) { struct usb_request *req; int avail; - struct smd_port_info *pi = port->pi; req = list_first_entry(q, struct usb_request, list); @@ -245,7 +245,7 @@ static void gsmd_rx_push(struct work_struct *w) char *packet = req->buf; unsigned size = req->actual; unsigned n; - int count; + int count; n = port->n_read; if (n) { @@ -296,21 +296,24 @@ static void gsmd_tx_pull(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, pull); struct list_head *pool = &port->write_pool; + struct smd_port_info *pi = port->pi; + struct usb_ep *in; pr_debug("%s: port:%p port#%d pool:%p\n", __func__, port, port->port_num, pool); + spin_lock_irq(&port->port_lock); + if (!port->port_usb) { pr_debug("%s: usb is disconnected\n", __func__); + spin_unlock_irq(&port->port_lock); gsmd_read_pending(port); return; } - spin_lock_irq(&port->port_lock); - while (!list_empty(pool)) { + in = port->port_usb->in; + while (pi->ch && !list_empty(pool)) { struct usb_request *req; - struct usb_ep *in = port->port_usb->in; - struct smd_port_info *pi = port->pi; int avail; int ret; @@ -562,7 +565,7 @@ static void gsmd_connect_work(struct work_struct *w) struct smd_port_info *pi; int ret; - port = container_of(w, struct gsmd_port, connect_work); + port = container_of(w, struct gsmd_port, connect_work.work); pi = port->pi; pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num); @@ -573,16 +576,24 @@ static void gsmd_connect_work(struct work_struct *w) ret = 
smd_named_open_on_edge(pi->name, SMD_APPS_MODEM, &pi->ch, port, gsmd_notify); if (ret) { - pr_err("%s: unable to open smd port:%s err:%d\n", - __func__, pi->name, ret); - return; + if (ret == -EAGAIN) { + /* port not ready - retry */ + pr_debug("%s: SMD port not ready - rescheduling:%s err:%d\n", + __func__, pi->name, ret); + queue_delayed_work(gsmd_wq, &port->connect_work, + msecs_to_jiffies(250)); + } else { + pr_err("%s: unable to open smd port:%s err:%d\n", + __func__, pi->name, ret); + } } } -static void gsmd_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits) +static void gsmd_notify_modem(void *gptr, u8 portno, int ctrl_bits) { struct gsmd_port *port; int temp; + struct gserial *gser = gptr; if (portno >= n_smd_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); @@ -671,7 +682,7 @@ int gsmd_connect(struct gserial *gser, u8 portno) } gser->out->driver_data = port; - queue_work(gsmd_wq, &port->connect_work); + queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(0)); return 0; } @@ -710,17 +721,18 @@ void gsmd_disconnect(struct gserial *gser, u8 portno) port->n_read = 0; spin_unlock_irqrestore(&port->port_lock, flags); - if (!test_bit(CH_OPENED, &port->pi->flags)) - return; - - /* lower the dtr */ - port->cbits_to_modem = 0; - smd_tiocmset(port->pi->ch, - port->cbits_to_modem, - ~port->cbits_to_modem); + if (test_and_clear_bit(CH_OPENED, &port->pi->flags)) { + /* lower the dtr */ + port->cbits_to_modem = 0; + smd_tiocmset(port->pi->ch, + port->cbits_to_modem, + ~port->cbits_to_modem); + } - smd_close(port->pi->ch); - clear_bit(CH_OPENED, &port->pi->flags); + if (port->pi->ch) { + smd_close(port->pi->ch); + port->pi->ch = NULL; + } } #define SMD_CH_MAX_LEN 20 @@ -741,7 +753,8 @@ static int gsmd_ch_probe(struct platform_device *pdev) set_bit(CH_READY, &pi->flags); spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) - queue_work(gsmd_wq, &port->connect_work); + queue_delayed_work(gsmd_wq, &port->connect_work, + msecs_to_jiffies(0)); spin_unlock_irqrestore(&port->port_lock, flags); break; } @@ -764,7 +777,10 @@ static int gsmd_ch_remove(struct platform_device *pdev) if (!strncmp(pi->name, pdev->name, SMD_CH_MAX_LEN)) { clear_bit(CH_READY, &pi->flags); clear_bit(CH_OPENED, &pi->flags); - smd_close(pi->ch); + if (pi->ch) { + smd_close(pi->ch); + pi->ch = NULL; + } break; } } @@ -800,7 +816,7 @@ static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding) INIT_LIST_HEAD(&port->write_pool); INIT_WORK(&port->pull, gsmd_tx_pull); - INIT_WORK(&port->connect_work, gsmd_connect_work); + INIT_DELAYED_WORK(&port->connect_work, gsmd_connect_work); smd_ports[portno].port = port; pdrv = &smd_ports[portno].pdrv; @@ -820,6 +836,7 @@ static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct gsmd_port *port; + struct smd_port_info *pi; char *buf; unsigned long flags; int temp = 0; @@ -832,6 +849,7 @@ static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; + pi = port->pi; spin_lock_irqsave(&port->port_lock, flags); temp += scnprintf(buf + temp, 512 - temp, "###PORT:%d###\n" @@ -847,10 +865,10 @@ static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, i, port->nbytes_tolaptop, port->nbytes_tomodem, port->cbits_to_modem, port->cbits_to_laptop, port->n_read, - smd_read_avail(port->pi->ch), - smd_write_avail(port->pi->ch), - test_bit(CH_OPENED, &port->pi->flags), - test_bit(CH_READY, &port->pi->flags)); + 
pi->ch ? smd_read_avail(pi->ch) : 0, + pi->ch ? smd_write_avail(pi->ch) : 0, + test_bit(CH_OPENED, &pi->flags), + test_bit(CH_READY, &pi->flags)); spin_unlock_irqrestore(&port->port_lock, flags); } diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index 3f75445e..bd6671dd 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,7 +19,6 @@ #define PKT_TYPE 8 #define DEINIT_TYPE 16 #define USER_SPACE_LOG_TYPE 32 -#define USERMODE_DIAGFWD 64 #define USB_MODE 1 #define MEMORY_DEVICE_MODE 2 #define NO_LOGGING_MODE 3 @@ -41,8 +40,21 @@ #define APQ8060_MACHINE_ID 86 #define AO8960_MACHINE_ID 87 #define MSM8660_MACHINE_ID 71 +#define MDM9615_MACHINE_ID 104 +#define APQ8064_MACHINE_ID 109 +#define MSM8930_MACHINE_ID 116 +#define MSM8630_MACHINE_ID 117 +#define MSM8230_MACHINE_ID 118 +#define APQ8030_MACHINE_ID 119 +#define MSM8627_MACHINE_ID 120 +#define MSM8227_MACHINE_ID 121 +#define MSM8260A_MACHINE_ID 123 +#define MSM8974_MACHINE_ID 126 #define APQ8060_TOOLS_ID 4062 #define AO8960_TOOLS_ID 4064 +#define APQ8064_TOOLS_ID 4072 +#define MSM8930_TOOLS_ID 4072 +#define MSM8974_TOOLS_ID 4072 #define MSG_MASK_0 (0x00000001) #define MSG_MASK_1 (0x00000002) @@ -97,11 +109,11 @@ the appropriate macros. */ /* This needs to be modified manually now, when we add a new RANGE of SSIDs to the msg_mask_tbl */ -#define MSG_MASK_TBL_CNT 19 +#define MSG_MASK_TBL_CNT 23 #define EVENT_LAST_ID 0x083F #define MSG_SSID_0 0 -#define MSG_SSID_0_LAST 68 +#define MSG_SSID_0_LAST 90 #define MSG_SSID_1 500 #define MSG_SSID_1_LAST 506 #define MSG_SSID_2 1000 @@ -109,19 +121,19 @@ the appropriate macros. */ #define MSG_SSID_3 2000 #define MSG_SSID_3_LAST 2008 #define MSG_SSID_4 3000 -#define MSG_SSID_4_LAST 3012 +#define MSG_SSID_4_LAST 3014 #define MSG_SSID_5 4000 #define MSG_SSID_5_LAST 4010 #define MSG_SSID_6 4500 #define MSG_SSID_6_LAST 4526 #define MSG_SSID_7 4600 -#define MSG_SSID_7_LAST 4611 +#define MSG_SSID_7_LAST 4612 #define MSG_SSID_8 5000 -#define MSG_SSID_8_LAST 5024 +#define MSG_SSID_8_LAST 5029 #define MSG_SSID_9 5500 -#define MSG_SSID_9_LAST 5514 +#define MSG_SSID_9_LAST 5516 #define MSG_SSID_10 6000 -#define MSG_SSID_10_LAST 6050 +#define MSG_SSID_10_LAST 6072 #define MSG_SSID_11 6500 #define MSG_SSID_11_LAST 6521 #define MSG_SSID_12 7000 @@ -138,6 +150,14 @@ the appropriate macros. 
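
Both grmnet_ctrl_smd.c and u_smd.c above turn their connect work into a delayed_work so that an -EAGAIN from smd_open()/smd_named_open_on_edge() (SMD channel not ready yet) is retried after a short delay instead of failing outright. A condensed sketch of that retry pattern, using illustrative names throughout:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <mach/msm_smd.h>       /* MSM-specific SMD API, as used by u_smd.c */

/* Illustrative port wrapper; only the fields the sketch needs. */
struct sketch_port {
        const char              *ch_name;
        smd_channel_t           *ch;
        struct delayed_work     connect_w;
};

static struct workqueue_struct *sketch_wq;

static void sketch_smd_notify(void *priv, unsigned event) { }

static void sketch_connect_w(struct work_struct *w)
{
        struct sketch_port *port =
                container_of(w, struct sketch_port, connect_w.work);
        int ret = smd_open(port->ch_name, &port->ch, port, sketch_smd_notify);

        if (ret == -EAGAIN)     /* channel not up yet: try again in 250 ms */
                queue_delayed_work(sketch_wq, &port->connect_w,
                                   msecs_to_jiffies(250));
        else if (ret)
                pr_err("%s: smd_open(%s) failed: %d\n", __func__,
                       port->ch_name, ret);
}
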
*/ #define MSG_SSID_17_LAST 9008 #define MSG_SSID_18 9500 #define MSG_SSID_18_LAST 9509 +#define MSG_SSID_19 10200 +#define MSG_SSID_19_LAST 10210 +#define MSG_SSID_20 10251 +#define MSG_SSID_20_LAST 10255 +#define MSG_SSID_21 10300 +#define MSG_SSID_21_LAST 10300 +#define MSG_SSID_22 10350 +#define MSG_SSID_22_LAST 10361 struct diagpkt_delay_params { void *rsp_ptr; @@ -232,6 +252,28 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_MED, MSG_LVL_LOW, MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL, + MSG_LVL_MED, + MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL, + MSG_LVL_LOW, + MSG_LVL_MED, + MSG_LVL_LOW }; static const uint32_t msg_bld_masks_1[] = { @@ -241,7 +283,7 @@ static const uint32_t msg_bld_masks_1[] = { MSG_LVL_LOW, MSG_LVL_HIGH, MSG_LVL_HIGH, - MSG_LVL_HIGH, + MSG_LVL_HIGH }; static const uint32_t msg_bld_masks_2[] = { @@ -280,7 +322,9 @@ static const uint32_t msg_bld_masks_4[] = { MSG_LVL_HIGH, MSG_LVL_HIGH, MSG_LVL_HIGH, - MSG_LVL_HIGH + MSG_LVL_HIGH, + MSG_LVL_LOW, + MSG_LVL_LOW }; static const uint32_t msg_bld_masks_5[] = { @@ -293,7 +337,8 @@ static const uint32_t msg_bld_masks_5[] = { MSG_LVL_MED, MSG_LVL_MED, MSG_LVL_MED, - MSG_LVL_MED, + MSG_LVL_MED|MSG_LVL_MED|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7| \ + MSG_MASK_8|MSG_MASK_9, MSG_LVL_MED }; @@ -340,6 +385,7 @@ static const uint32_t msg_bld_masks_7[] = { MSG_LVL_MED, MSG_LVL_MED, MSG_LVL_MED, + MSG_LVL_LOW }; static const uint32_t msg_bld_masks_8[] = { @@ -368,6 +414,11 @@ static const uint32_t msg_bld_masks_8[] = { MSG_LVL_MED, MSG_LVL_MED, MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED }; static const uint32_t msg_bld_masks_9[] = { @@ -386,6 +437,8 @@ static const uint32_t msg_bld_masks_9[] = { MSG_LVL_MED|MSG_MASK_5, MSG_LVL_MED|MSG_MASK_5, MSG_LVL_MED|MSG_MASK_5, + MSG_LVL_MED|MSG_MASK_5, + MSG_LVL_MED|MSG_MASK_5 }; static const uint32_t msg_bld_masks_10[] = { @@ -445,6 +498,28 @@ static const uint32_t msg_bld_masks_10[] = { MSG_LVL_LOW, MSG_LVL_LOW, MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_LOW }; static const uint32_t msg_bld_masks_11[] = { @@ -567,10 +642,51 @@ static const uint32_t msg_bld_masks_18[] = { MSG_LVL_LOW }; +static const uint32_t msg_bld_masks_19[] = { + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW +}; + +static const uint32_t msg_bld_masks_20[] = { + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW +}; + +static const uint32_t msg_bld_masks_21[] = { + MSG_LVL_HIGH +}; + +static const uint32_t msg_bld_masks_22[] = { + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH +}; + /* LOG CODES */ #define LOG_0 0x0 -#define LOG_1 0x1520 +#define LOG_1 0x15A7 #define LOG_2 0x0 #define LOG_3 0x0 #define LOG_4 0x4910 diff 
--git a/include/linux/usb/android.h b/include/linux/usb/android.h new file mode 100644 index 00000000..9d7e4a84 --- /dev/null +++ b/include/linux/usb/android.h @@ -0,0 +1,24 @@ +/* + * Platform data for Android USB + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __LINUX_USB_ANDROID_H +#define __LINUX_USB_ANDROID_H + +struct android_usb_platform_data { + int (*update_pid_and_serial_num)(uint32_t, const char *); +}; + +#endif /* __LINUX_USB_ANDROID_H */ diff --git a/include/linux/usb/ccid_desc.h b/include/linux/usb/ccid_desc.h new file mode 100644 index 00000000..2d1ae741 --- /dev/null +++ b/include/linux/usb/ccid_desc.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details + */ + +#ifndef __LINUX_USB_CCID_DESC_H +#define __LINUX_USB_CCID_DESC_H + +/*CCID specification version 1.10*/ +#define CCID1_10 0x0110 + +#define SMART_CARD_DEVICE_CLASS 0x0B +/* Smart Card Device Class Descriptor Type */ +#define CCID_DECRIPTOR_TYPE 0x21 + +/* Table 5.3-1 Summary of CCID Class Specific Request */ +#define CCIDGENERICREQ_ABORT 0x01 +#define CCIDGENERICREQ_GET_CLOCK_FREQUENCIES 0x02 +#define CCIDGENERICREQ_GET_DATA_RATES 0x03 + +/* 6.1 Command Pipe, Bulk-OUT Messages */ +#define PC_TO_RDR_ICCPOWERON 0x62 +#define PC_TO_RDR_ICCPOWEROFF 0x63 +#define PC_TO_RDR_GETSLOTSTATUS 0x65 +#define PC_TO_RDR_XFRBLOCK 0x6F +#define PC_TO_RDR_GETPARAMETERS 0x6C +#define PC_TO_RDR_RESETPARAMETERS 0x6D +#define PC_TO_RDR_SETPARAMETERS 0x61 +#define PC_TO_RDR_ESCAPE 0x6B +#define PC_TO_RDR_ICCCLOCK 0x6E +#define PC_TO_RDR_T0APDU 0x6A +#define PC_TO_RDR_SECURE 0x69 +#define PC_TO_RDR_MECHANICAL 0x71 +#define PC_TO_RDR_ABORT 0x72 +#define PC_TO_RDR_SETDATARATEANDCLOCKFREQUENCY 0x73 + +/* 6.2 Response Pipe, Bulk-IN Messages */ +#define RDR_TO_PC_DATABLOCK 0x80 +#define RDR_TO_PC_SLOTSTATUS 0x81 +#define RDR_TO_PC_PARAMETERS 0x82 +#define RDR_TO_PC_ESCAPE 0x83 +#define RDR_TO_PC_DATARATEANDCLOCKFREQUENCY 0x84 + +/* 6.3 Interrupt-IN Messages */ +#define RDR_TO_PC_NOTIFYSLOTCHANGE 0x50 +#define RDR_TO_PC_HARDWAREERROR 0x51 + +/* Table 6.2-2 Slot error register when bmCommandStatus = 1 */ +#define CMD_ABORTED 0xFF +#define ICC_MUTE 0xFE +#define XFR_PARITY_ERROR 0xFD +#define XFR_OVERRUN 0xFC +#define HW_ERROR 0xFB +#define BAD_ATR_TS 0xF8 +#define BAD_ATR_TCK 0xF7 +#define ICC_PROTOCOL_NOT_SUPPORTED 0xF6 +#define ICC_CLASS_NOT_SUPPORTED 0xF5 +#define PROCEDURE_BYTE_CONFLICT 0xF4 +#define DEACTIVATED_PROTOCOL 0xF3 +#define BUSY_WITH_AUTO_SEQUENCE 0xF2 +#define PIN_TIMEOUT 0xF0 +#define PIN_CANCELLED 0xEF +#define CMD_SLOT_BUSY 0xE0 + +/* CCID rev 1.1, 
p.27 */ +#define VOLTS_AUTO 0x00 +#define VOLTS_5_0 0x01 +#define VOLTS_3_0 0x02 +#define VOLTS_1_8 0x03 + +/* 6.3.1 RDR_to_PC_NotifySlotChange */ +#define ICC_NOT_PRESENT 0x00 +#define ICC_PRESENT 0x01 +#define ICC_CHANGE 0x02 +#define ICC_INSERTED_EVENT (ICC_PRESENT+ICC_CHANGE) + +/* Identifies the length of type of subordinate descriptors of a CCID device + * Table 5.1-1 Smart Card Device Class descriptors + */ +struct usb_ccid_class_descriptor { + unsigned char bLength; + unsigned char bDescriptorType; + unsigned short bcdCCID; + unsigned char bMaxSlotIndex; + unsigned char bVoltageSupport; + unsigned long dwProtocols; + unsigned long dwDefaultClock; + unsigned long dwMaximumClock; + unsigned char bNumClockSupported; + unsigned long dwDataRate; + unsigned long dwMaxDataRate; + unsigned char bNumDataRatesSupported; + unsigned long dwMaxIFSD; + unsigned long dwSynchProtocols; + unsigned long dwMechanical; + unsigned long dwFeatures; + unsigned long dwMaxCCIDMessageLength; + unsigned char bClassGetResponse; + unsigned char bClassEnvelope; + unsigned short wLcdLayout; + unsigned char bPINSupport; + unsigned char bMaxCCIDBusySlots; +} __packed; +#endif diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 47d319bd..66a29a91 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -36,7 +36,6 @@ #include #include -#include /* * USB function drivers should return USB_GADGET_DELAYED_STATUS if they @@ -362,13 +361,6 @@ struct usb_composite_dev { /* protects deactivations and delayed_status counts*/ spinlock_t lock; - - /* switch indicating Connect_to_PC App only */ - struct switch_dev sw_connect2pc; - /* current connected state for sw_connected */ - bool connected; - - struct work_struct switch_work; }; extern int usb_string_id(struct usb_composite_dev *c); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 2f2e3dbf..47e84270 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -57,6 +57,7 @@ struct usb_ep; * Note that for writes (IN transfers) some data bytes may still * reside in a device-side FIFO when the request is reported as * complete. + *@udc_priv: Vendor private data in usage by the UDC. * * These are allocated/freed through the endpoint they're used with. 
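
The new include/linux/usb/android.h above only declares the platform-data hook update_pid_and_serial_num(); a board file is expected to supply it. A hypothetical sketch of how a board might wire this up (the callback body and registration details are illustrative, not taken from this series):

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/usb/android.h>

/* Illustrative callback: a real board file would hand the PID and serial
 * number to whatever bootloader/radio interface expects them. */
static int board_update_pid_and_serial_num(uint32_t pid, const char *sn)
{
        pr_info("android_usb: pid=0x%04x serial=%s\n", pid, sn);
        return 0;
}

static struct android_usb_platform_data board_android_usb_pdata = {
        .update_pid_and_serial_num = board_update_pid_and_serial_num,
};

/* Assumed device name: android.c on these kernels registers an
 * "android_usb" platform driver that picks up this platform data. */
static struct platform_device board_android_usb_device = {
        .name   = "android_usb",
        .id     = -1,
        .dev    = {
                .platform_data = &board_android_usb_pdata,
        },
};
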
The * hardware's driver can add extra per-request data to the memory it returns, @@ -92,6 +93,7 @@ struct usb_request { int status; unsigned actual; + unsigned udc_priv; }; /*-------------------------------------------------------------------------*/ @@ -111,7 +113,6 @@ struct usb_ep_ops { struct usb_request *(*alloc_request) (struct usb_ep *ep, gfp_t gfp_flags); void (*free_request) (struct usb_ep *ep, struct usb_request *req); - int (*queue) (struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); int (*dequeue) (struct usb_ep *ep, struct usb_request *req); @@ -774,7 +775,6 @@ struct usb_gadget_driver { int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *); void (*disconnect)(struct usb_gadget *); - void (*mute_disconnect)(struct usb_gadget *); void (*suspend)(struct usb_gadget *); void (*resume)(struct usb_gadget *); From 9bd85f6eb174107179d21e983814f557af0b8f20 Mon Sep 17 00:00:00 2001 From: David Hays Date: Wed, 29 May 2013 01:28:26 -0500 Subject: [PATCH 116/117] msm: vigor: bulk update USB gadget CAF Change-Id: I05998d85acb0e9c2729da1a7dbbec38da035bbd3 --- arch/arm/configs/vigor_aosp_defconfig | 26 +- drivers/usb/gadget/Kconfig | 13 + drivers/usb/gadget/android.c | 47 +- drivers/usb/gadget/ci13xxx_udc.c | 14 +- drivers/usb/gadget/f_adb.c | 2 + drivers/usb/gadget/f_projector.c | 889 ++++++++++++++++++++------ drivers/usb/gadget/f_usbnet.c | 840 ------------------------ drivers/usb/gadget/htc_attr.c | 628 +----------------- drivers/usb/gadget/u_xpst.c | 5 +- include/linux/usb/android.h | 1 + 10 files changed, 770 insertions(+), 1695 deletions(-) delete mode 100644 drivers/usb/gadget/f_usbnet.c diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index c6f17b6a..7c678bfe 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -2268,40 +2268,32 @@ CONFIG_USB_GADGET_DUALSPEED=y # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set CONFIG_USB_G_ANDROID=y -# CONFIG_USB_ANDROID_ACM is not set -CONFIG_USB_ANDROID_ADB=y -CONFIG_USB_ANDROID_DIAG=y -CONFIG_USB_ANDROID_MDM9K_DIAG=y -CONFIG_USB_ANDROID_MDM9K_MODEM=y -CONFIG_USB_ANDROID_MASS_STORAGE=y -CONFIG_USB_ANDROID_MTP=y -CONFIG_USB_ANDROID_RNDIS=y -# CONFIG_USB_ANDROID_RMNET is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_MSC_PROFILING is not set +CONFIG_MODEM_SUPPORT=y CONFIG_RMNET_SMD_CTL_CHANNEL="" CONFIG_RMNET_SMD_DATA_CHANNEL="" -# CONFIG_USB_ANDROID_RMNET_SDIO is not set CONFIG_RMNET_SDIO_CTL_CHANNEL=8 CONFIG_RMNET_SDIO_DATA_CHANNEL=8 -CONFIG_USB_ANDROID_RMNET_SMD_SDIO=y CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL=8 CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL=8 CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL="DATA40" # CONFIG_USB_ANDROID_RMNET_CTRL_SMD is not set # CONFIG_USB_F_SERIAL is not set -CONFIG_MODEM_SUPPORT=y -CONFIG_USB_ANDROID_SERIAL=y -CONFIG_USB_ANDROID_PROJECTOR=y -CONFIG_USB_ANDROID_ECM=y # CONFIG_USB_F_SERIAL_SDIO is not set # CONFIG_USB_F_SERIAL_SMD is not set -CONFIG_USB_ANDROID_USBNET=y # CONFIG_USB_CDC_COMPOSITE is not set # CONFIG_USB_G_MULTI is not set # CONFIG_USB_G_HID is not set # CONFIG_USB_G_DBGP is not set # CONFIG_USB_ACCESSORY_DETECT_BY_ADC is not set -# CONFIG_USB_CSW_HACK is not set +CONFIG_USB_CSW_HACK=y CONFIG_USB_GADGET_VERIZON_PRODUCT_ID=y +CONFIG_USB_HTC_SWITCH_STUB=y # # OTG and related infrastructure diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 
aa1fa639..6f6eb1fd 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -1090,6 +1090,12 @@ config USB_CSW_HACK This csw hack feature is for increasing the performance of the mass storage +config USB_MSC_PROFILING + bool "USB MSC performance profiling" + help + If you say Y here, support will be added for collecting + Mass-storage performance numbers at the VFS level. + config MODEM_SUPPORT boolean "modem support in generic serial function driver" depends on USB_G_ANDROID @@ -1164,3 +1170,10 @@ config USB_ANDROID_RMNET_CTRL_SMD Data interface used is BAM. endif # USB_GADGET + +config USB_HTC_SWITCH_STUB + depends on USB_G_ANDROID + bool "USB HTC function switch stub" + default n + help + Provide dummy HTC USB function switch sysfs device for libhtc_ril. diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c index 403c3737..f24e74d8 100644 --- a/drivers/usb/gadget/android.c +++ b/drivers/usb/gadget/android.c @@ -2,6 +2,7 @@ * Gadget Driver for Android * * Copyright (C) 2008 Google, Inc. + * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * Author: Mike Lockwood * * This software is licensed under the terms of the GNU General Public @@ -878,14 +879,28 @@ static int mass_storage_function_init(struct android_usb_function *f, struct mass_storage_function_config *config; struct fsg_common *common; int err; + struct android_dev *dev = _android_dev; + int i; config = kzalloc(sizeof(struct mass_storage_function_config), GFP_KERNEL); if (!config) return -ENOMEM; - config->fsg.nluns = 1; - config->fsg.luns[0].removable = 1; + if (dev->pdata->nluns) { + config->fsg.nluns = dev->pdata->nluns; + if (config->fsg.nluns > FSG_MAX_LUNS) + config->fsg.nluns = FSG_MAX_LUNS; + for (i = 0; i < config->fsg.nluns; i++) { + config->fsg.luns[i].cdrom = 0; + config->fsg.luns[i].removable = 1; + config->fsg.luns[i].ro = 0; + } + } else { + /* default value */ + config->fsg.nluns = 1; + config->fsg.luns[0].removable = 1; + } common = fsg_common_init(NULL, cdev, &config->fsg); if (IS_ERR(common)) { @@ -893,13 +908,15 @@ static int mass_storage_function_init(struct android_usb_function *f, return PTR_ERR(common); } - err = sysfs_create_link(&f->dev->kobj, - &common->luns[0].dev.kobj, - "lun"); - if (err) { - fsg_common_release(&common->ref); - kfree(config); - return err; + for (i = 0; i < config->fsg.nluns; i++) { + err = sysfs_create_link(&f->dev->kobj, + &common->luns[i].dev.kobj, + common->luns[i].dev.kobj.name); + if (err) { + fsg_common_release(&common->ref); + kfree(config); + return err; + } } config->common = common; @@ -1323,6 +1340,10 @@ static struct device_attribute *android_usb_attributes[] = { NULL }; +#ifdef CONFIG_USB_HTC_SWITCH_STUB +#include "htc_attr.c" +#endif + /*-------------------------------------------------------------------------*/ /* Composite driver */ @@ -1519,6 +1540,14 @@ static int __devinit android_probe(struct platform_device *pdev) struct android_usb_platform_data *pdata = pdev->dev.platform_data; struct android_dev *dev = _android_dev; +#ifdef CONFIG_USB_HTC_SWITCH_STUB + int err; + err = sysfs_create_group(&pdev->dev.kobj, &htc_attr_group); + if (err) { + pr_err("%s: failed to create HTC USB devices\n", __func__); + } +#endif + dev->pdata = pdata; return 0; diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c index e97602d7..287c9933 100644 --- a/drivers/usb/gadget/ci13xxx_udc.c +++ b/drivers/usb/gadget/ci13xxx_udc.c @@ -878,7 +878,7 @@ static void dbg_print(u8 addr, const char *name, int status, 
const char *extra) stamp = stamp * 1000000 + tval.tv_usec; scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG, - "%04X\t» %02X %-7.7s %4i «\t%s\n", + "%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); dbg_inc(&dbg_data.idx); @@ -886,7 +886,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra) write_unlock_irqrestore(&dbg_data.lck, flags); if (dbg_data.tty != 0) - pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n", + pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); } @@ -1046,15 +1046,15 @@ static ssize_t show_inters(struct device *dev, struct device_attribute *attr, n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n", isr_statistics.test); - n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n", isr_statistics.ui); - n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n", isr_statistics.uei); - n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n", isr_statistics.pci); - n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n", isr_statistics.uri); - n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n", isr_statistics.sli); n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n", isr_statistics.none); diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c index b85805c0..cae01368 100644 --- a/drivers/usb/gadget/f_adb.c +++ b/drivers/usb/gadget/f_adb.c @@ -446,6 +446,8 @@ static struct miscdevice adb_device = { }; + + static int adb_function_bind(struct usb_configuration *c, struct usb_function *f) { diff --git a/drivers/usb/gadget/f_projector.c b/drivers/usb/gadget/f_projector.c index 5d8feda9..517fdba0 100644 --- a/drivers/usb/gadget/f_projector.c +++ b/drivers/usb/gadget/f_projector.c @@ -21,9 +21,18 @@ #include #include #include +#include +#include +#include #include - #include +#include +#include + +#ifdef DUMMY_DISPLAY_MODE +#include "f_projector_debug.h" +#endif + #ifdef DBG #undef DBG #endif @@ -34,25 +43,41 @@ #define DBG(x...) printk(KERN_INFO x) #endif +#ifdef VDBG +#undef VDBG +#endif + +#if 1 +#define VDBG(x...) do {} while (0) +#else +#define VDBG(x...) printk(KERN_INFO x) +#endif + + /*16KB*/ #define TXN_MAX 16384 #define RXN_MAX 4096 -/* number of rx and tx requests to allocate */ +/* number of rx requests to allocate */ #define PROJ_RX_REQ_MAX 4 -#if 0 -#define PROJ_TX_REQ_MAX 115 /*for resolution 1280*736*2 / 16k */ -#define PROJ_TX_REQ_MAX 75 /*for resolution 1024*600*2 / 16k */ -#define PROJ_TX_REQ_MAX 56 /*for 8k resolution 480*800*2 / 16k */ -#endif + +#define DEFAULT_PROJ_WIDTH 480 +#define DEFAULT_PROJ_HEIGHT 800 + +#define TOUCH_WIDTH 480 +#define TOUCH_HEIGHT 800 #define BITSPIXEL 16 #define PROJECTOR_FUNCTION_NAME "projector" +#define htc_mode_info(fmt, args...) 
\ + printk(KERN_INFO "[htc_mode] " pr_fmt(fmt), ## args) + static struct wake_lock prj_idle_wake_lock; static int keypad_code[] = {KEY_WAKEUP, 0, 0, 0, KEY_HOME, KEY_MENU, KEY_BACK}; -static const char shortname[] = "android_projector"; +static const char cand_shortname[] = "htc_cand"; +static const char htcmode_shortname[] = "htcmode"; struct projector_dev { struct usb_function function; @@ -62,6 +87,9 @@ struct projector_dev { struct usb_ep *ep_in; struct usb_ep *ep_out; + struct usb_endpoint_descriptor *in; + struct usb_endpoint_descriptor *out; + int online; int error; @@ -82,7 +110,20 @@ struct projector_dev { struct input_dev *keypad_input; struct input_dev *touch_input; char *fbaddr; - struct platform_device *pdev; + + atomic_t cand_online; + struct switch_dev cand_sdev; + struct switch_dev htcmode_sdev; + struct work_struct notifier_work; + struct work_struct htcmode_notifier_work; + + struct workqueue_struct *wq_display; + struct work_struct send_fb_work; + int start_send_fb; + + /* HTC Mode Protocol Info */ + struct htcmode_protocol *htcmode_proto; + u8 is_htcmode; }; static struct usb_interface_descriptor projector_interface_desc = { @@ -155,8 +196,31 @@ static struct usb_gadget_strings *projector_strings[] = { &projector_string_table, NULL, }; -static struct projector_dev _projector_dev; -struct device prj_dev; + +static struct projector_dev *projector_dev = NULL; + +struct size { + int w; + int h; +}; + +enum { + NOT_ON_AUTOBOT, + DOCK_ON_AUTOBOT, + HTC_MODE_RUNNING +}; +/* the value of htc_mode_status should be one of above status */ +static atomic_t htc_mode_status = ATOMIC_INIT(0); + +static void usb_setup_andriod_projector(struct work_struct *work); +static DECLARE_WORK(conf_usb_work, usb_setup_andriod_projector); + + +static void usb_setup_andriod_projector(struct work_struct *work) +{ + android_switch_htc_mode(); + htc_mode_enable(1); +} static inline struct projector_dev *proj_func_to_dev(struct usb_function *f) { @@ -188,21 +252,6 @@ static void projector_request_free(struct usb_request *req, struct usb_ep *ep) } } -static inline int _lock(atomic_t *excl) -{ - if (atomic_inc_return(excl) == 1) { - return 0; - } else { - atomic_dec(excl); - return -1; - } -} - -static inline void _unlock(atomic_t *excl) -{ - atomic_dec(excl); -} - /* add a request to the tail of a list */ static void proj_req_put(struct projector_dev *dev, struct list_head *head, struct usb_request *req) @@ -231,20 +280,20 @@ static struct usb_request *proj_req_get(struct projector_dev *dev, struct list_h return req; } -static void projector_queue_out(struct projector_dev *ctxt) +static void projector_queue_out(struct projector_dev *dev) { int ret; struct usb_request *req; /* if we have idle read requests, get them queued */ - while ((req = proj_req_get(ctxt, &ctxt->rx_idle))) { + while ((req = proj_req_get(dev, &dev->rx_idle))) { req->length = RXN_MAX; - DBG("%s: queue %p\n", __func__, req); - ret = usb_ep_queue(ctxt->ep_out, req, GFP_ATOMIC); + VDBG("%s: queue %p\n", __func__, req); + ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC); if (ret < 0) { - DBG("projector: failed to queue out req (%d)\n", ret); - ctxt->error = 1; - proj_req_put(ctxt, &ctxt->rx_idle, req); + VDBG("projector: failed to queue out req (%d)\n", ret); + dev->error = 1; + proj_req_put(dev, &dev->rx_idle, req); break; } } @@ -305,10 +354,10 @@ static void projector_send_touch_event(struct projector_dev *dev, } /* key code: 4 -> home, 5-> menu, 6 -> back, 0 -> system wake */ -static void projector_send_Key_event(struct projector_dev 
*ctxt, +static void projector_send_Key_event(struct projector_dev *dev, int iKeycode) { - struct input_dev *kdev = ctxt->keypad_input; + struct input_dev *kdev = dev->keypad_input; printk(KERN_INFO "%s keycode %d\n", __func__, iKeycode); /* ics will use default Generic.kl to translate linux keycode WAKEUP @@ -325,60 +374,146 @@ static void projector_send_Key_event(struct projector_dev *ctxt, input_sync(kdev); } -static void send_fb(struct projector_dev *ctxt) +#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) +extern char *get_fb_addr(void); +#endif + +static void send_fb(struct projector_dev *dev) { struct usb_request *req; - char *frame; int xfer; - int count = ctxt->framesize; + int count = dev->framesize; +#ifdef DUMMY_DISPLAY_MODE + unsigned short *frame; +#else + char *frame; +#endif - if (msmfb_get_fb_area()) - frame = (ctxt->fbaddr + ctxt->framesize); - else - frame = ctxt->fbaddr; + +#ifdef DUMMY_DISPLAY_MODE + frame = test_frame; +#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + frame = get_fb_addr(); +#else + if (msmfb_get_fb_area()) + frame = (dev->fbaddr + dev->framesize); + else + frame = dev->fbaddr; +#endif + if (frame == NULL) + return; while (count > 0) { - req = proj_req_get(ctxt, &ctxt->tx_idle); + req = proj_req_get(dev, &dev->tx_idle); if (req) { xfer = count > TXN_MAX? TXN_MAX : count; req->length = xfer; memcpy(req->buf, frame, xfer); - if (usb_ep_queue(ctxt->ep_in, req, GFP_ATOMIC) < 0) { - proj_req_put(ctxt, &ctxt->tx_idle, req); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); printk(KERN_WARNING "%s: failed to queue req %p\n", __func__, req); break; } + + count -= xfer; +#ifdef DUMMY_DISPLAY_MODE + frame += xfer/2; +#else + frame += xfer; +#endif + } else { + printk(KERN_ERR "send_fb: no req to send\n"); + break; + } + } +} + +static void send_fb2(struct projector_dev *dev) +{ + struct usb_request *req; + int xfer; + +#ifdef DUMMY_DISPLAY_MODE + unsigned short *frame; + int count = dev->framesize; +#else + char *frame; + int count = dev->htcmode_proto->server_info.width * + dev->htcmode_proto->server_info.height * (BITSPIXEL / 8); +#endif + +#ifdef DUMMY_DISPLAY_MODE + frame = test_frame; +#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + frame = get_fb_addr(); +#else + if (msmfb_get_fb_area()) + frame = (dev->fbaddr + dev->framesize); + else + frame = dev->fbaddr; +#endif + if (frame == NULL) + return; + + while (count > 0 && dev->online) { + + while (!(req = proj_req_get(dev, &dev->tx_idle))) { + msleep(1); + + if (!dev->online) + break; + } + + if (req) { + xfer = count > TXN_MAX? 
TXN_MAX : count; + req->length = xfer; + memcpy(req->buf, frame, xfer); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); + printk(KERN_WARNING "%s: failed to queue req" + " %p\n", __func__, req); + break; + } count -= xfer; +#ifdef DUMMY_DISPLAY_MODE + frame += xfer/2; +#else frame += xfer; +#endif } else { printk(KERN_ERR "send_fb: no req to send\n"); break; } } } -static void send_info(struct projector_dev *ctxt) + +void send_fb_do_work(struct work_struct *work) +{ + struct projector_dev *dev = projector_dev; + while (dev->start_send_fb) { + send_fb2(dev); + msleep(1); + } +} + + + +static void send_info(struct projector_dev *dev) { struct usb_request *req; - req = proj_req_get(ctxt, &ctxt->tx_idle); + req = proj_req_get(dev, &dev->tx_idle); if (req) { req->length = 20; memcpy(req->buf, "okay", 4); - memcpy(req->buf + 4, &ctxt->bitsPixel, 4); - #if defined(CONFIG_MACH_PARADISE) - if (machine_is_paradise()) { - ctxt->framesize = 320 * 480 * 2; - printk(KERN_INFO "send_info: framesize %d\n", - ctxt->framesize); - } - #endif - memcpy(req->buf + 8, &ctxt->framesize, 4); - memcpy(req->buf + 12, &ctxt->width, 4); - memcpy(req->buf + 16, &ctxt->height, 4); - if (usb_ep_queue(ctxt->ep_in, req, GFP_ATOMIC) < 0) { - proj_req_put(ctxt, &ctxt->tx_idle, req); + memcpy(req->buf + 4, &dev->bitsPixel, 4); + memcpy(req->buf + 8, &dev->framesize, 4); + memcpy(req->buf + 12, &dev->width, 4); + memcpy(req->buf + 16, &dev->height, 4); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); printk(KERN_WARNING "%s: failed to queue req %p\n", __func__, req); } @@ -386,81 +521,263 @@ static void send_info(struct projector_dev *ctxt) printk(KERN_INFO "%s: no req to send\n", __func__); } -static void projector_get_msmfb(struct projector_dev *ctxt) + +static void send_server_info(struct projector_dev *dev) +{ + struct usb_request *req; + + req = proj_req_get(dev, &dev->tx_idle); + if (req) { + req->length = sizeof(struct msm_server_info); + memcpy(req->buf, &dev->htcmode_proto->server_info, req->length); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); + printk(KERN_WARNING "%s: failed to queue req %p\n", + __func__, req); + } + } else { + printk(KERN_INFO "%s: no req to send\n", __func__); + } +} + +static void send_server_nonce(struct projector_dev *dev) +{ + struct usb_request *req; + int nonce[NONCE_SIZE]; + int i = 0; + + req = proj_req_get(dev, &dev->tx_idle); + if (req) { + req->length = NONCE_SIZE * sizeof(int); + for (i = 0; i < NONCE_SIZE; i++) + nonce[i] = get_random_int(); + memcpy(req->buf, nonce, req->length); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); + printk(KERN_WARNING "%s: failed to queue req %p\n", + __func__, req); + } + } else { + printk(KERN_INFO "%s: no req to send\n", __func__); + } +} + +struct size rotate(struct size v) +{ + struct size r; + r.w = v.h; + r.h = v.w; + return r; +} + +static struct size get_projection_size(struct projector_dev *dev, struct msm_client_info *client_info) +{ + int server_width = 0; + int server_height = 0; + struct size client; + struct size server; + struct size ret; + int perserve_aspect_ratio = client_info->display_conf & (1 << 0); + int server_orientation = 0; + int client_orientation = (client_info->width > client_info->height); + int align_w = 0; + + server_width = dev->width; + server_height = dev->height; + + server_orientation = (server_width > server_height); + + 
printk(KERN_INFO "%s(): perserve_aspect_ratio= %d\n", __func__, perserve_aspect_ratio); + + client.w = client_info->width; + client.h = client_info->height; + server.w = server_width; + server.h = server_height; + + if (server_orientation != client_orientation) + client = rotate(client); + + align_w = client.h * server.w > server.h * client.w; + + if (perserve_aspect_ratio) { + if (align_w) { + ret.w = client.w; + ret.h = (client.w * server.h) / server.w; + } else { + ret.w = (client.h * server.w) / server.h; + ret.h = client.h; + } + + ret.w = round_down(ret.w, 32); + } else { + ret = client; + } + + printk(KERN_INFO "projector size(w=%d, h=%d)\n", ret.w, ret.h); + + return ret; +} + + +static void projector_get_msmfb(struct projector_dev *dev) { struct msm_fb_info fb_info; msmfb_get_var(&fb_info); - ctxt->bitsPixel = BITSPIXEL; - ctxt->width = fb_info.xres; - ctxt->height = fb_info.yres; - ctxt->fbaddr = fb_info.fb_addr; - printk(KERN_INFO "projector: width %d, height %d\n", - fb_info.xres, fb_info.yres); + dev->bitsPixel = BITSPIXEL; + dev->width = fb_info.xres; + dev->height = fb_info.yres; +#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + dev->fbaddr = get_fb_addr(); +#else + dev->fbaddr = fb_info.fb_addr; +#endif + dev->framesize = dev->width * dev->height * (dev->bitsPixel / 8); + printk(KERN_INFO "projector: width %d, height %d framesize %d, %p\n", + fb_info.xres, fb_info.yres, dev->framesize, dev->fbaddr); +} + +/* + * Handle HTC Mode specific messages and return 1 if message has been handled + */ +static int projector_handle_htcmode_msg(struct projector_dev *dev, struct usb_request *req) +{ + unsigned char *data = req->buf; + int handled = 1; + struct size projector_size; + + if ((data[0] == CLIENT_INFO_MESGID) && (req->actual == sizeof(struct msm_client_info))) { + memcpy(&dev->htcmode_proto->client_info, req->buf, sizeof(struct msm_client_info)); + + projector_size = get_projection_size(dev, &dev->htcmode_proto->client_info); + projector_get_msmfb(dev); + + dev->htcmode_proto->server_info.mesg_id = SERVER_INFO_MESGID; + dev->htcmode_proto->server_info.width = projector_size.w; + dev->htcmode_proto->server_info.height = projector_size.h; + dev->htcmode_proto->server_info.pixel_format = PIXEL_FORMAT_RGB565; + dev->htcmode_proto->server_info.ctrl_conf = CTRL_CONF_TOUCH_EVENT_SUPPORTED | + CTRL_CONF_NUM_SIMULTANEOUS_TOUCH; + send_server_info(dev); - ctxt->framesize = (ctxt->width)*(ctxt->height)*2; - printk(KERN_INFO "projector: width %d, height %d %d\n", - fb_info.xres, fb_info.yres, ctxt->framesize); + if (dev->htcmode_proto->version >= 0x0005) + send_server_nonce(dev); + } else if (dev->htcmode_proto->version >= 0x0005 && + data[0] == AUTH_CLIENT_NONCE_MESGID) { + /* TODO: Future extension */ + } else if (!strncmp("startfb", data, 7)) { + dev->start_send_fb = true; + queue_work(dev->wq_display, &dev->send_fb_work); + + dev->frame_count++; + + if (atomic_inc_return(&htc_mode_status) != HTC_MODE_RUNNING) + atomic_dec(&htc_mode_status); + + htc_mode_info("startfb current htc_mode_status = %d\n", + atomic_read(&htc_mode_status)); + schedule_work(&dev->htcmode_notifier_work); + + /* 30s send system wake code */ + if (dev->frame_count == 30 * 30) { + projector_send_Key_event(dev, 0); + dev->frame_count = 0; + } + } else if (!strncmp("endfb", data, 5)) { + dev->start_send_fb = false; + if (atomic_dec_return(&htc_mode_status) != DOCK_ON_AUTOBOT) + atomic_inc(&htc_mode_status); + htc_mode_info("endfb current htc_mode_status = %d\n", + atomic_read(&htc_mode_status)); + 
schedule_work(&dev->htcmode_notifier_work); + } else if (!strncmp("startcand", data, 9)) { + atomic_set(&dev->cand_online, 1); + htc_mode_info("startcand %d\n", atomic_read(&dev->cand_online)); + + schedule_work(&dev->notifier_work); + } else if (!strncmp("endcand", data, 7)) { + atomic_set(&dev->cand_online, 0); + htc_mode_info("endcand %d\n", atomic_read(&dev->cand_online)); + + schedule_work(&dev->notifier_work); + } else { + handled = 0; + } + + return handled; } static void projector_complete_in(struct usb_ep *ep, struct usb_request *req) { - struct projector_dev *dev = &_projector_dev; + struct projector_dev *dev = projector_dev; proj_req_put(dev, &dev->tx_idle, req); } static void projector_complete_out(struct usb_ep *ep, struct usb_request *req) { - struct projector_dev *ctxt = &_projector_dev; + struct projector_dev *dev = projector_dev; unsigned char *data = req->buf; int mouse_data[3] = {0, 0, 0}; int i; - DBG("%s: status %d, %d bytes\n", __func__, + int handled = 0; + VDBG("%s: status %d, %d bytes\n", __func__, req->status, req->actual); if (req->status != 0) { - ctxt->error = 1; - proj_req_put(ctxt, &ctxt->rx_idle, req); + dev->error = 1; + proj_req_put(dev, &dev->rx_idle, req); return ; } - /* for mouse event type, 1 :move, 2:down, 3:up */ - mouse_data[0] = *((int *)(req->buf)); - - if (!strncmp("init", data, 4)) { - if (!ctxt->init_done) { - projector_get_msmfb(ctxt); - ctxt->init_done = 1; - } - send_info(ctxt); - /* system wake code */ - projector_send_Key_event(ctxt, 0); - } else if (*data == ' ') { - send_fb(ctxt); - ctxt->frame_count++; - /* 30s send system wake code */ - if (ctxt->frame_count == 30 * 30) { - projector_send_Key_event(ctxt, 0); - ctxt->frame_count = 0; - } - } else if (mouse_data[0] > 0) { - if (mouse_data[0] < 4) { - for (i = 0; i < 3; i++) - mouse_data[i] = *(((int *)(req->buf))+i); - projector_send_touch_event(ctxt, - mouse_data[0], mouse_data[1], mouse_data[2]); - } else { - projector_send_Key_event(ctxt, mouse_data[0]); - printk(KERN_INFO "projector: Key command data %02x, keycode %d\n", - *((char *)(req->buf)), mouse_data[0]); - } - } else if (mouse_data[0] != 0) - printk(KERN_ERR "projector: Unknow command data %02x, mouse %d,%d,%d\n", - *((char *)(req->buf)), mouse_data[0], mouse_data[1], mouse_data[2]); - - proj_req_put(ctxt, &ctxt->rx_idle, req); - projector_queue_out(ctxt); + if (dev->is_htcmode) + handled = projector_handle_htcmode_msg(dev, req); + + if (!handled) { + /* for mouse event type, 1 :move, 2:down, 3:up */ + mouse_data[0] = *((int *)(req->buf)); + + if (!strncmp("init", data, 4)) { + + dev->init_done = 1; + dev->bitsPixel = BITSPIXEL; + dev->width = DEFAULT_PROJ_WIDTH; + dev->height = DEFAULT_PROJ_HEIGHT; + dev->framesize = dev->width * dev->height * (BITSPIXEL / 8); + + send_info(dev); + /* system wake code */ + projector_send_Key_event(dev, 0); + + atomic_set( &htc_mode_status, HTC_MODE_RUNNING); + htc_mode_info("init current htc_mode_status = %d\n", + atomic_read(&htc_mode_status)); + schedule_work(&dev->htcmode_notifier_work); + } else if (*data == ' ') { + send_fb(dev); + dev->frame_count++; + /* 30s send system wake code */ + if (dev->frame_count == 30 * 30) { + projector_send_Key_event(dev, 0); + dev->frame_count = 0; + } + } else if (mouse_data[0] > 0) { + if (mouse_data[0] < 4) { + for (i = 0; i < 3; i++) + mouse_data[i] = *(((int *)(req->buf))+i); + projector_send_touch_event(dev, + mouse_data[0], mouse_data[1], mouse_data[2]); + } else { + projector_send_Key_event(dev, mouse_data[0]); + printk(KERN_INFO "projector: Key 
command data %02x, keycode %d\n", + *((char *)(req->buf)), mouse_data[0]); + } + } else if (mouse_data[0] != 0) + printk(KERN_ERR "projector: Unknow command data %02x, mouse %d,%d,%d\n", + *((char *)(req->buf)), mouse_data[0], mouse_data[1], mouse_data[2]); + } + proj_req_put(dev, &dev->rx_idle, req); + projector_queue_out(dev); wake_lock_timeout(&prj_idle_wake_lock, HZ / 2); } @@ -529,7 +846,7 @@ projector_function_bind(struct usb_configuration *c, struct usb_function *f) int ret; dev->cdev = cdev; - DBG("projector_function_bind dev: %p\n", dev); + DBG("%s\n", __func__); /* allocate interface ID(s) */ id = usb_interface_id(c, f); @@ -557,70 +874,53 @@ projector_function_bind(struct usb_configuration *c, struct usb_function *f) return 0; } -static void -projector_function_unbind(struct usb_configuration *c, struct usb_function *f) -{ - struct projector_dev *dev = proj_func_to_dev(f); - struct usb_request *req; - - while ((req = proj_req_get(dev, &dev->tx_idle))) - projector_request_free(req, dev->ep_in); - while ((req = proj_req_get(dev, &dev->rx_idle))) - projector_request_free(req, dev->ep_out); - - dev->online = 0; - dev->error = 1; - if (dev->touch_input) - input_unregister_device(dev->touch_input); - if (dev->keypad_input) - input_unregister_device(dev->keypad_input); -} static int projector_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { - struct projector_dev *dev = proj_func_to_dev(f); + struct projector_dev *dev = proj_func_to_dev(f); struct usb_composite_dev *cdev = f->config->cdev; + struct android_dev *adev = _android_dev; + struct android_usb_function *af; int ret; DBG("%s intf: %d alt: %d\n", __func__, intf, alt); - ret = usb_ep_enable(dev->ep_in, - ep_choose(cdev->gadget, + + dev->in = ep_choose(cdev->gadget, &projector_highspeed_in_desc, - &projector_fullspeed_in_desc)); + &projector_fullspeed_in_desc); + + dev->out = ep_choose(cdev->gadget, + &projector_highspeed_out_desc, + &projector_fullspeed_out_desc); + + ret = usb_ep_enable(dev->ep_in, dev->in); if (ret) return ret; - ret = usb_ep_enable(dev->ep_out, - ep_choose(cdev->gadget, - &projector_highspeed_out_desc, - &projector_fullspeed_out_desc)); + + ret = usb_ep_enable(dev->ep_out,dev->out); if (ret) { usb_ep_disable(dev->ep_in); return ret; } - dev->online = 1; + + dev->online = 0; + list_for_each_entry(af, &adev->enabled_functions, enabled_list) { + if (!strcmp(af->name, f->name)) { + dev->online = 1; + break; + } + } projector_queue_out(dev); return 0; } -static void projector_function_disable(struct usb_function *f) -{ - struct projector_dev *dev = proj_func_to_dev(f); - - DBG("projector_function_disable\n"); - dev->online = 0; - dev->error = 1; - usb_ep_disable(dev->ep_in); - usb_ep_disable(dev->ep_out); - - VDBG(dev->cdev, "%s disabled\n", dev->function.name); -} static int projector_touch_init(struct projector_dev *dev) { - int x = dev->width; - int y = dev->height; + int x = TOUCH_WIDTH; + int y = TOUCH_HEIGHT; int ret = 0; struct input_dev *tdev = dev->touch_input; @@ -639,33 +939,6 @@ static int projector_touch_init(struct projector_dev *dev) set_bit(BTN_2, tdev->keybit); set_bit(EV_ABS, tdev->evbit); - if (x == 0) { - printk(KERN_ERR "%s: x=0\n", __func__); - #if defined(CONFIG_ARCH_QSD8X50) - x = 480; - #elif defined(CONFIG_MACH_PARADISE) - if (machine_is_paradise()) - x = 240; - else - x = 320; - #else - x = 320; - #endif - } - - if (y == 0) { - printk(KERN_ERR "%s: y=0\n", __func__); - #if defined(CONFIG_ARCH_QSD8X50) - y = 800; - #elif defined(CONFIG_MACH_PARADISE) - if 
(machine_is_paradise()) - y = 400; - else - y = 480; - #else - y = 480; - #endif - } /* Set input parameters boundary. */ input_set_abs_params(tdev, ABS_X, 0, x, 0, 0); input_set_abs_params(tdev, ABS_Y, 0, y, 0, 0); @@ -750,13 +1023,128 @@ static ssize_t show_enable(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(enable, 0664, show_enable, store_enable); #endif -static int projector_bind_config(struct usb_configuration *c) + +static void cand_online_notify(struct work_struct *w) +{ + struct projector_dev *dev = container_of(w, + struct projector_dev, notifier_work); + DBG("%s\n", __func__); + switch_set_state(&dev->cand_sdev, atomic_read(&dev->cand_online)); +} + +static void htcmode_status_notify(struct work_struct *w) +{ + struct projector_dev *dev = container_of(w, + struct projector_dev, htcmode_notifier_work); + DBG("%s\n", __func__); + switch_set_state(&dev->htcmode_sdev, atomic_read(&htc_mode_status)); +} + +/* + * 1: enable; 0: disable + */ +void htc_mode_enable(int enable) +{ + htc_mode_info("%s = %d, current htc_mode_status = %d\n", + __func__, enable, atomic_read(&htc_mode_status)); + + if (enable) + atomic_set(&htc_mode_status, DOCK_ON_AUTOBOT); + else + atomic_set(&htc_mode_status, NOT_ON_AUTOBOT); + + htcmode_status_notify(&projector_dev->htcmode_notifier_work); +} + +int check_htc_mode_status(void) +{ + return atomic_read(&htc_mode_status); +} + +static ssize_t print_cand_switch_name(struct switch_dev *sdev, char *buf) +{ + return sprintf(buf, "%s\n", cand_shortname); +} + +static ssize_t print_cand_switch_state(struct switch_dev *cand_sdev, char *buf) +{ + struct projector_dev *dev = container_of(cand_sdev, + struct projector_dev, cand_sdev); + return sprintf(buf, "%s\n", (atomic_read(&dev->cand_online) ? + "online" : "offline")); +} + +static ssize_t print_htcmode_switch_name(struct switch_dev *sdev, char *buf) +{ + return sprintf(buf, "%s\n", htcmode_shortname); +} + +static ssize_t print_htcmode_switch_state(struct switch_dev *htcmode_sdev, char *buf) +{ + return sprintf(buf, "%s\n", (atomic_read(&htc_mode_status)==HTC_MODE_RUNNING ? + "projecting" : (atomic_read(&htc_mode_status)==DOCK_ON_AUTOBOT ? 
"online" : "offline"))); +} + + +static void projector_function_disable(struct usb_function *f) +{ + struct projector_dev *dev = proj_func_to_dev(f); + + DBG("%s\n", __func__); + + dev->start_send_fb = false; + dev->online = 0; + dev->error = 1; + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + + atomic_set(&dev->cand_online, 0); + schedule_work(&dev->notifier_work); + + VDBG(dev->cdev, "%s disabled\n", dev->function.name); +} + + +static void +projector_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct projector_dev *dev = proj_func_to_dev(f); + struct usb_request *req; + + DBG("%s\n", __func__); + + destroy_workqueue(dev->wq_display); + + while ((req = proj_req_get(dev, &dev->tx_idle))) + projector_request_free(req, dev->ep_in); + while ((req = proj_req_get(dev, &dev->rx_idle))) + projector_request_free(req, dev->ep_out); + + dev->online = 0; + dev->error = 1; + dev->is_htcmode = 0; + + if (dev->touch_input) { + input_unregister_device(dev->touch_input); + input_free_device(dev->touch_input); + } + if (dev->keypad_input) { + input_unregister_device(dev->keypad_input); + input_free_device(dev->keypad_input); + } + +} + + +static int projector_bind_config(struct usb_configuration *c, + struct htcmode_protocol *config) { - struct projector_dev *dev = &_projector_dev; + struct projector_dev *dev; struct msm_fb_info fb_info; int ret = 0; - printk(KERN_INFO "projector_bind_config\n"); + DBG("%s\n", __func__); + dev = projector_dev; if (projector_string_defs[0].id == 0) { ret = usb_string_id(c->cdev); @@ -780,40 +1168,147 @@ static int projector_bind_config(struct usb_configuration *c) dev->bitsPixel = BITSPIXEL; dev->width = fb_info.xres; dev->height = fb_info.yres; +#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + dev->fbaddr = get_fb_addr(); +#else dev->fbaddr = fb_info.fb_addr; - +#endif dev->rx_req_count = PROJ_RX_REQ_MAX; dev->tx_req_count = (dev->width * dev->height * 2 / TXN_MAX) + 1; printk(KERN_INFO "[USB][Projector]resolution: %u*%u" ", rx_cnt: %u, tx_cnt:%u\n", dev->width, dev->height, dev->rx_req_count, dev->tx_req_count); - if (projector_touch_init(dev) < 0) - goto err; + goto err_free; if (projector_keypad_init(dev) < 0) - goto err; + goto err_free; + spin_lock_init(&dev->lock); + INIT_LIST_HEAD(&dev->rx_idle); + INIT_LIST_HEAD(&dev->tx_idle); ret = usb_add_function(c, &dev->function); if (ret) - goto err; + goto err_free; + + dev->wq_display = create_singlethread_workqueue("projector_mode"); + if (!dev->wq_display) + goto err_free_wq; + + workqueue_set_max_active(dev->wq_display,1); + + INIT_WORK(&dev->send_fb_work, send_fb_do_work); + + dev->init_done = 0; + dev->frame_count = 0; + dev->is_htcmode = 0; + dev->htcmode_proto = config; + dev->htcmode_proto->server_info.height = DEFAULT_PROJ_HEIGHT; + dev->htcmode_proto->server_info.width = DEFAULT_PROJ_WIDTH; + dev->htcmode_proto->client_info.display_conf = 0; return 0; -err: - printk(KERN_ERR "projector gadget driver failed to initialize\n"); + +err_free_wq: + destroy_workqueue(dev->wq_display); +err_free: + printk(KERN_ERR "projector gadget driver failed to initialize, err=%d\n", ret); return ret; } static int projector_setup(void) { - struct projector_dev *dev = &_projector_dev; - dev->init_done = 0; - dev->frame_count = 0; - wake_lock_init(&prj_idle_wake_lock, WAKE_LOCK_IDLE, "prj_idle_lock"); + struct projector_dev *dev; + int ret = 0; - spin_lock_init(&dev->lock); - INIT_LIST_HEAD(&dev->rx_idle); - INIT_LIST_HEAD(&dev->tx_idle); + DBG("%s\n", __func__); + dev = 
kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + projector_dev = dev; + + INIT_WORK(&dev->notifier_work, cand_online_notify); + INIT_WORK(&dev->htcmode_notifier_work, htcmode_status_notify); + + dev->cand_sdev.name = cand_shortname; + dev->cand_sdev.print_name = print_cand_switch_name; + dev->cand_sdev.print_state = print_cand_switch_state; + ret = switch_dev_register(&dev->cand_sdev); + if (ret < 0) { + printk(KERN_ERR "usb cand_sdev switch_dev_register register fail\n"); + goto err_free; + } + + dev->htcmode_sdev.name = htcmode_shortname; + dev->htcmode_sdev.print_name = print_htcmode_switch_name; + dev->htcmode_sdev.print_state = print_htcmode_switch_state; + ret = switch_dev_register(&dev->htcmode_sdev); + if (ret < 0) { + printk(KERN_ERR "usb htcmode_sdev switch_dev_register register fail\n"); + goto err_unregister_cand; + } + + wake_lock_init(&prj_idle_wake_lock, WAKE_LOCK_IDLE, "prj_idle_lock"); return 0; + +err_unregister_cand: + switch_dev_unregister(&dev->cand_sdev); +err_free: + kfree(dev); + printk(KERN_ERR "projector gadget driver failed to initialize, err=%d\n", ret); + return ret; + } +static void projector_cleanup(void) +{ + struct projector_dev *dev; + + dev = projector_dev; + + switch_dev_unregister(&dev->cand_sdev); + switch_dev_unregister(&dev->htcmode_sdev); + + kfree(dev); +} + +#ifdef CONFIG_USB_ANDROID_PROJECTOR_HTC_MODE +static int projector_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + int value = -EOPNOTSUPP; + + if (((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) && + (ctrl->bRequest == HTC_MODE_CONTROL_REQ)) { + if (check_htc_mode_status() == NOT_ON_AUTOBOT) + schedule_work(&conf_usb_work); + else { + if (projector_dev) { + projector_dev->htcmode_proto->version = le16_to_cpu(ctrl->wValue); + /* + * 0x0034 is for Autobot. It is not a correct HTC mode version. + */ + if (projector_dev->htcmode_proto->version == 0x0034) + projector_dev->htcmode_proto->version = 0x0003; + projector_dev->is_htcmode = 1; + printk(KERN_INFO "HTC Mode version = 0x%04X\n", projector_dev->htcmode_proto->version); + } else { + printk(KERN_ERR "%s: projector_dev is NULL!!", __func__); + } + } + value = 0; + } + + if (value >= 0) { + cdev->req->zero = 0; + cdev->req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (value < 0) + printk(KERN_ERR "%s setup response queue error\n", + __func__); + } + + return value; +} +#endif diff --git a/drivers/usb/gadget/f_usbnet.c b/drivers/usb/gadget/f_usbnet.c deleted file mode 100644 index 1945b61c..00000000 --- a/drivers/usb/gadget/f_usbnet.c +++ /dev/null @@ -1,840 +0,0 @@ -/* - * Gadget Driver for Motorola USBNet - * - * Copyright (C) 2009 Motorola, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* - * Macro Defines - */ - -#define EP0_BUFSIZE 256 -#define USBNET_FUNCTION_NAME "usbnet" - -/* Vendor Request to config IP */ -#define USBNET_SET_IP_ADDRESS 0x05 -#define USBNET_SET_SUBNET_MASK 0x06 -#define USBNET_SET_HOST_IP 0x07 - -/* Linux Network Interface */ -#define USB_MTU 1536 -#define MAX_BULK_TX_REQ_NUM 8 -#define MAX_BULK_RX_REQ_NUM 8 -#define MAX_INTR_RX_REQ_NUM 8 -#define STRING_INTERFACE 0 - -struct usbnet_context { - spinlock_t lock; /* For RX/TX list */ - struct net_device *dev; - - struct usb_gadget *gadget; - - struct usb_ep *bulk_in; - struct usb_ep *bulk_out; - struct usb_ep *intr_out; - u16 config; /* current USB config w_value */ - - struct list_head rx_reqs; - struct list_head tx_reqs; - - struct net_device_stats stats; - struct work_struct usbnet_config_wq; - u32 ip_addr; - u32 subnet_mask; - u32 router_ip; - u32 iff_flag; -}; - - -struct usbnet_device { - struct usb_function function; - struct usb_composite_dev *cdev; - struct usbnet_context *net_ctxt; -}; - -/* static strings, in UTF-8 */ -static struct usb_string usbnet_string_defs[] = { - [STRING_INTERFACE].s = "HTC Test Command", - { /* ZEROES END LIST */ }, -}; - -static struct usb_gadget_strings usbnet_string_table = { - .language = 0x0409, /* en-us */ - .strings = usbnet_string_defs, -}; - -static struct usb_gadget_strings *usbnet_strings[] = { - &usbnet_string_table, - NULL, -}; - - - -/* There is only one interface. 
*/ - -static struct usb_interface_descriptor usbnet_intf_desc = { - .bLength = sizeof usbnet_intf_desc, - .bDescriptorType = USB_DT_INTERFACE, - - .bNumEndpoints = 3, - .bInterfaceClass = 0x02, - .bInterfaceSubClass = 0x0a, - .bInterfaceProtocol = 0x01, -}; - - -static struct usb_endpoint_descriptor usbnet_fs_bulk_in_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_IN, - .bmAttributes = USB_ENDPOINT_XFER_BULK, -}; - -static struct usb_endpoint_descriptor usbnet_fs_bulk_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_BULK, -}; - -static struct usb_endpoint_descriptor fs_intr_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_INT, - .bInterval = 1, -}; - -static struct usb_descriptor_header *fs_function[] = { - (struct usb_descriptor_header *) &usbnet_intf_desc, - (struct usb_descriptor_header *) &usbnet_fs_bulk_in_desc, - (struct usb_descriptor_header *) &usbnet_fs_bulk_out_desc, - (struct usb_descriptor_header *) &fs_intr_out_desc, - NULL, -}; - -static struct usb_endpoint_descriptor usbnet_hs_bulk_in_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_IN, - .bmAttributes = USB_ENDPOINT_XFER_BULK, - .wMaxPacketSize = __constant_cpu_to_le16(512), - .bInterval = 0, -}; - -static struct usb_endpoint_descriptor usbnet_hs_bulk_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_BULK, - .wMaxPacketSize = __constant_cpu_to_le16(512), - .bInterval = 0, -}; - -static struct usb_endpoint_descriptor hs_intr_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = __constant_cpu_to_le16(64), - .bInterval = 1, -}; - -static struct usb_descriptor_header *hs_function[] = { - (struct usb_descriptor_header *) &usbnet_intf_desc, - (struct usb_descriptor_header *) &usbnet_hs_bulk_in_desc, - (struct usb_descriptor_header *) &usbnet_hs_bulk_out_desc, - (struct usb_descriptor_header *) &hs_intr_out_desc, - NULL, -}; - -#define DO_NOT_STOP_QUEUE 0 -#define STOP_QUEUE 1 - -#define USBNETDBG(context, fmt, args...) 
\ - do { \ - if (context && (context->gadget)) \ - dev_dbg(&(context->gadget->dev) , fmt , ## args); \ - } while (0) - -struct usbnet_device *_usbnet_dev; - -static inline struct usbnet_device *usbnet_func_to_dev(struct usb_function *f) -{ - return container_of(f, struct usbnet_device, function); -} - - -static int ether_queue_out(struct usb_request *req , - struct usbnet_context *context) -{ - unsigned long flags; - struct sk_buff *skb; - int ret; - - skb = alloc_skb(USB_MTU + NET_IP_ALIGN, GFP_ATOMIC); - if (!skb) { - USBNETDBG(context, "%s: failed to alloc skb\n", __func__); - ret = -ENOMEM; - goto fail; - } - - skb_reserve(skb, NET_IP_ALIGN); - - req->buf = skb->data; - req->length = USB_MTU; - req->context = skb; - - ret = usb_ep_queue(context->bulk_out, req, GFP_KERNEL); - if (ret == 0) - return 0; - else - kfree_skb(skb); -fail: - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->rx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - - return ret; -} - -struct usb_request *usb_get_recv_request(struct usbnet_context *context) -{ - unsigned long flags; - struct usb_request *req; - - spin_lock_irqsave(&context->lock, flags); - if (list_empty(&context->rx_reqs)) { - req = NULL; - } else { - req = list_first_entry(&context->rx_reqs, - struct usb_request, list); - list_del(&req->list); - } - spin_unlock_irqrestore(&context->lock, flags); - - return req; -} - -struct usb_request *usb_get_xmit_request(int stop_flag, struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - unsigned long flags; - struct usb_request *req; - - spin_lock_irqsave(&context->lock, flags); - if (list_empty(&context->tx_reqs)) { - req = NULL; - } else { - req = list_first_entry(&context->tx_reqs, - struct usb_request, list); - list_del(&req->list); - if (stop_flag == STOP_QUEUE && - list_empty(&context->tx_reqs)) - netif_stop_queue(dev); - } - spin_unlock_irqrestore(&context->lock, flags); - return req; -} - -static int usb_ether_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - struct usb_request *req; - unsigned long flags; - unsigned len; - int rc; - - req = usb_get_xmit_request(STOP_QUEUE, dev); - - if (!req) { - USBNETDBG(context, "%s: could not obtain tx request\n", - __func__); - return 1; - } - - /* Add 4 bytes CRC */ - skb->len += 4; - - /* ensure that we end with a short packet */ - len = skb->len; - if (!(len & 63) || !(len & 511)) - len++; - - req->context = skb; - req->buf = skb->data; - req->length = len; - - rc = usb_ep_queue(context->bulk_in, req, GFP_KERNEL); - if (rc != 0) { - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->tx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - - dev_kfree_skb_any(skb); - context->stats.tx_dropped++; - - USBNETDBG(context, - "%s: could not queue tx request\n", __func__); - } - - return 0; -} - -static void usb_ether_tx_timeout(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - USBNETDBG(context, "%s\n", __func__); -} - -static int usb_ether_open(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - USBNETDBG(context, "%s\n", __func__); - return 0; -} - -static int usb_ether_stop(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - USBNETDBG(context, "%s\n", __func__); - return 0; -} - -static struct net_device_stats *usb_ether_get_stats(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - 
USBNETDBG(context, "%s\n", __func__); - return &context->stats; -} - -static void usbnet_if_config(struct work_struct *work) -{ - struct ifreq ifr; - mm_segment_t saved_fs; - unsigned err; - struct sockaddr_in *sin; - struct usbnet_context *context = container_of(work, - struct usbnet_context, usbnet_config_wq); - - memset(&ifr, 0, sizeof(ifr)); - sin = (void *) &(ifr.ifr_ifru.ifru_addr); - strncpy(ifr.ifr_ifrn.ifrn_name, context->dev->name, - sizeof(ifr.ifr_ifrn.ifrn_name)); - sin->sin_family = AF_INET; - - sin->sin_addr.s_addr = context->ip_addr; - saved_fs = get_fs(); - set_fs(get_ds()); - err = devinet_ioctl(dev_net(context->dev), SIOCSIFADDR, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFADDR\n", __func__); - - sin->sin_addr.s_addr = context->subnet_mask; - err = devinet_ioctl(dev_net(context->dev), SIOCSIFNETMASK, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFNETMASK\n", __func__); - - sin->sin_addr.s_addr = context->ip_addr | ~(context->subnet_mask); - err = devinet_ioctl(dev_net(context->dev), SIOCSIFBRDADDR, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFBRDADDR\n", __func__); - - memset(&ifr, 0, sizeof(ifr)); - strncpy(ifr.ifr_ifrn.ifrn_name, context->dev->name, - sizeof(ifr.ifr_ifrn.ifrn_name)); - ifr.ifr_flags = ((context->dev->flags) | context->iff_flag); - err = devinet_ioctl(dev_net(context->dev), SIOCSIFFLAGS, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFFLAGS\n", __func__); - - set_fs(saved_fs); -} - -static const struct net_device_ops usb_netdev_ops = { - .ndo_open = usb_ether_open, - .ndo_stop = usb_ether_stop, - .ndo_start_xmit = usb_ether_xmit, - .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = usb_ether_tx_timeout, - .ndo_get_stats = usb_ether_get_stats, -}; - -static void usb_ether_setup(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - INIT_LIST_HEAD(&context->rx_reqs); - INIT_LIST_HEAD(&context->tx_reqs); - - spin_lock_init(&context->lock); - context->dev = dev; - dev->netdev_ops = &usb_netdev_ops; - ether_setup(dev); - - random_ether_addr(dev->dev_addr); -} - -/*-------------------------------------------------------------------------*/ -static void usbnet_cleanup(struct usbnet_device *dev) -{ - struct usbnet_context *context = dev->net_ctxt; - if (context) { - unregister_netdev(context->dev); - free_netdev(context->dev); - dev->net_ctxt = NULL; - } -} - -static void usbnet_unbind(struct usb_configuration *c, struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usb_composite_dev *cdev = c->cdev; - struct usbnet_context *context = dev->net_ctxt; - struct usb_request *req; - - dev->cdev = cdev; - - usb_ep_disable(context->bulk_in); - usb_ep_disable(context->bulk_out); - - /* Free BULK OUT Requests */ - while ((req = usb_get_recv_request(context))) - usb_ep_free_request(context->bulk_out, req); - - /* Free BULK IN Requests */ - while ((req = usb_get_xmit_request(DO_NOT_STOP_QUEUE, - context->dev))) { - usb_ep_free_request(context->bulk_in, req); - } - - context->config = 0; - - usbnet_cleanup(dev); -} - -static void ether_out_complete(struct usb_ep *ep, struct usb_request *req) -{ - struct sk_buff *skb = req->context; - struct usbnet_context *context = ep->driver_data; - - if (req->status == 0) { - dmac_inv_range((void *)req->buf, (void *)(req->buf + - req->actual)); - skb_put(skb, req->actual); - skb->protocol = eth_type_trans(skb, context->dev); - context->stats.rx_packets++; - context->stats.rx_bytes += req->actual; - 
netif_rx(skb); - } else { - dev_kfree_skb_any(skb); - context->stats.rx_errors++; - } - - /* don't bother requeuing if we just went offline */ - if ((req->status == -ENODEV) || (req->status == -ESHUTDOWN)) { - unsigned long flags; - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->rx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - } else { - if (ether_queue_out(req, context)) - USBNETDBG(context, "ether_out: cannot requeue\n"); - } -} - -static void ether_in_complete(struct usb_ep *ep, struct usb_request *req) -{ - unsigned long flags; - struct sk_buff *skb = req->context; - struct usbnet_context *context = ep->driver_data; - - if (req->status == 0) { - context->stats.tx_packets++; - context->stats.tx_bytes += req->actual; - } else { - context->stats.tx_errors++; - } - - dev_kfree_skb_any(skb); - - spin_lock_irqsave(&context->lock, flags); - if (list_empty(&context->tx_reqs)) - netif_start_queue(context->dev); - - list_add_tail(&req->list, &context->tx_reqs); - spin_unlock_irqrestore(&context->lock, flags); -} - -static int usbnet_bind(struct usb_configuration *c, - struct usb_function *f) -{ - struct usb_composite_dev *cdev = c->cdev; - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - int n, rc, id; - struct usb_ep *ep; - struct usb_request *req; - unsigned long flags; - - dev->cdev = cdev; - - id = usb_interface_id(c, f); - if (id < 0) - return id; - usbnet_intf_desc.bInterfaceNumber = id; - context->gadget = cdev->gadget; - - /* Find all the endpoints we will use */ - ep = usb_ep_autoconfig(cdev->gadget, &usbnet_fs_bulk_in_desc); - if (!ep) { - USBNETDBG(context, "%s auto-configure usbnet_hs_bulk_in_desc error\n", - __func__); - goto autoconf_fail; - } - ep->driver_data = context; - context->bulk_in = ep; - - ep = usb_ep_autoconfig(cdev->gadget, &usbnet_fs_bulk_out_desc); - if (!ep) { - USBNETDBG(context, "%s auto-configure usbnet_hs_bulk_out_desc error\n", - __func__); - goto autoconf_fail; - } - ep->driver_data = context; - context->bulk_out = ep; - - - ep = usb_ep_autoconfig(cdev->gadget, &fs_intr_out_desc); - if (!ep) { - USBNETDBG(context, "%s auto-configure hs_intr_out_desc error\n", - __func__); - goto autoconf_fail; - } - ep->driver_data = context; - context->intr_out = ep; - - if (gadget_is_dualspeed(cdev->gadget)) { - - /* Assume endpoint addresses are the same for both speeds */ - usbnet_hs_bulk_in_desc.bEndpointAddress = - usbnet_fs_bulk_in_desc.bEndpointAddress; - usbnet_hs_bulk_out_desc.bEndpointAddress = - usbnet_fs_bulk_out_desc.bEndpointAddress; - hs_intr_out_desc.bEndpointAddress = - fs_intr_out_desc.bEndpointAddress; - } - - - rc = -ENOMEM; - - for (n = 0; n < MAX_BULK_RX_REQ_NUM; n++) { - req = usb_ep_alloc_request(context->bulk_out, - GFP_KERNEL); - if (!req) { - USBNETDBG(context, "%s: alloc request bulk_out fail\n", - __func__); - break; - } - req->complete = ether_out_complete; - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->rx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - } - for (n = 0; n < MAX_BULK_TX_REQ_NUM; n++) { - req = usb_ep_alloc_request(context->bulk_in, - GFP_KERNEL); - if (!req) { - USBNETDBG(context, "%s: alloc request bulk_in fail\n", - __func__); - break; - } - req->complete = ether_in_complete; - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->tx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - } - - return 0; - -autoconf_fail: - rc = -ENOTSUPP; - usbnet_unbind(c, 
f); - return rc; -} - - - - -static void do_set_config(struct usb_function *f, u16 new_config) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - int result = 0; - struct usb_request *req; - int high_speed_flag = 0; - - if (context->config == new_config) /* Config did not change */ - return; - - context->config = new_config; - - if (new_config == 1) { /* Enable End points */ - if (gadget_is_dualspeed(context->gadget) - && context->gadget->speed == USB_SPEED_HIGH) - high_speed_flag = 1; - - if (high_speed_flag) - result = usb_ep_enable(context->bulk_in, - &usbnet_hs_bulk_in_desc); - else - result = usb_ep_enable(context->bulk_in, - &usbnet_fs_bulk_in_desc); - - if (result != 0) { - USBNETDBG(context, - "%s: failed to enable BULK_IN EP ret=%d\n", - __func__, result); - } - - context->bulk_in->driver_data = context; - - if (high_speed_flag) - result = usb_ep_enable(context->bulk_out, - &usbnet_hs_bulk_out_desc); - else - result = usb_ep_enable(context->bulk_out, - &usbnet_fs_bulk_out_desc); - - if (result != 0) { - USBNETDBG(context, - "%s: failed to enable BULK_OUT EP ret = %d\n", - __func__, result); - } - - context->bulk_out->driver_data = context; - - if (high_speed_flag) - result = usb_ep_enable(context->intr_out, - &hs_intr_out_desc); - else - result = usb_ep_enable(context->intr_out, - &fs_intr_out_desc); - - if (result != 0) { - USBNETDBG(context, - "%s: failed to enable INTR_OUT EP ret = %d\n", - __func__, result); - } - - context->intr_out->driver_data = context; - - /* we're online -- get all rx requests queued */ - while ((req = usb_get_recv_request(context))) { - if (ether_queue_out(req, context)) { - USBNETDBG(context, - "%s: ether_queue_out failed\n", - __func__); - break; - } - } - - } else {/* Disable Endpoints */ - if (context->bulk_in) - usb_ep_disable(context->bulk_in); - if (context->bulk_out) - usb_ep_disable(context->bulk_out); - } -} - - -static int usbnet_set_alt(struct usb_function *f, - unsigned intf, unsigned alt) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "usbnet_set_alt intf: %d alt: %d\n", intf, alt); - do_set_config(f, 1); - return 0; -} - -static int usbnet_ctrlrequest(struct usb_composite_dev *cdev, - const struct usb_ctrlrequest *ctrl) -{ - struct usbnet_device *dev = _usbnet_dev; - struct usbnet_context *context = dev->net_ctxt; - int rc = -EOPNOTSUPP; - int wIndex = le16_to_cpu(ctrl->wIndex); - int wValue = le16_to_cpu(ctrl->wValue); - int wLength = le16_to_cpu(ctrl->wLength); - struct usb_request *req = cdev->req; - - USBNETDBG(context, "usbnet_ctrlrequest " - "%02x.%02x v%04x i%04x l%u\n", - ctrl->bRequestType, ctrl->bRequest, - wValue, wIndex, wLength); - - if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { - switch (ctrl->bRequest) { - case USBNET_SET_IP_ADDRESS: - context->ip_addr = (wValue << 16) | wIndex; - rc = 0; - break; - case USBNET_SET_SUBNET_MASK: - context->subnet_mask = (wValue << 16) | wIndex; - rc = 0; - break; - case USBNET_SET_HOST_IP: - context->router_ip = (wValue << 16) | wIndex; - rc = 0; - break; - default: - break; - } - - if (context->ip_addr && context->subnet_mask - && context->router_ip) { - context->iff_flag = IFF_UP; - /* schedule a work queue to do this because we - need to be able to sleep */ - schedule_work(&context->usbnet_config_wq); - } - } - - /* respond with data transfer or status phase? 
*/ - if (rc >= 0) { - req->zero = rc < wLength; - req->length = rc; - rc = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); - if (rc < 0) - USBNETDBG(context, "usbnet setup response error\n"); - } - return rc; -} - -static void usbnet_disable(struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "%s\n", __func__); - do_set_config(f, 0); -} - -static void usbnet_suspend(struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "%s\n", __func__); -} - -static void usbnet_resume(struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "%s\n", __func__); -} - -static int usbnet_bind_config(struct usb_configuration *c) -{ - struct usbnet_device *dev = _usbnet_dev; - struct usbnet_context *context; - struct net_device *net_dev; - int ret, status; - - net_dev = alloc_netdev(sizeof(struct usbnet_context), - "usb%d", usb_ether_setup); - if (!net_dev) { - pr_err("%s: alloc_netdev error\n", __func__); - return -EINVAL; - } - net_dev->netdev_ops = &usb_netdev_ops; - - ret = register_netdev(net_dev); - if (ret) { - pr_err("%s: register_netdev error\n", __func__); - free_netdev(net_dev); - return -EINVAL; - } - context = netdev_priv(net_dev); - INIT_WORK(&context->usbnet_config_wq, usbnet_if_config); - - status = usb_string_id(c->cdev); - if (status >= 0) { - usbnet_string_defs[STRING_INTERFACE].id = status; - usbnet_intf_desc.iInterface = status; - } - - context->config = 0; - dev->net_ctxt = context; - dev->cdev = c->cdev; - dev->function.name = USBNET_FUNCTION_NAME; - dev->function.descriptors = fs_function; - dev->function.hs_descriptors = hs_function; - dev->function.bind = usbnet_bind; - dev->function.unbind = usbnet_unbind; - dev->function.set_alt = usbnet_set_alt; - dev->function.disable = usbnet_disable; - dev->function.suspend = usbnet_suspend; - dev->function.resume = usbnet_resume; - dev->function.strings = usbnet_strings; - - ret = usb_add_function(c, &dev->function); - if (ret) - goto err1; - - pr_info("%s\n", __func__); - - return 0; - -err1: - kfree(dev); - pr_err("usbnet gadget driver failed to initialize\n"); - usbnet_cleanup(dev); - return ret; -} - -static int usbnet_setup(void) -{ - struct usbnet_device *dev = _usbnet_dev; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - _usbnet_dev = dev; - - return 0; -} diff --git a/drivers/usb/gadget/htc_attr.c b/drivers/usb/gadget/htc_attr.c index 4247ab53..072564f9 100644 --- a/drivers/usb/gadget/htc_attr.c +++ b/drivers/usb/gadget/htc_attr.c @@ -12,647 +12,27 @@ * */ -#include -#include - -enum { - USB_FUNCTION_UMS = 0, - USB_FUNCTION_ADB = 1, - USB_FUNCTION_RNDIS, - USB_FUNCTION_DIAG, - USB_FUNCTION_SERIAL, - USB_FUNCTION_PROJECTOR, - USB_FUNCTION_FSYNC, - USB_FUNCTION_MTP, - USB_FUNCTION_MODEM, /* 8 */ - USB_FUNCTION_ECM, - USB_FUNCTION_ACM, - USB_FUNCTION_DIAG_MDM, /* 11 */ - USB_FUNCTION_RMNET, - USB_FUNCTION_ACCESSORY, - USB_FUNCTION_MODEM_MDM, /* 14 */ - USB_FUNCTION_MTP36, - USB_FUNCTION_USBNET, - USB_FUNCTION_RNDIS_IPT = 31, -}; - -struct usb_string_node{ - u32 usb_function_flag; - char *name; -}; - -static struct usb_string_node usb_string_array[] = { - { - .usb_function_flag = 1 << USB_FUNCTION_UMS, - .name = "mass_storage", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ADB, - .name = "adb", - }, - { - 
.usb_function_flag = 1 << USB_FUNCTION_RNDIS, - .name = "rndis", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_DIAG, - .name = "diag", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_SERIAL, - .name = "serial", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_PROJECTOR, - .name = "projector", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_MODEM, - .name = "modem", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ECM, - .name = "cdc_ethernet", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ACM, - .name = "acm", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_DIAG_MDM, - .name = "diag_mdm", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_RMNET, - .name = "rmnet", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ACCESSORY, - .name = "accessory", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_MODEM_MDM, - .name = "modem_mdm", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_MTP, - .name = "mtp", - }, - -}; - -static int use_mfg_serialno; -static char mfg_df_serialno[16]; -static int intrsharing; - -#define PID_RNDIS 0x0ffe -#define PID_ECM 0x0ff8 -#define PID_ACM 0x0ff4 -#define PID_USBNET 0x0fcd - -/* for htc in-house device attribute, htc_usb_attr.c */ -void android_force_reset(void) -{ - if (_android_dev && _android_dev->cdev) - usb_composite_force_reset(_android_dev->cdev); -} - -static bool isFunctionDisabled(struct android_usb_function *function) -{ - struct android_usb_function *f; - struct list_head *list = &_android_dev->enabled_functions; - - list_for_each_entry(f, list, enabled_list) { - if (!strcmp(function->name, f->name)) - return false; - } - return true; -} - -static int product_has_function(struct android_usb_product *p, - struct android_usb_function *f) -{ - char **functions = p->functions; - int count = p->num_functions; - const char *name = f->name; - int i; - - for (i = 0; i < count; i++) { - if (!strncmp(name, functions[i], strlen(name))) - return 1; - } - return 0; -} - -static int product_matches_functions(struct android_usb_product *p, - struct list_head *list) -{ - int count = 0; - struct android_usb_function *f; - list_for_each_entry(f, list, enabled_list) { - count++; - if (product_has_function(p, f) == isFunctionDisabled(f)) - return 0; - } - - if (count == p->num_functions) - return 1; - else - return 0; -} - -static int get_product_id(struct android_dev *dev, struct list_head *list) -{ - struct android_usb_product *p = dev->products; - int count = dev->num_products; - int i; - - if (p) { - for (i = 0; i < count; i++, p++) { - if (product_matches_functions(p, list)) - return p->product_id; - } - } - /* use default product ID */ - return dev->pdata->product_id; -} - -static struct android_usb_product *get_product(struct android_dev *dev, struct list_head *list) -{ - struct android_usb_product *p = dev->products; - int count = dev->num_products; - int i; - - if (p) { - for (i = 0; i < count; i++, p++) { - if (product_matches_functions(p, list)) - return p; - } - } - return NULL; -} - - -static unsigned int htc_usb_get_func_combine_value(void) -{ - struct android_dev *dev = _android_dev; - struct android_usb_function *f; - int i; - unsigned int val = 0; - - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - for (i = 0; i < ARRAY_SIZE(usb_string_array); i++) - if (!strcmp(usb_string_array[i].name, f->name)) { - val |= usb_string_array[i].usb_function_flag; - break; - } - } - return val; -} -static DEFINE_MUTEX(function_bind_sem); -int htc_usb_enable_function(char *name, int ebl) -{ - int i; - unsigned val; - - 
mutex_lock(&function_bind_sem); - - val = htc_usb_get_func_combine_value(); - - for (i = 0; i < ARRAY_SIZE(usb_string_array); i++) { - if (!strcmp(usb_string_array[i].name, name)) { - if (ebl) { - if (val & usb_string_array[i].usb_function_flag) { - pr_info("%s: '%s' is already enabled\n", __func__, name); - mutex_unlock(&function_bind_sem); - return 0; - } - val |= usb_string_array[i].usb_function_flag; - } else { - if (!(val & usb_string_array[i].usb_function_flag)) { - pr_info("%s: '%s' is already disabled\n", __func__, name); - mutex_unlock(&function_bind_sem); - return 0; - } - - val &= ~usb_string_array[i].usb_function_flag; - } - break; - } - } - mutex_unlock(&function_bind_sem); - return android_switch_function(val); -} - - -int android_show_function(char *buf) -{ - unsigned length = 0; - struct android_dev *dev = _android_dev; - struct android_usb_function *f; - char *ebl_str[2] = {"disable", "enable"}; - char *p; - int i; - - for (i = 0; dev->functions[i] != NULL; i++) { - - p = ebl_str[0]; - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - if (!strcmp(dev->functions[i]->name, f->name)) { - p = ebl_str[1]; - break; - } - } - - length += sprintf(buf + length, "%s:%s\n", - dev->functions[i]->name, p); - - } - return length; -} - - -int android_switch_function(unsigned func) -{ - struct android_dev *dev = _android_dev; - struct android_usb_function **functions = dev->functions; - struct android_usb_function *f; - struct android_usb_product *product; - int product_id, vendor_id; - unsigned val; - - /* framework may try to enable adb before android_usb_init_work is done.*/ - if (dev->enabled != true) { - pr_info("%s: USB driver is not initialize\n", __func__); - return 0; - } - - mutex_lock(&function_bind_sem); - - val = htc_usb_get_func_combine_value(); - - pr_info("%s: %u, before %u\n", __func__, func, val); - - if (func == val) { - pr_info("%s: SKIP due the function is the same ,%u\n" - , __func__, func); - mutex_unlock(&function_bind_sem); - return 0; - } - - usb_gadget_disconnect(dev->cdev->gadget); - usb_remove_config(dev->cdev, &android_config_driver); - - INIT_LIST_HEAD(&dev->enabled_functions); - - while ((f = *functions++)) { - if ((func & (1 << USB_FUNCTION_UMS)) && - !strcmp(f->name, "mass_storage")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ADB)) && - !strcmp(f->name, "adb")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ECM)) && - !strcmp(f->name, "cdc_ethernet")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ACM)) && - !strcmp(f->name, "acm")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_RNDIS)) && - !strcmp(f->name, "rndis")) { - list_add_tail(&f->enabled_list, &dev->enabled_functions); - intrsharing = !((func >> USB_FUNCTION_RNDIS_IPT) & 1); - } else if ((func & (1 << USB_FUNCTION_DIAG)) && - !strcmp(f->name, "diag")) { - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_DIAG - func |= 1 << USB_FUNCTION_DIAG_MDM; -#endif - } else if ((func & (1 << USB_FUNCTION_MODEM)) && - !strcmp(f->name, "modem")) { - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM - func |= 1 << USB_FUNCTION_MODEM_MDM; -#endif - } else if ((func & (1 << USB_FUNCTION_SERIAL)) && - !strcmp(f->name, "serial")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & 
(1 << USB_FUNCTION_MTP)) && - !strcmp(f->name, "mtp")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ACCESSORY)) && - !strcmp(f->name, "accessory")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_PROJECTOR)) && - !strcmp(f->name, "projector")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_DIAG - else if ((func & (1 << USB_FUNCTION_DIAG_MDM)) && - !strcmp(f->name, "diag_mdm")) { - if (func & (1 << USB_FUNCTION_DIAG)) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else - func &= ~(1 << USB_FUNCTION_DIAG_MDM); - } -#endif - else if ((func & (1 << USB_FUNCTION_RMNET)) && - !strcmp(f->name, "rmnet")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM - else if ((func & (1 << USB_FUNCTION_MODEM_MDM)) && - !strcmp(f->name, "modem_mdm")) { - if (func & (1 << USB_FUNCTION_MODEM)) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else - func &= ~(1 << USB_FUNCTION_MODEM_MDM); - } -#endif -#ifdef CONFIG_USB_ANDROID_USBNET - else if ((func & (1 << USB_FUNCTION_USBNET)) && - !strcmp(f->name, "usbnet")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#endif - } - - list_for_each_entry(f, &dev->enabled_functions, enabled_list) - pr_debug("# %s\n", f->name); - - product = get_product(dev, &dev->enabled_functions); - - if (product) { - vendor_id = product->vendor_id ? product->vendor_id : dev->pdata->vendor_id; - product_id = product->product_id; - } else { - vendor_id = dev->pdata->vendor_id; - product_id = dev->pdata->product_id; - } - - /* We need to specify the COMM class in the device descriptor - * if we are using RNDIS. - */ - if (product_id == PID_RNDIS || product_id == PID_ECM - || product_id == PID_ACM || product_id == PID_USBNET) - dev->cdev->desc.bDeviceClass = USB_CLASS_COMM; - else - dev->cdev->desc.bDeviceClass = USB_CLASS_PER_INTERFACE; - - if (dev->match) - product_id = dev->match(product_id, intrsharing); - - pr_info("%s: vendor_id=0x%x, product_id=0x%x\n", - __func__, vendor_id, product_id); - - device_desc.idVendor = __constant_cpu_to_le16(vendor_id); - device_desc.idProduct = __constant_cpu_to_le16(product_id); - - dev->cdev->desc.idVendor = device_desc.idVendor; - dev->cdev->desc.idProduct = device_desc.idProduct; - - device_desc.bDeviceClass = dev->cdev->desc.bDeviceClass; - - usb_add_config(dev->cdev, &android_config_driver, android_bind_config); - - mdelay(100); - usb_gadget_connect(dev->cdev->gadget); - dev->enabled = true; - - mutex_unlock(&function_bind_sem); - return 0; -} - -void android_set_serialno(char *serialno) -{ - strings_dev[STRING_SERIAL_IDX].s = serialno; -} - -void init_mfg_serialno(void) -{ - char *serialno = "000000000000"; - - use_mfg_serialno = (board_mfg_mode() == 1) ? 
1 : 0; - strncpy(mfg_df_serialno, serialno, strlen(serialno)); -} - -static ssize_t show_usb_cable_connect(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned length; - - length = sprintf(buf, "%d", - (usb_get_connect_type() == CONNECT_TYPE_USB)?1:0); - return length; -} static ssize_t show_usb_function_switch(struct device *dev, struct device_attribute *attr, char *buf) { - return android_show_function(buf); + return 0; } static ssize_t store_usb_function_switch(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - unsigned u; - ssize_t ret; - - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) { - USB_ERR("%s: %d\n", __func__, ret); - return 0; - } - - ret = android_switch_function(u); - - if (ret == 0) - return count; - else - return 0; -} - -static ssize_t show_USB_ID_status(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct android_usb_platform_data *pdata = dev->platform_data; - int value = 1; - unsigned length; - printk(KERN_INFO "[USB] id pin: %d\n", pdata->usb_id_pin_gpio); - - if (pdata->usb_id_pin_gpio != 0) { - value = gpio_get_value(pdata->usb_id_pin_gpio); - printk(KERN_INFO"[USB] id pin status %d\n", value); - } - - length = sprintf(buf, "%d", value); - return length; -} - -static ssize_t show_usb_serial_number(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned length; - struct android_usb_platform_data *pdata = dev->platform_data; - - length = sprintf(buf, "%s", pdata->serial_number); - return length; -} - -static ssize_t store_usb_serial_number(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct android_usb_platform_data *pdata = dev->platform_data; - char *serialno = "000000000000"; - - if (buf[0] == '0' || buf[0] == '1') { - memset(mfg_df_serialno, 0x0, sizeof(mfg_df_serialno)); - if (buf[0] == '0') { - strncpy(mfg_df_serialno, serialno, strlen(serialno)); - use_mfg_serialno = 1; - android_set_serialno(mfg_df_serialno); - } else { - strncpy(mfg_df_serialno, pdata->serial_number, - strlen(pdata->serial_number)); - use_mfg_serialno = 0; - android_set_serialno(pdata->serial_number); - } - /* reset_device */ - android_force_reset(); - } - - return count; -} - -static ssize_t show_dummy_usb_serial_number(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned length; - struct android_usb_platform_data *pdata = dev->platform_data; - - if (use_mfg_serialno) - length = sprintf(buf, "%s", mfg_df_serialno); /* dummy */ - else - length = sprintf(buf, "%s", pdata->serial_number); /* Real */ - return length; -} - -static ssize_t store_dummy_usb_serial_number(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int data_buff_size = (sizeof(mfg_df_serialno) > strlen(buf))? 
- strlen(buf):sizeof(mfg_df_serialno); - int loop_i; - - /* avoid overflow, mfg_df_serialno[16] always is 0x0 */ - if (data_buff_size == 16) - data_buff_size--; - - for (loop_i = 0; loop_i < data_buff_size; loop_i++) { - if (buf[loop_i] >= 0x30 && buf[loop_i] <= 0x39) /* 0-9 */ - continue; - else if (buf[loop_i] >= 0x41 && buf[loop_i] <= 0x5A) /* A-Z */ - continue; - if (buf[loop_i] == 0x0A) /* Line Feed */ - continue; - else { - printk(KERN_INFO "%s(): get invaild char (0x%2.2X)\n", - __func__, buf[loop_i]); - return -EINVAL; - } - } - - use_mfg_serialno = 1; - memset(mfg_df_serialno, 0x0, sizeof(mfg_df_serialno)); - strncpy(mfg_df_serialno, buf, data_buff_size); - android_set_serialno(mfg_df_serialno); - /*device_reset */ - android_force_reset(); - - return count; -} - -static ssize_t -show_usb_car_kit_enable(struct device *dev, struct device_attribute *attr, - char *buf) -{ - unsigned length; - int value = 0; -#ifdef CONFIG_CABLE_DETECT_ACCESSORY -#include - value = (cable_get_accessory_type() == DOCK_STATE_UNDOCKED) ? 0 : 1; - printk(KERN_INFO "USB_car_kit_enable %d\n", cable_get_accessory_type()); -#else - value = 0; - printk(KERN_INFO "USB_car_kit_enable: CABLE_DETECT_ACCESSORY was not defined\n"); -#endif - - length = sprintf(buf, "%d", value); - return length; -} - -static ssize_t show_usb_phy_setting(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return otg_show_usb_phy_setting(buf); -} -static ssize_t store_usb_phy_setting(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return otg_store_usb_phy_setting(buf, count); -} - -#if (defined(CONFIG_USB_OTG) && defined(CONFIG_USB_OTG_HOST)) -void msm_otg_set_id_state(int id); -static ssize_t store_usb_host_mode(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned u, enable; - ssize_t ret; - - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) { - USB_ERR("%s: %d\n", __func__, ret); - return 0; - } - - enable = u ? 1 : 0; - msm_otg_set_id_state(!enable); - - USB_INFO("%s USB host\n", enable ? 
"Enable" : "Disable"); - - return count; + return 0; } -static DEVICE_ATTR(host_mode, 0220, - NULL, store_usb_host_mode); -#endif -static DEVICE_ATTR(usb_cable_connect, 0444, show_usb_cable_connect, NULL); static DEVICE_ATTR(usb_function_switch, 0664, show_usb_function_switch, store_usb_function_switch); -static DEVICE_ATTR(USB_ID_status, 0444, show_USB_ID_status, NULL); -static DEVICE_ATTR(usb_serial_number, 0644, - show_usb_serial_number, store_usb_serial_number); -static DEVICE_ATTR(dummy_usb_serial_number, 0644, - show_dummy_usb_serial_number, store_dummy_usb_serial_number); -static DEVICE_ATTR(usb_car_kit_enable, 0444, show_usb_car_kit_enable, NULL); -static DEVICE_ATTR(usb_phy_setting, 0664, - show_usb_phy_setting, store_usb_phy_setting); static struct attribute *android_htc_usb_attributes[] = { - &dev_attr_usb_cable_connect.attr, &dev_attr_usb_function_switch.attr, - &dev_attr_USB_ID_status.attr, /* for MFG */ - &dev_attr_usb_serial_number.attr, /* for MFG */ - &dev_attr_dummy_usb_serial_number.attr, /* for MFG */ - &dev_attr_usb_car_kit_enable.attr, - &dev_attr_usb_phy_setting.attr, -#if (defined(CONFIG_USB_OTG) && defined(CONFIG_USB_OTG_HOST)) - &dev_attr_host_mode.attr, -#endif - NULL + NULL, }; -static const struct attribute_group android_usb_attr_group = { +static const struct attribute_group htc_attr_group = { .attrs = android_htc_usb_attributes, }; - diff --git a/drivers/usb/gadget/u_xpst.c b/drivers/usb/gadget/u_xpst.c index 7df4ab6e..66027741 100644 --- a/drivers/usb/gadget/u_xpst.c +++ b/drivers/usb/gadget/u_xpst.c @@ -11,6 +11,9 @@ * GNU General Public License for more details. * */ +#if defined(CONFIG_MACH_MECHA) +#include +#endif struct diag_context _context; static struct usb_diag_ch *legacych; @@ -367,7 +370,7 @@ static long htc_diag_ioctl(struct file *file, unsigned int cmd, unsigned long ar diag_smd_enable(driver->ch, "diag_ioctl", tmp_value); #if defined(CONFIG_MACH_MECHA) /* internal hub*/ - /*smsc251x_mdm_port_sw(tmp_value);*/ + smsc251x_mdm_port_sw(tmp_value); #endif /* force diag_read to return error when disable diag */ if (tmp_value == 0) diff --git a/include/linux/usb/android.h b/include/linux/usb/android.h index 9d7e4a84..6736a085 100644 --- a/include/linux/usb/android.h +++ b/include/linux/usb/android.h @@ -19,6 +19,7 @@ struct android_usb_platform_data { int (*update_pid_and_serial_num)(uint32_t, const char *); + int nluns; }; #endif /* __LINUX_USB_ANDROID_H */ From 30e38498be6a0f81c8f3c34fa0bdec6570c1aa7d Mon Sep 17 00:00:00 2001 From: David Hays Date: Wed, 29 May 2013 11:03:05 -0500 Subject: [PATCH 117/117] bluetooth: updating bluetooth driver to caf Change-Id: I916039b3405324ef3efe1ebbd24238b9da11e364 Conflicts: net/bluetooth/hidp/core.c net/bluetooth/sco.c --- include/net/bluetooth/hci.h | 30 --- include/net/bluetooth/hci_core.h | 8 +- include/net/bluetooth/l2cap.h | 1 - include/net/bluetooth/mgmt.h | 30 --- include/net/bluetooth/sco.h | 2 +- net/bluetooth/Kconfig | 1 + net/bluetooth/Makefile | 1 + net/bluetooth/hci_conn.c | 101 +--------- net/bluetooth/hci_core.c | 13 -- net/bluetooth/hci_event.c | 105 +---------- net/bluetooth/hidbrcm/Kconfig | 6 + net/bluetooth/hidbrcm/Makefile | 7 + net/bluetooth/hidbrcm/bthid.c | 309 +++++++++++++++++++++++++++++++ net/bluetooth/hidp/core.c | 47 +++-- net/bluetooth/l2cap_core.c | 78 +++----- net/bluetooth/l2cap_sock.c | 19 +- net/bluetooth/mgmt.c | 285 ++-------------------------- net/bluetooth/rfcomm/core.c | 33 +--- net/bluetooth/sco.c | 21 +-- net/bluetooth/smp.c | 9 + 20 files changed, 423 insertions(+), 683 
deletions(-) create mode 100644 net/bluetooth/hidbrcm/Kconfig create mode 100644 net/bluetooth/hidbrcm/Makefile create mode 100644 net/bluetooth/hidbrcm/bthid.c diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 66d373f9..eb89f4b1 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -86,7 +86,6 @@ enum { HCI_SERVICE_CACHE, HCI_LINK_KEYS, HCI_DEBUG_KEYS, - HCI_UNREGISTER, HCI_RESET, }; @@ -968,26 +967,6 @@ struct hci_cp_le_create_conn { #define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e -#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200F -struct hci_rp_le_read_white_list_size { - __u8 status; - __u8 size; -} __packed; - -#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010 - -#define HCI_OP_LE_ADD_DEV_WHITE_LIST 0x2011 -struct hci_cp_le_add_dev_white_list { - __u8 addr_type; - bdaddr_t addr; -} __packed; - -#define HCI_OP_LE_REMOVE_DEV_WHITE_LIST 0x2012 -struct hci_cp_le_remove_dev_white_list { - __u8 addr_type; - bdaddr_t addr; -} __packed; - #define HCI_OP_LE_CONN_UPDATE 0x2013 struct hci_cp_le_conn_update { __le16 handle; @@ -1355,15 +1334,6 @@ struct hci_ev_le_advertising_info { __u8 data[0]; } __packed; -#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 -struct hci_ev_le_conn_update_complete { - __u8 status; - __le16 handle; - __le16 interval; - __le16 latency; - __le16 supervision_timeout; -} __packed; - #define HCI_EV_LE_LTK_REQ 0x05 struct hci_ev_le_ltk_req { __le16 handle; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 091afbe0..57492930 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -191,7 +191,6 @@ struct hci_dev { unsigned int acl_pkts; unsigned int sco_pkts; unsigned int le_pkts; - unsigned int le_white_list_size; unsigned int data_block_len; @@ -603,9 +602,6 @@ struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type, struct bt_le_params *le_params); -void hci_le_add_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst); -void hci_le_remove_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst); -void hci_le_cancel_create_connect(struct hci_dev *hdev, bdaddr_t *dst); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); int hci_conn_change_link_key(struct hci_conn *conn); @@ -1037,9 +1033,7 @@ int mgmt_discoverable(u16 index, u8 discoverable); int mgmt_connectable(u16 index, u8 connectable); int mgmt_new_key(u16 index, struct link_key *key, u8 bonded); int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le); -int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval, - u16 latency, u16 timeout); -int mgmt_disconnected(u16 index, bdaddr_t *bdaddr, u8 reason); +int mgmt_disconnected(u16 index, bdaddr_t *bdaddr); int mgmt_disconnect_failed(u16 index); int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status); int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 380bd4ed..a098f3e2 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -653,7 +653,6 @@ struct l2cap_pinfo { #define L2CAP_ATT_MTU_RSP 0x03 #define L2CAP_ATT_RESPONSE_BIT 0x01 #define L2CAP_ATT_INDICATE 0x1D -#define L2CAP_ATT_CONFIRM 0x1E #define L2CAP_ATT_NOT_SUPPORTED 0x06 #define __delta_seq(x, y, pi) ((x) >= (y) ? 
(x) - (y) : \ diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 602fe598..e34c4258 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -60,7 +60,6 @@ struct mgmt_rp_read_info { __u8 hci_ver; __u16 hci_rev; __u8 name[MGMT_MAX_NAME_LENGTH]; - __u8 le_white_list_size; } __packed; struct mgmt_mode { @@ -247,26 +246,6 @@ struct mgmt_cp_unset_rssi_reporter { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_LE_READ_WHITE_LIST_SIZE 0xE000 - -#define MGMT_OP_LE_CLEAR_WHITE_LIST 0xE001 - -#define MGMT_OP_LE_ADD_DEV_WHITE_LIST 0xE002 -struct mgmt_cp_le_add_dev_white_list { - __u8 addr_type; - bdaddr_t bdaddr; -} __packed; - -#define MGMT_OP_LE_REMOVE_DEV_WHITE_LIST 0xE003 -struct mgmt_cp_le_remove_dev_white_list { - __u8 addr_type; - bdaddr_t bdaddr; -} __packed; - -#define MGMT_OP_LE_CREATE_CONN_WHITE_LIST 0xE004 - -#define MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST 0xE005 - #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -311,7 +290,6 @@ struct mgmt_ev_connected { #define MGMT_EV_DISCONNECTED 0x000C struct mgmt_ev_disconnected { bdaddr_t bdaddr; - __u8 reason; } __packed; #define MGMT_EV_CONNECT_FAILED 0x000D @@ -401,11 +379,3 @@ struct mgmt_ev_rssi_update { bdaddr_t bdaddr; __s8 rssi; } __packed; - -#define MGMT_EV_LE_CONN_PARAMS 0xF000 -struct mgmt_ev_le_conn_params { - bdaddr_t bdaddr; - __u16 interval; - __u16 latency; - __u16 timeout; -} __packed; diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index 6d94c343..160e3f0c 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -31,7 +31,7 @@ #define SCO_DEFAULT_FLUSH_TO 0xFFFF #define SCO_CONN_TIMEOUT (HZ * 40) -#define SCO_DISCONN_TIMEOUT (HZ * 20) +#define SCO_DISCONN_TIMEOUT (HZ * 2) #define SCO_CONN_IDLE_TIMEOUT (HZ * 60) /* SCO socket address */ diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 114d2513..c8ea9931 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -69,3 +69,4 @@ source "net/bluetooth/hidp/Kconfig" source "drivers/bluetooth/Kconfig" +source "net/bluetooth/hidbrcm/Kconfig" diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index d46736a0..04d9dabe 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_BT_RFCOMM) += rfcomm/ obj-$(CONFIG_BT_BNEP) += bnep/ obj-$(CONFIG_BT_CMTP) += cmtp/ obj-$(CONFIG_BT_HIDP) += hidp/ +obj-$(CONFIG_BT_HID_BRCM) += hidbrcm/ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o smp.o bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o amp.o diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index c956e140..25b559b1 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -50,7 +50,7 @@ struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type, struct bt_le_params *le_params) { - struct hci_conn *le, *le_wlist_conn; + struct hci_conn *le; struct hci_cp_le_create_conn cp; struct adv_entry *entry; struct link_key *key; @@ -59,21 +59,8 @@ struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type, le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); if (le) { - le_wlist_conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, - BDADDR_ANY); - if (!le_wlist_conn) { - hci_conn_hold(le); - return le; - } else { - BT_DBG("remove wlist conn"); - le->out = 1; - le->link_mode |= HCI_LM_MASTER; - le->sec_level = BT_SECURITY_LOW; - le->type = LE_LINK; - 
hci_proto_connect_cfm(le, 0); - hci_conn_del(le_wlist_conn); - return le; - } + hci_conn_hold(le); + return le; } key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK); @@ -120,13 +107,8 @@ struct hci_conn *hci_le_connect(struct hci_dev *hdev, __u16 pkt_type, cp.conn_latency = cpu_to_le16(BT_LE_LATENCY_DEF); le->conn_timeout = 5; } - if (!bacmp(&le->dst, BDADDR_ANY)) { - cp.filter_policy = 0x01; - le->conn_timeout = 0; - } else { - bacpy(&cp.peer_addr, &le->dst); - cp.peer_addr_type = le->dst_type; - } + bacpy(&cp.peer_addr, &le->dst); + cp.peer_addr_type = le->dst_type; hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); @@ -139,73 +121,6 @@ static void hci_le_connect_cancel(struct hci_conn *conn) hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); } -void hci_le_cancel_create_connect(struct hci_dev *hdev, bdaddr_t *dst) -{ - struct hci_conn *le; - - BT_DBG("%p", hdev); - - le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); - if (le) { - BT_DBG("send hci connect cancel"); - hci_le_connect_cancel(le); - hci_conn_del(le); - } -} -EXPORT_SYMBOL(hci_le_cancel_create_connect); - -void hci_le_add_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst) -{ - struct hci_cp_le_add_dev_white_list cp; - struct adv_entry *entry; - struct link_key *key; - - BT_DBG("%p", hdev); - - memset(&cp, 0, sizeof(cp)); - bacpy(&cp.addr, dst); - - key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK); - if (!key) { - entry = hci_find_adv_entry(hdev, dst); - if (entry) - cp.addr_type = entry->bdaddr_type; - else - cp.addr_type = 0x00; - } else { - cp.addr_type = key->addr_type; - } - - hci_send_cmd(hdev, HCI_OP_LE_ADD_DEV_WHITE_LIST, sizeof(cp), &cp); -} -EXPORT_SYMBOL(hci_le_add_dev_white_list); - -void hci_le_remove_dev_white_list(struct hci_dev *hdev, bdaddr_t *dst) -{ - struct hci_cp_le_remove_dev_white_list cp; - struct adv_entry *entry; - struct link_key *key; - - BT_DBG("%p", hdev); - - memset(&cp, 0, sizeof(cp)); - bacpy(&cp.addr, dst); - - key = hci_find_link_key_type(hdev, dst, KEY_TYPE_LTK); - if (!key) { - entry = hci_find_adv_entry(hdev, dst); - if (entry) - cp.addr_type = entry->bdaddr_type; - else - cp.addr_type = 0x00; - } else { - cp.addr_type = key->addr_type; - } - - hci_send_cmd(hdev, HCI_OP_LE_REMOVE_DEV_WHITE_LIST, sizeof(cp), &cp); -} -EXPORT_SYMBOL(hci_le_remove_dev_white_list); - void hci_acl_connect(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -1034,9 +949,6 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) if (test_bit(HCI_RAW, &hdev->flags)) return; - if (conn->type == LE_LINK) - return; - if (conn->mode != HCI_CM_SNIFF) goto timer; @@ -1102,9 +1014,6 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn) if (test_bit(HCI_RAW, &hdev->flags)) return; - if (conn->type == LE_LINK) - return; - if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) return; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4e4824a9..46cef815 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -305,12 +305,6 @@ static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) /* Read LE buffer size */ hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); - - /* Read LE clear white list */ - hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); - - /* Read LE white list size */ - hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); } static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) @@ -549,11 +543,6 @@ int hci_dev_open(__u16 dev) hci_req_lock(hdev); - if 
(test_bit(HCI_UNREGISTER, &hdev->flags)) { - ret = -ENODEV; - goto done; - } - if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { ret = -ERFKILL; goto done; @@ -1570,8 +1559,6 @@ int hci_unregister_dev(struct hci_dev *hdev) BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - set_bit(HCI_UNREGISTER, &hdev->flags); - write_lock_bh(&hci_dev_list_lock); list_del(&hdev->list); write_unlock_bh(&hci_dev_list_lock); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 10687f4e..5bff34bf 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -426,16 +426,6 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); } -static void hci_cc_le_clear_white_list(struct hci_dev *hdev, - struct sk_buff *skb) -{ - __u8 status = *((__u8 *) skb->data); - - BT_DBG("%s status 0x%x", hdev->name, status); - - hci_req_complete(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, status); -} - static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_ssp_mode *rp = (void *) skb->data; @@ -923,23 +913,6 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); } -static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_rp_le_read_white_list_size *rp = (void *) skb->data; - - BT_DBG("%s status 0x%x", hdev->name, rp->status); - - if (rp->status) - return; - - hdev->le_white_list_size = rp->size; - - BT_DBG("%s le white list %d", hdev->name, hdev->le_white_list_size); - - hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status); -} - static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = (void *) skb->data; @@ -1784,7 +1757,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff struct hci_ev_disconn_complete *ev = (void *) skb->data; struct hci_conn *conn; - BT_DBG("%s status %d reason %d", hdev->name, ev->status, ev->reason); + BT_DBG("%s status %d", hdev->name, ev->status); if (ev->status) { hci_dev_lock(hdev); @@ -1802,7 +1775,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff conn->state = BT_CLOSED; if (conn->type == ACL_LINK || conn->type == LE_LINK) - mgmt_disconnected(hdev->id, &conn->dst, ev->reason); + mgmt_disconnected(hdev->id, &conn->dst); if (conn->type == LE_LINK) del_timer(&conn->smp_timer); @@ -1830,15 +1803,6 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s struct hci_cp_auth_requested cp; hci_remove_link_key(hdev, &conn->dst); cp.handle = cpu_to_le16(conn->handle); - /*Initiates dedicated bonding as pin or key is missing - on remote device*/ - /*In case if remote device is ssp supported, - reduce the security level to MEDIUM if it is HIGH*/ - if (conn->ssp_mode && conn->auth_initiator && - conn->io_capability != 0x03) { - conn->pending_sec_level = BT_SECURITY_HIGH; - conn->auth_type = HCI_AT_DEDICATED_BONDING_MITM; - } hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); hci_dev_unlock(hdev); @@ -2083,9 +2047,6 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk opcode = __le16_to_cpu(ev->opcode); - if (test_bit(HCI_RESET, &hdev->flags) && (opcode != HCI_OP_RESET)) - return; - switch (opcode) { case HCI_OP_INQUIRY_CANCEL: hci_cc_inquiry_cancel(hdev, skb); @@ -2252,14 +2213,6 @@ static inline void 
hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk hci_cc_le_read_buffer_size(hdev, skb); break; - case HCI_OP_LE_READ_WHITE_LIST_SIZE: - hci_cc_le_read_white_list_size(hdev, skb); - break; - - case HCI_OP_LE_CLEAR_WHITE_LIST: - hci_cc_le_clear_white_list(hdev, skb); - break; - case HCI_OP_READ_RSSI: hci_cc_read_rssi(hdev, skb); break; @@ -3167,23 +3120,11 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff { struct hci_ev_le_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; - u8 white_list; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); - /* Ignore event for LE cancel create conn whitelist */ - if (ev->status && !bacmp(&ev->bdaddr, BDADDR_ANY)) - goto unlock; - - if (hci_conn_hash_lookup_ba(hdev, LE_LINK, BDADDR_ANY)) - white_list = 1; - else - white_list = 0; - - BT_DBG("w_list %d", white_list); - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); if (!conn) { conn = hci_le_conn_add(hdev, &ev->bdaddr, ev->bdaddr_type); @@ -3206,48 +3147,12 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff conn->state = BT_CONNECTED; conn->disc_timeout = HCI_DISCONN_TIMEOUT; mgmt_connected(hdev->id, &ev->bdaddr, 1); - mgmt_le_conn_params(hdev->id, &ev->bdaddr, - __le16_to_cpu(ev->interval), - __le16_to_cpu(ev->latency), - __le16_to_cpu(ev->supervision_timeout)); hci_conn_hold(conn); hci_conn_hold_device(conn); hci_conn_add_sysfs(conn); - if (!white_list) - hci_proto_connect_cfm(conn, ev->status); - -unlock: - hci_dev_unlock(hdev); -} - -static inline void hci_le_conn_update_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_ev_le_conn_update_complete *ev = (void *) skb->data; - struct hci_conn *conn; - - BT_DBG("%s status %d", hdev->name, ev->status); - - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_handle(hdev, - __le16_to_cpu(ev->handle)); - if (conn == NULL) { - BT_ERR("Unknown connection update"); - goto unlock; - } - - if (ev->status) { - BT_ERR("Connection update unsuccessful"); - goto unlock; - } - - mgmt_le_conn_params(hdev->id, &conn->dst, - __le16_to_cpu(ev->interval), - __le16_to_cpu(ev->latency), - __le16_to_cpu(ev->supervision_timeout)); + hci_proto_connect_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); @@ -3322,10 +3227,6 @@ static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_le_conn_complete_evt(hdev, skb); break; - case HCI_EV_LE_CONN_UPDATE_COMPLETE: - hci_le_conn_update_complete_evt(hdev, skb); - break; - case HCI_EV_LE_LTK_REQ: hci_le_ltk_request_evt(hdev, skb); break; diff --git a/net/bluetooth/hidbrcm/Kconfig b/net/bluetooth/hidbrcm/Kconfig new file mode 100644 index 00000000..dda67795 --- /dev/null +++ b/net/bluetooth/hidbrcm/Kconfig @@ -0,0 +1,6 @@ +config BT_HID_BRCM + bool "BRCM HID support" + depends on BT + help + Say Y here to compile BRCM HID support into the kernel + diff --git a/net/bluetooth/hidbrcm/Makefile b/net/bluetooth/hidbrcm/Makefile new file mode 100644 index 00000000..609a3e09 --- /dev/null +++ b/net/bluetooth/hidbrcm/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for Broadcom bthid Driver +# + +obj-$(CONFIG_BT_HID_BRCM) += bthid.o + +hidbrcm-objs := bthid.o diff --git a/net/bluetooth/hidbrcm/bthid.c b/net/bluetooth/hidbrcm/bthid.c new file mode 100644 index 00000000..bb35416d --- /dev/null +++ b/net/bluetooth/hidbrcm/bthid.c @@ -0,0 +1,309 @@ +/***************************************************************************** + * + * Copyright (C) 2009-2010 Broadcom Corporation + * + * This 
program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * A copy of the GPL is available
+ * at http://www.broadcom.com/licenses/GPLv2.php, or by writing to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/hid.h>
+
+
+MODULE_AUTHOR("Daniel McDowell ");
+MODULE_DESCRIPTION("User level driver support for Bluetooth HID input");
+MODULE_SUPPORTED_DEVICE("bthid");
+MODULE_LICENSE("GPL");
+
+
+#define BTHID_NAME "bthid"
+#define BTHID_MINOR 224
+#define BTHID_IOCTL_RPT_DSCP 1
+#define BTHID_MAX_CTRL_BUF_LEN 508
+
+
+struct bthid_ctrl {
+        int size;
+        char buf[BTHID_MAX_CTRL_BUF_LEN];
+};
+
+struct bthid_device {
+        struct input_dev *dev;
+        struct hid_device *hid;
+        int dscp_set;
+};
+
+
+static int bthid_ll_start(struct hid_device *hid)
+{
+        printk(KERN_INFO "######## bthid_ll_start: hid = %p ########\n", hid);
+        return 0;
+}
+
+static void bthid_ll_stop(struct hid_device *hid)
+{
+        printk(KERN_INFO "######## bthid_ll_stop: hid = %p ########\n", hid);
+}
+
+static int bthid_ll_open(struct hid_device *hid)
+{
+        printk(KERN_INFO "######## bthid_ll_open: hid = %p ########\n", hid);
+        return 0;
+}
+
+static void bthid_ll_close(struct hid_device *hid)
+{
+        printk(KERN_INFO "######## bthid_ll_close: hid = %p ########\n", hid);
+}
+
+static int bthid_ll_hidinput_event(struct input_dev *dev, unsigned int type,
+                                   unsigned int code, int value)
+{
+        /*
+        printk("######## bthid_ll_hidinput_event: dev = %p, type = %d,
+                code = %d, value = %d ########\n",
+                dev, type, code, value);
+        */
+        return 0;
+}
+
+static int bthid_ll_parse(struct hid_device *hid)
+{
+        int ret;
+        unsigned char *buf;
+        struct bthid_ctrl *p_ctrl = hid->driver_data;
+
+        printk(KERN_INFO "######## bthid_ll_parse: hid = %p ########\n", hid);
+
+        buf = kmalloc(p_ctrl->size, GFP_KERNEL);
+        if (!buf)
+                return -ENOMEM;
+
+        memcpy(buf, p_ctrl->buf, p_ctrl->size);
+
+        ret = hid_parse_report(hid, buf, p_ctrl->size);
+        kfree(buf);
+
+        printk(KERN_INFO "######## bthid_ll_parse: status = %d, \
+                ret = %d ########\n", hid->status, ret);
+
+        return ret;
+}
+
+static struct hid_ll_driver bthid_ll_driver = {
+        .start = bthid_ll_start,
+        .stop = bthid_ll_stop,
+        .open = bthid_ll_open,
+        .close = bthid_ll_close,
+        .hidinput_input_event = bthid_ll_hidinput_event,
+        .parse = bthid_ll_parse,
+};
+
+static int bthid_open(struct inode *inode, struct file *file)
+{
+        struct bthid_device *p_dev;
+
+        printk(KERN_INFO "######## bthid_open: ########\n");
+
+        p_dev = kzalloc(sizeof(struct bthid_device), GFP_KERNEL);
+        if (!p_dev)
+                return -ENOMEM;
+
+        file->private_data = p_dev;
+
+        printk(KERN_INFO "######## bthid_open: done ########\n");
+        return 0;
+}
+
+static int bthid_release(struct inode *inode, struct file *file)
+{
+        struct bthid_device *p_dev = file->private_data;
+
+        printk(KERN_INFO "######## bthid_release: ########\n");
+
+        if (p_dev->hid) {
+                if (p_dev->hid->status == (HID_STAT_ADDED | HID_STAT_PARSED))
+                        hidinput_disconnect(p_dev->hid);
+
+                if (p_dev->hid->driver_data != NULL)
+                        kfree(p_dev->hid->driver_data);
+
+                hid_destroy_device(p_dev->hid);
+                p_dev->hid = NULL;
+        }
+
+        kfree(p_dev);
+        file->private_data = NULL;
+
+        printk(KERN_INFO "######## bthid_release: done ########\n");
+        return 0;
+}
+
+static ssize_t bthid_write(struct file *file, const char __user *buffer,
+                           size_t count, loff_t *ppos)
+{
+        unsigned char *buf;
+        struct bthid_device *p_dev = file->private_data;
+
+        /*
+        printk("######## bthid_write: count = %d ########\n", count);
+        */
+
+        if (p_dev->dscp_set == 0) {
+                printk(KERN_INFO "bthid_write: Oops, HID report \
+                        descriptor not configured\n");
+                return 0;
+        }
+
+        buf = kmalloc(count + 1, GFP_KERNEL);
+        if (!buf)
+                return -ENOMEM;
+
+        if (copy_from_user(buf, buffer, count)) {
+                kfree(buf);
+                return -EFAULT;
+        }
+
+        if (p_dev->hid)
+                hid_input_report(p_dev->hid, HID_INPUT_REPORT, buf, count, 1);
+
+        kfree(buf);
+
+        /*
+        printk("######## bthid_write: done ########\n");
+        */
+
+        return 0;
+}
+
+static long bthid_ioctl(struct file *file,
+                        unsigned int cmd, unsigned long arg)
+{
+        int ret;
+        struct bthid_ctrl *p_ctrl;
+        struct bthid_device *p_dev = file->private_data;
+
+        printk(KERN_INFO "######## bthid_ioctl: cmd = %d ########\n", cmd);
+
+        if (cmd != BTHID_IOCTL_RPT_DSCP || p_dev == NULL)
+                return -EINVAL;
+
+        p_ctrl = kmalloc(sizeof(struct bthid_ctrl), GFP_KERNEL);
+        if (p_ctrl == NULL)
+                return -ENOMEM;
+
+        if (copy_from_user(p_ctrl, (void __user *) arg,
+                        sizeof(struct bthid_ctrl)) != 0) {
+                kfree(p_ctrl);
+                return -EFAULT;
+        }
+
+        if (p_ctrl->size <= 0) {
+                printk(KERN_INFO "Oops: Invalid BT HID report \
+                        descriptor size %d\n", p_ctrl->size);
+
+                kfree(p_ctrl);
+                return -EINVAL;
+        }
+
+        p_dev->hid = hid_allocate_device();
+        if (p_dev->hid == NULL) {
+                printk(KERN_INFO "Oops: Failed to allocate HID device.\n");
+
+                kfree(p_ctrl);
+                return -ENOMEM;
+        }
+
+        p_dev->hid->bus = BUS_BLUETOOTH;
+        p_dev->hid->vendor = 0;
+        p_dev->hid->product = 0;
+        p_dev->hid->version = 0;
+        p_dev->hid->country = 0;
+        p_dev->hid->ll_driver = &bthid_ll_driver;
+        p_dev->hid->driver_data = p_ctrl;
+
+        strcpy(p_dev->hid->name, "BT HID");
+
+        ret = hid_add_device(p_dev->hid);
+
+        printk(KERN_INFO "hid_add_device: ret = %d, hid->status = %d\n",
+                ret, p_dev->hid->status);
+
+        if (ret != 0) {
+                printk(KERN_INFO "Oops: Failed to add HID device");
+
+                kfree(p_ctrl);
+                hid_destroy_device(p_dev->hid);
+                p_dev->hid = NULL;
+                return -EINVAL;
+        }
+        p_dev->hid->claimed |= HID_CLAIMED_INPUT;
+
+        if (p_dev->hid->status != (HID_STAT_ADDED | HID_STAT_PARSED)) {
+                printk(KERN_INFO "Oops: Failed to process HID \
+                        report descriptor");
+                return -EINVAL;
+        }
+
+        p_dev->dscp_set = 1;
+
+        printk(KERN_INFO "######## bthid_ioctl: done ########\n");
+        return 0;
+}
+
+
+static const struct file_operations bthid_fops = {
+        .owner = THIS_MODULE,
+        .open = bthid_open,
+        .release = bthid_release,
+        .write = bthid_write,
+        .unlocked_ioctl = bthid_ioctl,
+};
+
+static struct miscdevice bthid_misc = {
+        .name = BTHID_NAME,
+        .minor = BTHID_MINOR,
+        .fops = &bthid_fops,
+};
+
+
+static int __init bthid_init(void)
+{
+        int ret;
+
+        printk(KERN_INFO "######## bthid_init: ########\n");
+
+        ret = misc_register(&bthid_misc);
+        if (ret != 0) {
+                printk(KERN_INFO "Oops, failed to register Misc driver, \
+                        ret = %d\n", ret);
+                return ret;
+        }
+
+        printk(KERN_INFO "######## bthid_init: done ########\n");
+
+        return ret;
+}
+
+static void __exit bthid_exit(void)
+{
+        printk(KERN_INFO "bthid_exit:\n");
+
+        misc_deregister(&bthid_misc);
+        printk(KERN_INFO "bthid_exit: done\n");
+}
+
+module_init(bthid_init);
+module_exit(bthid_exit); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 4d533e26..aae7cd21 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1,7 +1,6 @@ /* HIDP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2003-2004 Marcel Holtmann - Copyright (c) 2012 Code Aurora Forum. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as @@ -92,26 +91,6 @@ static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) return NULL; } -static struct device *hidp_get_device(struct hidp_session *session) -{ - bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; - bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; - struct device *device = NULL; - struct hci_dev *hdev; - - hdev = hci_get_route(dst, src); - if (!hdev) - return NULL; - - session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); - if (session->conn) - device = &session->conn->dev; - - hci_dev_put(hdev); - - return device; -} - static void __hidp_link_session(struct hidp_session *session) { __module_get(THIS_MODULE); @@ -122,11 +101,7 @@ static void __hidp_link_session(struct hidp_session *session) static void __hidp_unlink_session(struct hidp_session *session) { - struct device *dev; - - dev = hidp_get_device(session); - if (dev) - hci_conn_put_device(session->conn); + hci_conn_put_device(session->conn); list_del(&session->list); module_put(THIS_MODULE); @@ -659,6 +634,26 @@ static int hidp_session(void *arg) return 0; } +static struct device *hidp_get_device(struct hidp_session *session) +{ + bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; + bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; + struct device *device = NULL; + struct hci_dev *hdev; + + hdev = hci_get_route(dst, src); + if (!hdev) + return NULL; + + session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + if (session->conn) + device = &session->conn->dev; + + hci_dev_put(hdev); + + return device; +} + static int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 4f8a2b71..a7b95d3f 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -700,9 +700,6 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d if (!skb) return; - if (conn->hcon == NULL || conn->hcon->hdev == NULL) - return; - if (lmp_no_flush_capable(conn->hcon->hdev)) flags = ACL_START_NO_FLUSH; else @@ -3290,7 +3287,7 @@ int l2cap_build_conf_req(struct sock *sk, void *data) struct l2cap_conf_rfc rfc = { .mode = pi->mode }; void *ptr = req->data; - BT_DBG("sk %p mode %d", sk, pi->mode); + BT_DBG("sk %p", sk); if (pi->num_conf_req || pi->num_conf_rsp) goto done; @@ -3316,6 +3313,7 @@ int l2cap_build_conf_req(struct sock *sk, void *data) if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) break; + rfc.txwin_size = 0; rfc.max_transmit = 0; rfc.retrans_timeout = 0; @@ -3465,9 +3463,6 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) BT_DBG("sk %p", sk); - if (pi->omtu > mtu) - mtu = pi->omtu; - while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&req, &type, &olen, &val); @@ -3569,8 +3564,6 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) if (pi->mode != rfc.mode) { result = L2CAP_CONF_UNACCEPT; rfc.mode = pi->mode; - if (mtu > L2CAP_DEFAULT_MTU) - pi->omtu = mtu; if (pi->num_conf_rsp == 1) return 
-ECONNREFUSED; @@ -5404,6 +5397,9 @@ static void l2cap_logical_link_complete(struct hci_chan *chan, u8 status) BT_DBG("sk %p", sk); + if (!sk) + return; + lock_sock(sk); if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) { @@ -5543,11 +5539,8 @@ static void l2cap_logical_link_worker(struct work_struct *work) container_of(work, struct l2cap_logical_link_work, work); struct sock *sk = log_link_work->chan->l2cap_sk; - if (sk) { - l2cap_logical_link_complete(log_link_work->chan, - log_link_work->status); - sock_put(sk); - } + l2cap_logical_link_complete(log_link_work->chan, log_link_work->status); + sock_put(sk); hci_chan_put(log_link_work->chan); kfree(log_link_work); } @@ -7249,31 +7242,14 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) { - struct sock *sk = NULL; + struct sock *sk; struct sk_buff *skb_rsp; struct l2cap_hdr *lh; int dir; + u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0}; u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00, L2CAP_ATT_NOT_SUPPORTED}; - if (skb->data[0] == L2CAP_ATT_MTU_REQ) { - u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0}; - - skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE, - GFP_ATOMIC); - if (!skb_rsp) - goto drop; - - lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE); - lh->len = cpu_to_le16(sizeof(mtu_rsp)); - lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA); - memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp, - sizeof(mtu_rsp)); - hci_send_acl(conn->hcon, NULL, skb_rsp, 0); - - goto free_skb; - } - dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1; sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src, @@ -7294,30 +7270,28 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, if (l2cap_pi(sk)->imtu < skb->len) goto drop; - if (!sock_queue_rcv_skb(sk, skb)) - goto done; - -drop: - if (skb->data[0] != L2CAP_ATT_INDICATE) - goto not_indicate; + if (skb->data[0] == L2CAP_ATT_MTU_REQ) { + skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE, + GFP_ATOMIC); + if (!skb_rsp) + goto drop; - /* If this is an incoming Indication, we are required to confirm */ + lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE); + lh->len = cpu_to_le16(sizeof(mtu_rsp)); + lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA); + memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp, + sizeof(mtu_rsp)); + hci_send_acl(conn->hcon, NULL, skb_rsp, 0); - skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC); - if (!skb_rsp) goto free_skb; + } - lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE); - lh->len = cpu_to_le16(sizeof(u8)); - lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA); - err_rsp[0] = L2CAP_ATT_CONFIRM; - memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8)); - hci_send_acl(conn->hcon, NULL, skb_rsp, 0); - goto free_skb; + if (!sock_queue_rcv_skb(sk, skb)) + goto done; -not_indicate: - if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT || - skb->data[0] == L2CAP_ATT_CONFIRM) +drop: + if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT && + skb->data[0] != L2CAP_ATT_INDICATE) goto free_skb; /* If this is an incoming PDU that requires a response, respond with diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 0a6c107c..4c299e61 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1172,7 +1172,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; - struct sock *sk2 = NULL; + struct sock 
*srv_sk = NULL; int err; BT_DBG("sock %p, sk %p", sock, sk); @@ -1180,16 +1180,15 @@ static int l2cap_sock_release(struct socket *sock) if (!sk) return 0; - /* If this is an ATT socket, find it's matching server/client */ - if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) - sk2 = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA, - &bt_sk(sk)->src, &bt_sk(sk)->dst, - l2cap_pi(sk)->incoming ? 0 : 1); + /* If this is an ATT Client socket, find the matching Server */ + if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA && !l2cap_pi(sk)->incoming) + srv_sk = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA, + &bt_sk(sk)->src, &bt_sk(sk)->dst, 1); - /* If matching socket found, request tear down */ - BT_DBG("sock:%p companion:%p", sk, sk2); - if (sk2) - l2cap_sock_set_timer(sk2, 1); + /* If server socket found, request tear down */ + BT_DBG("client:%p server:%p", sk, srv_sk); + if (srv_sk) + l2cap_sock_set_timer(srv_sk, 1); err = l2cap_sock_shutdown(sock, 2); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1fe65f44..80f4bd6d 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -227,8 +227,6 @@ static int read_controller_info(struct sock *sk, u16 index) memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); - rp.le_white_list_size = hdev->le_white_list_size; - hci_dev_unlock_bh(hdev); hci_dev_put(hdev); @@ -310,7 +308,7 @@ static void mgmt_pending_foreach(u16 opcode, int index, cmd = list_entry(p, struct pending_cmd, list); - if (opcode > 0 && cmd->opcode != opcode) + if (cmd->opcode != opcode) continue; if (index >= 0 && cmd->index != index) @@ -418,7 +416,6 @@ static u8 get_service_classes(struct hci_dev *hdev) static int update_class(struct hci_dev *hdev) { u8 cod[3]; - int err = 0; BT_DBG("%s", hdev->name); @@ -432,12 +429,7 @@ static int update_class(struct hci_dev *hdev) if (memcmp(cod, hdev->dev_class, 3) == 0) return 0; - err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); - - if (err == 0) - memcpy(hdev->dev_class, cod, 3); - - return err; + return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); } static int set_limited_discoverable(struct sock *sk, u16 index, @@ -1006,12 +998,12 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, hdev->major_class |= cp->major & MGMT_MAJOR_CLASS_MASK; hdev->minor_class = cp->minor; - if (test_bit(HCI_UP, &hdev->flags)) { + if (test_bit(HCI_UP, &hdev->flags)) err = update_class(hdev); - if (err == 0) - err = cmd_complete(sk, index, - MGMT_OP_SET_DEV_CLASS, hdev->dev_class, sizeof(u8)*3); - } else + else + err = 0; + + if (err == 0) err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); hci_dev_unlock_bh(hdev); @@ -1450,185 +1442,6 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, return err; } -static int le_add_dev_white_list(struct sock *sk, u16 index, - unsigned char *data, u16 len) -{ - struct hci_dev *hdev; - struct mgmt_cp_le_add_dev_white_list *cp; - int err = 0; - - BT_DBG(""); - - cp = (void *) data; - - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST, - EINVAL); - - hdev = hci_dev_get(index); - if (!hdev) - return cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST, - ENODEV); - - hci_dev_lock_bh(hdev); - - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_LE_ADD_DEV_WHITE_LIST, - ENETDOWN); - goto failed; - } - - hci_le_add_dev_white_list(hdev, &cp->bdaddr); - -failed: - hci_dev_unlock_bh(hdev); - hci_dev_put(hdev); - - return err; -} - -static int 
le_remove_dev_white_list(struct sock *sk, u16 index, - unsigned char *data, u16 len) -{ - struct hci_dev *hdev; - struct mgmt_cp_le_remove_dev_white_list *cp; - int err = 0; - - BT_DBG(""); - - cp = (void *) data; - - if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST, - EINVAL); - - hdev = hci_dev_get(index); - if (!hdev) - return cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST, - ENODEV); - - hci_dev_lock_bh(hdev); - - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_LE_REMOVE_DEV_WHITE_LIST, - ENETDOWN); - goto failed; - } - - hci_le_remove_dev_white_list(hdev, &cp->bdaddr); - -failed: - hci_dev_unlock_bh(hdev); - hci_dev_put(hdev); - - return err; -} - -static int le_create_conn_white_list(struct sock *sk, u16 index) -{ - struct hci_dev *hdev; - struct hci_conn *conn; - u8 sec_level, auth_type; - struct pending_cmd *cmd; - bdaddr_t bdaddr; - int err = 0; - - BT_DBG(""); - - hdev = hci_dev_get(index); - if (!hdev) - return cmd_status(sk, index, MGMT_OP_LE_CREATE_CONN_WHITE_LIST, - ENODEV); - - hci_dev_lock_bh(hdev); - - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, MGMT_OP_LE_CREATE_CONN_WHITE_LIST, - ENETDOWN); - goto failed; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_LE_CREATE_CONN_WHITE_LIST, index, - NULL, 0); - if (!cmd) { - err = -ENOMEM; - goto failed; - } - - sec_level = BT_SECURITY_MEDIUM; - auth_type = HCI_AT_GENERAL_BONDING; - memset(&bdaddr, 0, sizeof(bdaddr)); - conn = hci_le_connect(hdev, 0, BDADDR_ANY, sec_level, auth_type, NULL); - if (IS_ERR(conn)) { - err = PTR_ERR(conn); - mgmt_pending_remove(cmd); - } - -failed: - hci_dev_unlock_bh(hdev); - hci_dev_put(hdev); - - return err; -} - -static int le_cancel_create_conn_white_list(struct sock *sk, u16 index) -{ - struct hci_dev *hdev; - int err = 0; - - BT_DBG(""); - - hdev = hci_dev_get(index); - if (!hdev) - return cmd_status(sk, index, - MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST, ENODEV); - - hci_dev_lock_bh(hdev); - - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, - MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST, ENETDOWN); - goto failed; - } - - hci_le_cancel_create_connect(hdev, BDADDR_ANY); - -failed: - hci_dev_unlock_bh(hdev); - hci_dev_put(hdev); - - return err; -} - -static int le_clear_white_list(struct sock *sk, u16 index) -{ - struct hci_dev *hdev; - int err; - - BT_DBG(""); - - hdev = hci_dev_get(index); - if (!hdev) - return cmd_status(sk, index, - MGMT_OP_LE_CLEAR_WHITE_LIST, ENODEV); - - hci_dev_lock_bh(hdev); - - if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_status(sk, index, - MGMT_OP_LE_CLEAR_WHITE_LIST, ENETDOWN); - goto failed; - } - - err = hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); - -failed: - hci_dev_unlock_bh(hdev); - hci_dev_put(hdev); - - return err; -} - static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, u16 len) { @@ -2316,10 +2129,9 @@ static int start_discovery(struct sock *sk, u16 index) err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); - if (err < 0) { + if (err < 0) mgmt_pending_remove(cmd); - hdev->disco_state = SCAN_IDLE; - } else if (lmp_le_capable(hdev)) { + else if (lmp_le_capable(hdev)) { cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index); if (!cmd) mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, @@ -2331,8 +2143,7 @@ static int start_discovery(struct sock *sk, u16 index) del_timer(&hdev->disco_timer); mod_timer(&hdev->disco_timer, jiffies + msecs_to_jiffies(20000)); - } else - hdev->disco_state = SCAN_BR; + } 
failed: hci_dev_unlock_bh(hdev); @@ -2376,7 +2187,9 @@ static int stop_discovery(struct sock *sk, u16 index) err = cmd_complete(sk, index, MGMT_OP_STOP_DISCOVERY, NULL, 0); } - } else if (state == SCAN_BR) + } + + if (err < 0) err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index); @@ -2645,23 +2458,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) case MGMT_OP_ENCRYPT_LINK: err = encrypt_link(sk, index, buf + sizeof(*hdr), len); break; - case MGMT_OP_LE_ADD_DEV_WHITE_LIST: - err = le_add_dev_white_list(sk, index, buf + sizeof(*hdr), - len); - break; - case MGMT_OP_LE_REMOVE_DEV_WHITE_LIST: - err = le_remove_dev_white_list(sk, index, buf + sizeof(*hdr), - len); - break; - case MGMT_OP_LE_CLEAR_WHITE_LIST: - err = le_clear_white_list(sk, index); - break; - case MGMT_OP_LE_CREATE_CONN_WHITE_LIST: - err = le_create_conn_white_list(sk, index); - break; - case MGMT_OP_LE_CANCEL_CREATE_CONN_WHITE_LIST: - err = le_cancel_create_conn_white_list(sk, index); - break; + default: BT_DBG("Unknown op %u", opcode); err = cmd_status(sk, index, opcode, 0x01); @@ -2678,14 +2475,6 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) return err; } -static void cmd_status_rsp(struct pending_cmd *cmd, void *data) -{ - u8 *status = data; - - cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); - mgmt_pending_remove(cmd); -} - int mgmt_index_added(u16 index) { BT_DBG("%d", index); @@ -2694,12 +2483,7 @@ int mgmt_index_added(u16 index) int mgmt_index_removed(u16 index) { - u8 status = ENODEV; - BT_DBG("%d", index); - - mgmt_pending_foreach(0, index, cmd_status_rsp, &status); - return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); } @@ -2738,11 +2522,6 @@ int mgmt_powered(u16 index, u8 powered) mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); - if (!powered) { - u8 status = ENETDOWN; - mgmt_pending_foreach(0, index, cmd_status_rsp, &status); - } - ev.val = powered; ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); @@ -2821,42 +2600,13 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 bonded) int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le) { struct mgmt_ev_connected ev; - struct pending_cmd *cmd; - struct hci_dev *hdev; - - BT_DBG("hci%u", index); - - hdev = hci_dev_get(index); - - if (!hdev) - return -ENODEV; bacpy(&ev.bdaddr, bdaddr); ev.le = le; - cmd = mgmt_pending_find(MGMT_OP_LE_CREATE_CONN_WHITE_LIST, index); - if (cmd) { - BT_ERR("mgmt_connected remove mgmt pending white_list"); - mgmt_pending_remove(cmd); - } - return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); } -int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval, - u16 latency, u16 timeout) -{ - struct mgmt_ev_le_conn_params ev; - - bacpy(&ev.bdaddr, bdaddr); - ev.interval = interval; - ev.latency = latency; - ev.timeout = timeout; - - return mgmt_event(MGMT_EV_LE_CONN_PARAMS, index, &ev, sizeof(ev), - NULL); -} - static void disconnect_rsp(struct pending_cmd *cmd, void *data) { struct mgmt_cp_disconnect *cp = cmd->param; @@ -2873,22 +2623,21 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } -int mgmt_disconnected(u16 index, bdaddr_t *bdaddr, u8 reason) +int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) { struct mgmt_ev_disconnected ev; struct sock *sk = NULL; int err; + mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); + bacpy(&ev.bdaddr, bdaddr); - ev.reason = reason; err = 
mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); if (sk) sock_put(sk); - mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); - return err; } diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index ae79e25b..c4b99502 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -137,21 +137,6 @@ static inline void rfcomm_schedule(void) static inline void rfcomm_session_put(struct rfcomm_session *s) { - bool match = false; - struct rfcomm_session *sess; - struct list_head *p, *n; - list_for_each_safe(p, n, &session_list) { - sess = list_entry(p, struct rfcomm_session, list); - if (s == sess) { - match = true; - break; - } - } - if (!match) { - BT_ERR("session already freed previously"); - dump_stack(); - return; - } if (atomic_dec_and_test(&s->refcnt)) rfcomm_session_del(s); } @@ -1232,18 +1217,12 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) break; case BT_DISCONN: - /* rfcomm_session_put is called later so don't do - * anything here otherwise we will mess up the session - * reference counter: - * - * (a) when we are the initiator dlc_unlink will drive - * the reference counter to 0 (there is no initial put - * after session_add) - * - * (b) when we are not the initiator rfcomm_rx_process - * will explicitly call put to balance the initial hold - * done after session add. - */ + /* When socket is closed and we are not RFCOMM + * initiator rfcomm_process_rx already calls + * rfcomm_session_put() */ + if (s->sock->sk->sk_state != BT_CLOSED) + if (list_empty(&s->dlcs)) + rfcomm_session_put(s); break; } } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 6ec7e627..f8c3bbaf 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -393,15 +393,6 @@ static void __sco_sock_close(struct sock *sk) case BT_CONNECTED: case BT_CONFIG: - if (sco_pi(sk)->conn) { - sk->sk_state = BT_DISCONN; - sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); - hci_conn_put(sco_pi(sk)->conn->hcon); - sco_pi(sk)->conn->hcon = NULL; - } else - sco_chan_del(sk, ECONNRESET); - break; - case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); @@ -811,9 +802,6 @@ static int sco_sock_shutdown(struct socket *sock, int how) if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); - else - err = bt_sock_wait_state(sk, BT_CLOSED, - SCO_DISCONN_TIMEOUT); } release_sock(sk); return err; @@ -835,11 +823,6 @@ static int sco_sock_release(struct socket *sock) lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); - } else { - lock_sock(sk); - err = bt_sock_wait_state(sk, BT_CLOSED, - SCO_DISCONN_TIMEOUT); - release_sock(sk); } sock_orphan(sk); @@ -873,9 +856,7 @@ static void sco_chan_del(struct sock *sk, int err) conn->sk = NULL; sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); - - if (conn->hcon) - hci_conn_put(conn->hcon); + hci_conn_put(conn->hcon); } sk->sk_state = BT_CLOSED; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index dd09a255..31fc4dcf 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -708,6 +708,10 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) invalid_key: hcon->sec_req = FALSE; + /* Switch to Pairing Connection Parameters */ + hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL, SMP_MAX_CONN_INTERVAL, + SMP_MAX_CONN_LATENCY, SMP_SUPERVISION_TIMEOUT); + skb_pull(skb, sizeof(*rp)); memset(&cp, 0, sizeof(cp)); @@ -769,6 +773,11 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 
sec_level) if (hcon->link_mode & HCI_LM_MASTER) { struct smp_cmd_pairing cp; + /* Switch to Pairing Connection Parameters */ + hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL, + SMP_MAX_CONN_INTERVAL, SMP_MAX_CONN_LATENCY, + SMP_SUPERVISION_TIMEOUT); + build_pairing_cmd(conn, &cp, NULL, authreq); hcon->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&hcon->preq[1], &cp, sizeof(cp));