path: root/mm/hugetlb_cgroup.c
/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
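/*
 * The cftype ->private value packs the hstate index into the upper 16 bits
 * and the resource attribute into the lower 16 bits.  For illustration,
 * MEMFILE_PRIVATE(1, RES_LIMIT) is 0x10001, from which MEMFILE_IDX()
 * recovers 1 and MEMFILE_ATTR() recovers RES_LIMIT.
 */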

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			page_counter_init(&h_cgroup->hugepage[idx],
					  &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			page_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}


/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that are not charged to any
	 * cgroup, i.e. hugepages with fewer than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = 1 << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget_online(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
		ret = -ENOMEM;
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};
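/*
 * Each value selects which field of the per-hstate page_counter a cgroup
 * file exposes, e.g. RES_LIMIT backs the "<size>.limit_in_bytes" file set
 * up in __hugetlb_cgroup_file_init() below.
 */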

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
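	/* e.g. mem_fmt(buf, 32, 2UL << 20) yields "2MB", 1UL << 30 yields "1GB" */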
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private  = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
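		/*
		 * For a 2MB hstate, for example, the files show up as
		 * hugetlb.2MB.limit_in_bytes, hugetlb.2MB.usage_in_bytes,
		 * hugetlb.2MB.max_usage_in_bytes and hugetlb.2MB.failcnt
		 * (the "hugetlb." prefix is added by the cgroup core).
		 */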
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};
/*********************************************************************
 *                
 * Filename:      ali-ircc.c
 * Version:       0.5
 * Description:   Driver for the ALI M1535D and M1543C FIR Controller
 * Status:        Experimental.
 * Author:        Benjamin Kong <benjamin_kong@ali.com.tw>
 * Created at:    2000/10/16 03:46PM
 * Modified at:   2001/1/3 02:55PM
 * Modified by:   Benjamin Kong <benjamin_kong@ali.com.tw>
 * Modified at:   2003/11/6 to add support for the ALi south-bridge chipset M1563
 * Modified by:   Clear Zhang <clear_zhang@ali.com.tw>
 * 
 *     Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
 *     All Rights Reserved
 *      
 *     This program is free software; you can redistribute it and/or 
 *     modify it under the terms of the GNU General Public License as 
 *     published by the Free Software Foundation; either version 2 of 
 *     the License, or (at your option) any later version.
 *  
 ********************************************************************/

#include <linux/module.h>
#include <linux/gfp.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "ali-ircc.h"

#define CHIP_IO_EXTENT 8
#define BROKEN_DONGLE_ID

#define ALI_IRCC_DRIVER_NAME "ali-ircc"

/* Power Management */
static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
static int ali_ircc_resume(struct platform_device *dev);

static struct platform_driver ali_ircc_driver = {
	.suspend	= ali_ircc_suspend,
	.resume		= ali_ircc_resume,
	.driver		= {
		.name	= ALI_IRCC_DRIVER_NAME,
	},
};

/* Module parameters */
static int qos_mtt_bits = 0x07;  /* 1 ms or more */

/* Use BIOS settings by default, but the user may supply module parameters */
static unsigned int io[]  = { ~0, ~0, ~0, ~0 };
static unsigned int irq[] = { 0, 0, 0, 0 };
static unsigned int dma[] = { 0, 0, 0, 0 };

static int  ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
static int  ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
static int  ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);

/* These are the currently known ALi south-bridge chipsets; the only
 * difference is that the M1543C doesn't support the HP HSDL-3600.
 */
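/* Per-entry data: name, the candidate configuration ports, the two bytes
 * written to enter configuration mode (entr1/entr2), the chip-ID index
 * register, the ID value expected there, and the probe()/init() callbacks.
 */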
static ali_chip_t chips[] =
{
	{ "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
	{ "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
	{ "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
	{ NULL }
};

/* Max 4 instances for now */
static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };

/* Dongle Types */
static char *dongle_types[] = {
	"TFDS6000",
	"HP HSDL-3600",
	"HP HSDL-1100",	
	"No dongle connected",
};

/* Some prototypes */
static int  ali_ircc_open(int i, chipio_t *info);

static int  ali_ircc_close(struct ali_ircc_cb *self);

static int  ali_ircc_setup(chipio_t *info);
static int  ali_ircc_is_receiving(struct ali_ircc_cb *self);
static int  ali_ircc_net_open(struct net_device *dev);
static int  ali_ircc_net_close(struct net_device *dev);
static int  ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);

/* SIR function */
static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
						struct net_device *dev);
static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
static int  ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);

/* FIR function */
static netdev_tx_t  ali_ircc_fir_hard_xmit(struct sk_buff *skb,
						 struct net_device *dev);
static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
static int  ali_ircc_dma_receive(struct ali_ircc_cb *self); 
static int  ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
static int  ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);

/* My Function */
static int  ali_ircc_read_dongle_id (int i, chipio_t *info);
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);

/* ALi chip function */
static void SIR2FIR(int iobase);
static void FIR2SIR(int iobase);
static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);

/*
 * Function ali_ircc_init ()
 *
 *    Initialize chip. Find out what kinds of chips we are dealing with
 *    and their configuration register addresses
 */
static int __init ali_ircc_init(void)
{
	ali_chip_t *chip;
	chipio_t info;
	int ret;
	int cfg, cfg_base;
	int reg, revision;
	int i = 0;
	
	ret = platform_driver_register(&ali_ircc_driver);
        if (ret) {
		net_err_ratelimited("%s, Can't register driver!\n",
				    ALI_IRCC_DRIVER_NAME);
                return ret;
        }

	ret = -ENODEV;
	
	/* Probe for all the ALi chipsets we know about */
	for (chip= chips; chip->name; chip++, i++) 
	{
		pr_debug("%s(), Probing for %s ...\n", __func__, chip->name);
				
		/* Try all config registers for this chip */
		for (cfg=0; cfg<2; cfg++)
		{
			cfg_base = chip->cfg[cfg];
			if (!cfg_base)
				continue;
				
			memset(&info, 0, sizeof(chipio_t));
			info.cfg_base = cfg_base;
			info.fir_base = io[i];
			info.dma = dma[i];
			info.irq = irq[i];
			
			
			/* Enter Configuration */
			outb(chip->entr1, cfg_base);
			outb(chip->entr2, cfg_base);
			
			/* Select Logical Device 5 Registers (UART2) */
			outb(0x07, cfg_base);
			outb(0x05, cfg_base+1);
			
			/* Read Chip Identification Register */
			outb(chip->cid_index, cfg_base);	
			reg = inb(cfg_base+1);	
				
			if (reg == chip->cid_value)
			{
				pr_debug("%s(), Chip found at 0x%03x\n",
					 __func__, cfg_base);
					   
				outb(0x1F, cfg_base);
				revision = inb(cfg_base+1);
				pr_debug("%s(), Found %s chip, revision=%d\n",
					 __func__, chip->name, revision);
				
				/* 
				 * If the user supplies the base address, then
				 * we init the chip, if not we probe the values
				 * set by the BIOS
				 */				
				if (io[i] < 2000)
				{
					chip->init(chip, &info);
				}
				else
				{
					chip->probe(chip, &info);	
				}
				
				if (ali_ircc_open(i, &info) == 0)
					ret = 0;
				i++;				
			}
			else
			{
				pr_debug("%s(), No %s chip at 0x%03x\n",
					 __func__, chip->name, cfg_base);
			}
			/* Exit configuration */
			outb(0xbb, cfg_base);
		}
	}		
		
	if (ret)
		platform_driver_unregister(&ali_ircc_driver);

	return ret;
}

/*
 * Function ali_ircc_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit ali_ircc_cleanup(void)
{
	int i;

	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			ali_ircc_close(dev_self[i]);
	}
	
	platform_driver_unregister(&ali_ircc_driver);

}

static const struct net_device_ops ali_ircc_sir_ops = {
	.ndo_open       = ali_ircc_net_open,
	.ndo_stop       = ali_ircc_net_close,
	.ndo_start_xmit = ali_ircc_sir_hard_xmit,
	.ndo_do_ioctl   = ali_ircc_net_ioctl,
};

static const struct net_device_ops ali_ircc_fir_ops = {
	.ndo_open       = ali_ircc_net_open,
	.ndo_stop       = ali_ircc_net_close,
	.ndo_start_xmit = ali_ircc_fir_hard_xmit,
	.ndo_do_ioctl   = ali_ircc_net_ioctl,
};

/*
 * Function ali_ircc_open (int i, chipio_t *inf)
 *
 *    Open driver instance
 *
 */
static int ali_ircc_open(int i, chipio_t *info)
{
	struct net_device *dev;
	struct ali_ircc_cb *self;
	int dongle_id;
	int err;
			
	if (i >= ARRAY_SIZE(dev_self)) {
		net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
				    __func__);
		return -ENOMEM;
	}
	
	/* Set FIR FIFO and DMA Threshold */
	if ((ali_ircc_setup(info)) == -1)
		return -1;
		
	dev = alloc_irdadev(sizeof(*self));
	if (dev == NULL) {
		net_err_ratelimited("%s(), can't allocate memory for control block!\n",
				    __func__);
		return -ENOMEM;
	}

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);
   
	/* Need to store self somewhere */
	dev_self[i] = self;
	self->index = i;

	/* Initialize IO */
	self->io.cfg_base  = info->cfg_base;	/* In ali_ircc_probe_53 assign 		*/
	self->io.fir_base  = info->fir_base;	/* info->sir_base = info->fir_base 	*/
	self->io.sir_base  = info->sir_base; 	/* ALi SIR and FIR use the same address */
        self->io.irq       = info->irq;
        self->io.fir_ext   = CHIP_IO_EXTENT;
        self->io.dma       = info->dma;
        self->io.fifo_size = 16;		/* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
	
	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext,
			    ALI_IRCC_DRIVER_NAME)) {
		net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
				     __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);
	
	/* The only value we must override is the baudrate */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
			
	self->qos.min_turn_time.bits = qos_mtt_bits;
			
	irda_qos_bits_to_value(&self->qos);
	
	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
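	/* e.g. a 7-frame window of 2048-byte frames: (2048 + 6) * 7 + 6 = 14384 */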
	self->rx_buff.truesize = 14384; 
	self->tx_buff.truesize = 14384;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(NULL, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	
	self->tx_buff.head =
		dma_zalloc_coherent(NULL, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	
	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &ali_ircc_sir_ops;

	err = register_netdev(dev);
	if (err) {
		net_err_ratelimited("%s(), register_netdev() failed!\n",
				    __func__);
		goto err_out4;
	}
	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);

	/* Check dongle id */
	dongle_id = ali_ircc_read_dongle_id(i, info);
	net_info_ratelimited("%s(), %s, Found dongle: %s\n",
			     __func__, ALI_IRCC_DRIVER_NAME,
			     dongle_types[dongle_id]);
		
	self->io.dongle_id = dongle_id;

	
	return 0;

 err_out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	dev_self[i] = NULL;
	free_netdev(dev);
	return err;
}


/*
 * Function ali_ircc_close (self)
 *
 *    Close driver instance
 *
 */
static int __exit ali_ircc_close(struct ali_ircc_cb *self)
{
	int iobase;

	IRDA_ASSERT(self != NULL, return -1;);

        iobase = self->io.fir_base;

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	
	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	dev_self[self->index] = NULL;
	free_netdev(self->netdev);
	
	
	return 0;
}

/*
 * Function ali_ircc_init_43 (chip, info)
 *
 *    Initialize the ALi M1543 chip. 
 */
static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info) 
{
	/* All controller information like I/O address, DMA channel, IRQ
	 * are set by BIOS
	 */
	
	return 0;
}

/*
 * Function ali_ircc_init_53 (chip, info)
 *
 *    Initialize the ALi M1535 chip. 
 */
static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info) 
{
	/* All controller information like I/O address, DMA channel, IRQ
	 * are set by BIOS
	 */
	
	return 0;
}

/*
 * Function ali_ircc_probe_53 (chip, info)
 *    	
 *	Probes for the ALi M1535D or M1535
 */
static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int hi, low, reg;
	
	
	/* Enter Configuration */
	outb(chip->entr1, cfg_base);
	outb(chip->entr2, cfg_base);
	
	/* Select Logical Device 5 Registers (UART2) */
	outb(0x07, cfg_base);
	outb(0x05, cfg_base+1);
	
	/* Read address control register */
	outb(0x60, cfg_base);
	hi = inb(cfg_base+1);	
	outb(0x61, cfg_base);
	low = inb(cfg_base+1);
	info->fir_base = (hi<<8) + low;
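	/* e.g. hi = 0x02, low = 0xf8 gives fir_base 0x2f8 (the legacy COM2 base) */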
	
	info->sir_base = info->fir_base;
	
	pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
		
	/* Read IRQ control register */
	outb(0x70, cfg_base);
	reg = inb(cfg_base+1);
	info->irq = reg & 0x0f;
	pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
	
	/* Read DMA channel */
	outb(0x74, cfg_base);
	reg = inb(cfg_base+1);
	info->dma = reg & 0x07;
	
	if(info->dma == 0x04)
		net_warn_ratelimited("%s(), No DMA channel assigned !\n",
				     __func__);
	else
		pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
	
	/* Read Enabled Status */
	outb(0x30, cfg_base);
	reg = inb(cfg_base+1);
	info->enabled = (reg & 0x80) && (reg & 0x01);
	pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled);
	
	/* Read Power Status */
	outb(0x22, cfg_base);
	reg = inb(cfg_base+1);
	info->suspended = (reg & 0x20);
	pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended);
	
	/* Exit configuration */
	outb(0xbb, cfg_base);
		
	
	return 0;	
}

/*
 * Function ali_ircc_setup (info)
 *
 *    	Set FIR FIFO and DMA Threshold
 *	Returns non-negative on success.
 *
 */
static int ali_ircc_setup(chipio_t *info)
{
	unsigned char tmp;
	int version;
	int iobase = info->fir_base;
	
	
	/* Locking comments :
	 * Most operations here need to be protected. We are called before
	 * the device instance is created in ali_ircc_open(), therefore 
	 * nobody can bother us - Jean II */

	/* Switch to FIR space */
	SIR2FIR(iobase);
	
	/* Master Reset */
	outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
	
	/* Read FIR ID Version Register */
	switch_bank(iobase, BANK3);
	version = inb(iobase+FIR_ID_VR);
	
	/* Should be 0x00 in the M1535/M1535D */
	if(version != 0x00)
	{
		net_err_ratelimited("%s, Wrong chip version %02x\n",
				    ALI_IRCC_DRIVER_NAME, version);
		return -1;
	}
	
	/* Set FIR FIFO Threshold Register */
	switch_bank(iobase, BANK1);
	outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
	
	/* Set FIR DMA Threshold Register */
	outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
	
	/* CRC enable */
	switch_bank(iobase, BANK2);
	outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
	
	/* The NDIS driver sets the TX Length here (BANK2 Alias 3, Alias 4) */
	
	/* Switch to Bank 0 */
	switch_bank(iobase, BANK0);
	
	tmp = inb(iobase+FIR_LCR_B);
	tmp &=~0x20; // disable SIP
	tmp |= 0x80; // these two steps make RX mode
	tmp &= 0xbf;	
	outb(tmp, iobase+FIR_LCR_B);
		
	/* Disable Interrupt */
	outb(0x00, iobase+FIR_IER);
	
	
	/* Switch to SIR space */
	FIR2SIR(iobase);
	
	net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n",
			     ALI_IRCC_DRIVER_NAME);
	
	/* Enable receive interrupts */ 
	// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
	// Turn on the interrupts in ali_ircc_net_open
	
	
	return 0;
}

/*
 * Function ali_ircc_read_dongle_id (int index, info)
 *
 * Try to read dongle identification. This procedure needs to be executed
 * once after power-on/reset. It also needs to be used whenever you suspect
 * that the user may have plugged/unplugged the IrDA Dongle.
 */
static int ali_ircc_read_dongle_id (int i, chipio_t *info)
{
	int dongle_id, reg;
	int cfg_base = info->cfg_base;
	
		
	/* Enter Configuration */
	outb(chips[i].entr1, cfg_base);
	outb(chips[i].entr2, cfg_base);
	
	/* Select Logical Device 5 Registers (UART2) */
	outb(0x07, cfg_base);
	outb(0x05, cfg_base+1);
	
	/* Read Dongle ID */
	outb(0xf0, cfg_base);
	reg = inb(cfg_base+1);	
	dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
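	/*
	 * Bits 7 and 5 of the register form the 2-bit index into dongle_types[],
	 * e.g. reg = 0xa0 (bits 7 and 5 set) gives dongle_id 3, "No dongle connected".
	 */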
	pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n",
		 __func__, dongle_id, dongle_types[dongle_id]);
	
	/* Exit configuration */
	outb(0xbb, cfg_base);
			
	
	return dongle_id;
}

/*
 * Function ali_ircc_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ali_ircc_cb *self;
	int ret;
		
		
	self = netdev_priv(dev);
	
	spin_lock(&self->lock);
	
	/* Dispatch interrupt handler for the current speed */
	if (self->io.speed > 115200)
		ret = ali_ircc_fir_interrupt(self);
	else
		ret = ali_ircc_sir_interrupt(self);
		
	spin_unlock(&self->lock);
	
	return ret;
}
/*
 * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
{
	__u8 eir, OldMessageCount;
	int iobase, tmp;
	
	
	iobase = self->io.fir_base;
	
	switch_bank(iobase, BANK0);	
	self->InterruptID = inb(iobase+FIR_IIR);		
	self->BusStatus = inb(iobase+FIR_BSR);	
	
	OldMessageCount = (self->LineStatus + 1) & 0x07;
	self->LineStatus = inb(iobase+FIR_LSR);	
	//self->ier = inb(iobase+FIR_IER); 		2000/12/1 04:32PM
	eir = self->InterruptID & self->ier; /* Keep only the interrupts we have enabled */
	
	pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID);
	pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus);
	pr_debug("%s(), self->ier = %x\n", __func__, self->ier);
	pr_debug("%s(), eir = %x\n", __func__, eir);
	
	/* Disable interrupts */
	 SetCOMInterrupts(self, FALSE);
	
	/* Tx or Rx Interrupt */
	
	if (eir & IIR_EOM) 
	{		
		if (self->io.direction == IO_XMIT) /* TX */
		{
			pr_debug("%s(), ******* IIR_EOM (Tx) *******\n",
				 __func__);
			
			if(ali_ircc_dma_xmit_complete(self))
			{
				if (irda_device_txqueue_empty(self->netdev)) 
				{
					/* Prepare for receive */
					ali_ircc_dma_receive(self);					
					self->ier = IER_EOM;									
				}
			}
			else
			{
				self->ier = IER_EOM; 					
			}
									
		}	
		else /* RX */
		{
			pr_debug("%s(), ******* IIR_EOM (Rx) *******\n",
				 __func__);
			
			if(OldMessageCount > ((self->LineStatus+1) & 0x07))
			{
				self->rcvFramesOverflow = TRUE;	
				pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n",
					 __func__);
			}
						
			if (ali_ircc_dma_receive_complete(self))
			{
				pr_debug("%s(), ******* receive complete ********\n",
					 __func__);
				
				self->ier = IER_EOM;				
			}
			else
			{
				pr_debug("%s(), ******* Not receive complete ********\n",
					 __func__);
				
				self->ier = IER_EOM | IER_TIMER;								
			}	
		
		}		
	}
	/* Timer Interrupt */
	else if (eir & IIR_TIMER)
	{	
		if(OldMessageCount > ((self->LineStatus+1) & 0x07))
		{
			self->rcvFramesOverflow = TRUE;	
			pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n",
				 __func__);
		}
		/* Disable Timer */
		switch_bank(iobase, BANK1);
		tmp = inb(iobase+FIR_CR);
		outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
		
		/* Check if this is a Tx timer interrupt */
		if (self->io.direction == IO_XMIT)
		{
			ali_ircc_dma_xmit(self);
			
			/* Interrupt on EOM */
			self->ier = IER_EOM;
									
		}
		else /* Rx */
		{
			if(ali_ircc_dma_receive_complete(self)) 
			{
				self->ier = IER_EOM;
			}
			else
			{
				self->ier = IER_EOM | IER_TIMER;
			}	
		}		
	}
	
	/* Restore Interrupt */	
	SetCOMInterrupts(self, TRUE);	
		
	return IRQ_RETVAL(eir);
}

/*
 * Function ali_ircc_sir_interrupt (irq, self, eir)
 *
 *    Handle SIR interrupt
 *
 */
static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
{
	int iobase;
	int iir, lsr;
	
	
	iobase = self->io.sir_base;

	iir = inb(iobase+UART_IIR) & UART_IIR_ID;
	if (iir) {	
		/* Clear interrupt */
		lsr = inb(iobase+UART_LSR);

		pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
			 __func__, iir, lsr, iobase);

		switch (iir) 
		{
			case UART_IIR_RLSI:
				pr_debug("%s(), RLSI\n", __func__);
				break;
			case UART_IIR_RDI:
				/* Receive interrupt */
				ali_ircc_sir_receive(self);
				break;
			case UART_IIR_THRI:
				if (lsr & UART_LSR_THRE)
				{
					/* Transmitter ready for data */
					ali_ircc_sir_write_wakeup(self);				
				}				
				break;
			default:
				pr_debug("%s(), unhandled IIR=%#x\n",
					 __func__, iir);
				break;
		} 
		
	}
	
	
	return IRQ_RETVAL(iir);
}


/*
 * Function ali_ircc_sir_receive (self)
 *
 *    Receive one frame from the infrared port
 *
 */
static void ali_ircc_sir_receive(struct ali_ircc_cb *self) 
{
	int boguscount = 0;
	int iobase;
	
	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.sir_base;

	/*  
	 * Receive all characters in Rx FIFO, unwrap and unstuff them. 
         * async_unwrap_char will deliver all found frames  
	 */
	do {
		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
				  inb(iobase+UART_RX));

		/* Make sure we don't stay here too long */
		if (boguscount++ > 32) {
			pr_debug("%s(), breaking!\n", __func__);
			break;
		}
	} while (inb(iobase+UART_LSR) & UART_LSR_DR);	
	
}

/*
 * Function ali_ircc_sir_write_wakeup (self)
 *
 *    Called by the driver when there's room for more data.  If we have
 *    more packets to send, we send them here.
 *
 */
static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
{
	int actual = 0;
	int iobase;	

	IRDA_ASSERT(self != NULL, return;);

	
	iobase = self->io.sir_base;

	/* Finished with frame?  */
	if (self->tx_buff.len > 0)  
	{
		/* Write data left in transmit buffer */
		actual = ali_ircc_sir_write(iobase, self->io.fifo_size, 
				      self->tx_buff.data, self->tx_buff.len);
		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;
	} 
	else 
	{
		if (self->new_speed) 
		{
			/* We must wait until all data are gone */
			while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
				pr_debug("%s(), UART_LSR_THRE\n", __func__);
			
			pr_debug("%s(), Changing speed! self->new_speed = %d\n",
				 __func__, self->new_speed);
			ali_ircc_change_speed(self, self->new_speed);
			self->new_speed = 0;			
			
			// benjamin 2000/11/10 06:32PM
			if (self->io.speed > 115200)
			{
				pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n",
					 __func__);
					
				self->ier = IER_EOM;
				// SetCOMInterrupts(self, TRUE);							
				return;							
			}
		}
		else
		{
			netif_wake_queue(self->netdev);	
		}
			
		self->netdev->stats.tx_packets++;
		
		/* Turn on receive interrupts */
		outb(UART_IER_RDI, iobase+UART_IER);
	}
		
}

static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
{
	struct net_device *dev = self->netdev;
	int iobase;
	
	
	pr_debug("%s(), setting speed = %d\n", __func__, baud);
	
	/* This function *must* be called with irq off and spin-lock.
	 * - Jean II */

	iobase = self->io.fir_base;
	
	SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
	
	/* Go to MIR, FIR Speed */
	if (baud > 115200)
	{
		
					
		ali_ircc_fir_change_speed(self, baud);			
		
		/* Install FIR xmit handler*/
		dev->netdev_ops = &ali_ircc_fir_ops;
				
		/* Enable Interrupt */
		self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM					
				
		/* Be ready for incoming frames */
		ali_ircc_dma_receive(self);	// benajmin 2000/11/8 07:46PM not complete
	}	
	/* Go to SIR Speed */
	else
	{
		ali_ircc_sir_change_speed(self, baud);
				
		/* Install SIR xmit handler*/
		dev->netdev_ops = &ali_ircc_sir_ops;
	}
	
		
	SetCOMInterrupts(self, TRUE);	// 2000/11/24 11:43AM
		
	netif_wake_queue(self->netdev);	
	
}

static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
{
		
	int iobase; 
	struct ali_ircc_cb *self = priv;
	struct net_device *dev;

		
	IRDA_ASSERT(self != NULL, return;);

	dev = self->netdev;
	iobase = self->io.fir_base;
	
	pr_debug("%s(), self->io.speed = %d, change to speed = %d\n",
		 __func__, self->io.speed, baud);
	
	/* Come from SIR speed */
	if(self->io.speed <=115200)
	{
		SIR2FIR(iobase);
	}
		
	/* Update accounting for new speed */
	self->io.speed = baud;
		
	// Set Dongle Speed mode
	ali_ircc_change_dongle_speed(self, baud);
		
}

/*
 * Function ali_ircc_sir_change_speed (self, speed)
 *
 *    Set speed of IrDA port to specified baudrate
 *
 */
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
{
	struct ali_ircc_cb *self = priv;
	int iobase; 
	int fcr;    /* FIFO control reg */
	int lcr;    /* Line control reg */
	int divisor;

	
	pr_debug("%s(), Setting speed to: %d\n", __func__, speed);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.sir_base;
	
	/* Come from MIR or FIR speed */
	if(self->io.speed >115200)
	{	
		// Set Dongle Speed mode first
		ali_ircc_change_dongle_speed(self, speed);
			
		FIR2SIR(iobase);
	}
		
	// Clear Line and Auxiliary status registers 2000/11/24 11:47AM
		
	inb(iobase+UART_LSR);
	inb(iobase+UART_SCR);
		
	/* Update accounting for new speed */
	self->io.speed = speed;

	divisor = 115200/speed;
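	/* 16550-style divisor, e.g. 115200/9600 = 12 at 9600 bps, 1 at 115200 bps */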
	
	fcr = UART_FCR_ENABLE_FIFO;

	/* 
	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
	 * about this timeout since it will always be fast enough. 
	 */
	if (self->io.speed < 38400)
		fcr |= UART_FCR_TRIGGER_1;
	else 
		fcr |= UART_FCR_TRIGGER_14;
        
	/* IrDA ports use 8N1 */
	lcr = UART_LCR_WLEN8;
	
	outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
	outb(divisor & 0xff,      iobase+UART_DLL); /* Set speed */
	outb(divisor >> 8,	  iobase+UART_DLM);
	outb(lcr,		  iobase+UART_LCR); /* Set 8N1	*/
	outb(fcr,		  iobase+UART_FCR); /* Enable FIFO's */

	/* Without this, the connection breaks after coming back from FIR speed,
	   but with it, the SIR connection is harder to establish */
	outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
}

static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
{
	
	struct ali_ircc_cb *self = priv;
	int iobase,dongle_id;
	int tmp = 0;
			
	
	iobase = self->io.fir_base; 	/* or iobase = self->io.sir_base; */
	dongle_id = self->io.dongle_id;
	
	/* We are already locked, no need to do it again */
		
	pr_debug("%s(), Set Speed for %s , Speed = %d\n",
		 __func__, dongle_types[dongle_id], speed);
	
	switch_bank(iobase, BANK2);
	tmp = inb(iobase+FIR_IRDA_CR);
		
	/* IBM type dongle */
	if(dongle_id == 0)
	{				
		if(speed == 4000000)
		{
			//	      __ __	
			// SD/MODE __|     |__ __
			//               __ __ 
			// IRTX    __ __|     |__
			//         T1 T2 T3 T4 T5
			
			tmp &=  ~IRDA_CR_HDLC;		// HDLC=0
			tmp |= IRDA_CR_CRC;	   	// CRC=1
			
			switch_bank(iobase, BANK2);
			outb(tmp, iobase+FIR_IRDA_CR);
			
      			// T1 -> SD/MODE:0 IRTX:0
      			tmp &= ~0x09;
      			tmp |= 0x02;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// T2 -> SD/MODE:1 IRTX:0
      			tmp &= ~0x01;
      			tmp |= 0x0a;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// T3 -> SD/MODE:1 IRTX:1
      			tmp |= 0x0b;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// T4 -> SD/MODE:0 IRTX:1
      			tmp &= ~0x08;
      			tmp |= 0x03;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// T5 -> SD/MODE:0 IRTX:0
      			tmp &= ~0x09;
      			tmp |= 0x02;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// reset -> Normal TX output Signal
      			outb(tmp & ~0x02, iobase+FIR_IRDA_CR);      			
		}
		else /* speed <=1152000 */
		{	
			//	      __	
			// SD/MODE __|  |__
			//
			// IRTX    ________
			//         T1 T2 T3  
			
			/* MIR 1152000, 576000 */
			if (speed==1152000)
			{
				tmp |= 0xA0;	   //HDLC=1, 1.152Mbps=1
      			}
      			else
      			{
				tmp &=~0x80;	   //HDLC 0.576Mbps
				tmp |= 0x20;	   //HDLC=1,
      			}			
      			
      			tmp |= IRDA_CR_CRC;	   	// CRC=1
      			
      			switch_bank(iobase, BANK2);
      			outb(tmp, iobase+FIR_IRDA_CR);
						
			/* MIR 1152000, 576000 */
						
			//switch_bank(iobase, BANK2);			
			// T1 -> SD/MODE:0 IRTX:0
      			tmp &= ~0x09;
      			tmp |= 0x02;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// T2 -> SD/MODE:1 IRTX:0
      			tmp &= ~0x01;     
      			tmp |= 0x0a;      
      			outb(tmp, iobase+FIR_IRDA_CR);
      			
      			// T3 -> SD/MODE:0 IRTX:0
      			tmp &= ~0x09;
      			tmp |= 0x02;
      			outb(tmp, iobase+FIR_IRDA_CR);
      			udelay(2);
      			
      			// reset -> Normal TX output Signal
      			outb(tmp & ~0x02, iobase+FIR_IRDA_CR);      						
		}		
	}
	else if (dongle_id == 1) /* HP HSDL-3600 */
	{
		switch(speed)
		{
		case 4000000:
			tmp &=  ~IRDA_CR_HDLC;	// HDLC=0
			break;	
			
		case 1152000:
			tmp |= 0xA0;	   	// HDLC=1, 1.152Mbps=1
      			break;
      			
      		case 576000:
      			tmp &=~0x80;	   	// HDLC 0.576Mbps
			tmp |= 0x20;	   	// HDLC=1,
			break;
      		}			
			
		tmp |= IRDA_CR_CRC;	   	// CRC=1
			
		switch_bank(iobase, BANK2);
      		outb(tmp, iobase+FIR_IRDA_CR);		
	}
	else /* HP HSDL-1100 */
	{
		if(speed <= 115200) /* SIR */
		{
			
			tmp &= ~IRDA_CR_FIR_SIN;	// HP sin select = 0
			
			switch_bank(iobase, BANK2);
      			outb(tmp, iobase+FIR_IRDA_CR);			
		}
		else /* MIR FIR */
		{	
			
			switch(speed)
			{
			case 4000000:
				tmp &=  ~IRDA_CR_HDLC;	// HDLC=0
				break;	
			
			case 1152000:
				tmp |= 0xA0;	   	// HDLC=1, 1.152Mbps=1
      				break;
      			
      			case 576000:
      				tmp &=~0x80;	   	// HDLC 0.576Mbps
				tmp |= 0x20;	   	// HDLC=1,
				break;
      			}			
			
			tmp |= IRDA_CR_CRC;	   	// CRC=1
			tmp |= IRDA_CR_FIR_SIN;		// HP sin select = 1
			
			switch_bank(iobase, BANK2);
      			outb(tmp, iobase+FIR_IRDA_CR);			
		}
	}
			
	switch_bank(iobase, BANK0);
	
}

/*
 * Function ali_ircc_sir_write (driver)
 *
 *    Fill Tx FIFO with transmit data
 *
 */
static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
{
	int actual = 0;
	
		
	/* Tx FIFO should be empty! */
	if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
		pr_debug("%s(), failed, fifo not empty!\n", __func__);
		return 0;
	}
        
	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual], iobase+UART_TX);

		actual++;
	}
	
	return actual;
}

/*
 * Function ali_ircc_net_open (dev)
 *
 *    Start the device
 *
 */
static int ali_ircc_net_open(struct net_device *dev)
{
	struct ali_ircc_cb *self;
	int iobase;
	char hwname[32];
		
	
	IRDA_ASSERT(dev != NULL, return -1;);
	
	self = netdev_priv(dev);
	
	IRDA_ASSERT(self != NULL, return 0;);
	
	iobase = self->io.fir_base;
	
	/* Request IRQ and install Interrupt Handler */
	if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev)) 
	{
		net_warn_ratelimited("%s, unable to allocate irq=%d\n",
				     ALI_IRCC_DRIVER_NAME, self->io.irq);
		return -EAGAIN;
	}
	
	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on 
	 * failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		net_warn_ratelimited("%s, unable to allocate dma=%d\n",
				     ALI_IRCC_DRIVER_NAME, self->io.dma);
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}
	
	/* Turn on interrupts */
	outb(UART_IER_RDI , iobase+UART_IER);

	/* Ready to play! */
	netif_start_queue(dev); //benjamin by irport
	
	/* Give self a hardware name */
	sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);

	/* 
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly 
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);
		
	
	return 0;
}

/*
 * Function ali_ircc_net_close (dev)
 *
 *    Stop the device
 *
 */
static int ali_ircc_net_close(struct net_device *dev)
{	

	struct ali_ircc_cb *self;
	//int iobase;
			
		
	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
		
	disable_dma(self->io.dma);

	/* Disable interrupts */
	SetCOMInterrupts(self, FALSE);
	       
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	
	return 0;
}

/*
 * Function ali_ircc_fir_hard_xmit (skb, dev)
 *
 *    Transmit the frame
 *
 */
static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
						struct net_device *dev)
{
	struct ali_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__u32 speed;
	int mtt, diff;
	
	
	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	
	/* Make sure the tests and the speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);
	
	/* Note : you should make sure that speed changes are not going
	 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
	 * details - Jean II */

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			ali_ircc_change_speed(self, speed); 
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Register and copy this frame to DMA memory */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
	self->tx_fifo.tail += skb->len;

	dev->stats.tx_bytes += skb->len;

	skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
		      skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	/* Start transmit only if there is currently no transmit going on */
	if (self->tx_fifo.len == 1) 
	{
		/* Check if we must wait the min turn time or not */
		mtt = irda_get_mtt(skb);
				
		if (mtt) 
		{
			/* Check how much time we have used already */
			diff = ktime_us_delta(ktime_get(), self->stamp);
			/* self->stamp is set from ali_ircc_dma_receive_complete() */
							
			pr_debug("%s(), ******* diff = %d *******\n",
				 __func__, diff);

			/* Check if the mtt is larger than the time we have
			 * already used by all the protocol processing
			 */
			if (mtt > diff)
			{				
				mtt -= diff;
								
				/* 
				 * Use the timer if the delay is larger than 500 us, and
				 * use udelay for smaller values, which should
				 * be acceptable
				 */
				if (mtt > 500) 
				{
					/* Adjust for timer resolution */
					mtt = (mtt+250) / 500; 	/* round to the nearest 500 us timer tick */
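					/* e.g. mtt = 800 us -> (800 + 250) / 500 = 2 -> 1 ms timer below */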
					
					pr_debug("%s(), ************** mtt = %d ***********\n",
						 __func__, mtt);
					
					/* Setup timer */
					if (mtt == 1) /* 500 us */
					{
						switch_bank(iobase, BANK1);
						outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
					}	
					else if (mtt == 2) /* 1 ms */
					{
						switch_bank(iobase, BANK1);
						outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
					}					
					else /* > 2ms -> 4ms */
					{
						switch_bank(iobase, BANK1);
						outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
					}
					
					
					/* Start timer */
					outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
					self->io.direction = IO_XMIT;
					
					/* Enable timer interrupt */
					self->ier = IER_TIMER;
					SetCOMInterrupts(self, TRUE);					
					
					/* Timer will take care of the rest */
					goto out; 
				} 
				else
					udelay(mtt);
			} // if (mtt > diff)
		}// if (mtt) 
				
		/* Enable EOM interrupt */
		self->ier = IER_EOM;
		SetCOMInterrupts(self, TRUE);
		
		/* Transmit frame */
		ali_ircc_dma_xmit(self);
	} // if (self->tx_fifo.len == 1) 
	
 out:
 	
	/* Not busy transmitting anymore if window is not full */
	if (self->tx_fifo.free < MAX_TX_WINDOW)
		netif_wake_queue(self->netdev);
	
	/* Restore bank register */
	switch_bank(iobase, BANK0);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;	
}


static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
{
	int iobase, tmp;
	unsigned char FIFO_OPTI, Hi, Lo;
	
	
	
	iobase = self->io.fir_base;
	
	/* FIFO threshold , this method comes from NDIS5 code */
	
	if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
		FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
	else
		FIFO_OPTI = TX_FIFO_Threshold;
	
	/* Disable DMA */
	switch_bank(iobase, BANK1);
	outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
	
	self->io.direction = IO_XMIT;
	
	irda_setup_dma(self->io.dma, 
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, 
		       DMA_TX_MODE);
		
	/* Reset Tx FIFO */
	switch_bank(iobase, BANK0);
	outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
	
	/* Set Tx FIFO threshold */
	if (self->fifo_opti_buf!=FIFO_OPTI) 
	{
		switch_bank(iobase, BANK1);
	    	outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
	    	self->fifo_opti_buf=FIFO_OPTI;
	}
	
	/* Set Tx DMA threshold */
	switch_bank(iobase, BANK1);
	outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);
	
	/* Set max Tx frame size */
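	/* The length is programmed as a 12-bit value split across the DSR high/low registers */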
	Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
	Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
	switch_bank(iobase, BANK2);
	outb(Hi, iobase+FIR_TX_DSR_HI);
	outb(Lo, iobase+FIR_TX_DSR_LO);
	
	/* Disable SIP, disable Brick Wall (not supported in TX mode), change to TX mode */
	switch_bank(iobase, BANK0);