summaryrefslogtreecommitdiffstats
path: root/drivers/staging/greybus/bundle.c
blob: 885461598c94cd8595581447ab6dbfe10783f440 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/*
 * Greybus bundles
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include "greybus.h"

static void gb_bundle_connections_exit(struct gb_bundle *bundle);
static int gb_bundle_connections_init(struct gb_bundle *bundle);


/*
 * sysfs "device_id" attribute: report the device id assigned to this
 * bundle (0xff -- invalid -- until gb_bundle_init() assigns a real one).
 */
static ssize_t device_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	/* sysfs attribute values are conventionally newline-terminated */
	return sprintf(buf, "%d\n", bundle->device_id);
}
static DEVICE_ATTR_RO(device_id);

/* sysfs attributes exposed on every bundle device */
static struct attribute *bundle_attrs[] = {
	&dev_attr_device_id.attr,
	NULL,
};

/* Generates bundle_groups, assigned to dev.groups in gb_bundle_create() */
ATTRIBUTE_GROUPS(bundle);

/*
 * Driver-core release callback: frees the bundle once the last
 * reference to its embedded struct device has been dropped.
 */
static void gb_bundle_release(struct device *dev)
{
	kfree(to_gb_bundle(dev));
}

/* Device type for bundle devices; release frees the containing bundle */
struct device_type greybus_bundle_type = {
	.name =		"greybus_bundle",
	.release =	gb_bundle_release,
};


/* XXX This could be per-host device or per-module */
/* Protects the per-interface-block bundle lists (gb_ib->interfaces) */
static DEFINE_SPINLOCK(gb_bundles_lock);

/*
 * Create a gb_bundle structure to represent a discovered
 * bundle.  Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface_block *gb_ib, u8 interface_id)
{
	struct gb_bundle *bundle;
	int retval;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->gb_ib = gb_ib;
	bundle->id = interface_id;
	bundle->device_id = 0xff;	/* Invalid device id to start with */
	INIT_LIST_HEAD(&bundle->connections);

	/* Build up the bundle device structures and register it with the
	 * driver core */
	bundle->dev.parent = &gb_ib->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%d:%d", gb_ib->module_id, interface_id);

	retval = device_add(&bundle->dev);
	if (retval) {
		pr_err("failed to add bundle device for id 0x%02hhx\n",
			interface_id);
		/*
		 * put_device() drops the reference taken by
		 * device_initialize() and ends up in gb_bundle_release(),
		 * which kfree()s the bundle.  Calling kfree() here as well
		 * would be a double free.
		 */
		put_device(&bundle->dev);
		return NULL;
	}

	spin_lock_irq(&gb_bundles_lock);
	list_add_tail(&bundle->links, &gb_ib->interfaces);
	spin_unlock_irq(&gb_bundles_lock);

	return bundle;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_interface_block *gb_ib)
{
	struct gb_bundle *bundle;
	struct gb_bundle *temp;

	if (WARN_ON(!gb_ib))
		return;

	spin_lock_irq(&gb_bundles_lock);
	list_for_each_entry_safe(bundle, temp, &gb_ib->interfaces, links) {
		list_del(&bundle->links);
		gb_bundle_connections_exit(bundle);
		device_del(&bundle->dev);
	}
	spin_unlock_irq(&gb_bundles_lock);
}

/*
 * Bind a previously discovered bundle to a device id, program the SVC
 * route for it, and bring up all of its connections.
 *
 * Returns 0 on success or a negative errno (-ENOENT if no bundle with
 * the given id exists on this interface block).
 */
int gb_bundle_init(struct gb_interface_block *gb_ib, u8 bundle_id, u8 device_id)
{
	struct gb_bundle *bundle = gb_bundle_find(gb_ib, bundle_id);
	int retval;

	if (!bundle) {
		dev_err(gb_ib->hd->parent, "bundle %hhu not found\n",
			bundle_id);
		return -ENOENT;
	}

	bundle->device_id = device_id;

	retval = svc_set_route_send(bundle, gb_ib->hd);
	if (retval) {
		dev_err(gb_ib->hd->parent, "failed to set route (%d)\n",
			retval);
		return retval;
	}

	retval = gb_bundle_connections_init(bundle);
	if (retval) {
		dev_err(gb_ib->hd->parent, "interface bundle init error %d\n",
			retval);
		/* XXX clear route */
		return retval;
	}

	return 0;
}

/*
 * Look up a bundle by id on the given interface block.
 * Returns the matching bundle, or NULL if none exists.
 */
struct gb_bundle *gb_bundle_find(struct gb_interface_block *gb_ib, u8 bundle_id)
{
	struct gb_bundle *iter;
	struct gb_bundle *found = NULL;

	spin_lock_irq(&gb_bundles_lock);
	list_for_each_entry(iter, &gb_ib->interfaces, links) {
		if (iter->id == bundle_id) {
			found = iter;
			break;
		}
	}
	spin_unlock_irq(&gb_bundles_lock);

	return found;
}

/*
 * Initialize every connection hanging off of the bundle, stopping at
 * the first failure.  Returns 0 if all connections came up, otherwise
 * the error from the connection that failed.
 */
static int gb_bundle_connections_init(struct gb_bundle *bundle)
{
	struct gb_connection *connection;
	int retval;

	list_for_each_entry(connection, &bundle->connections, bundle_links) {
		retval = gb_connection_init(connection);
		if (retval)
			return retval;
	}

	return 0;
}

/*
 * Shut down and destroy every connection on the bundle.  Safe
 * iteration is required because gb_connection_destroy() removes the
 * entry from the list.
 */
static void gb_bundle_connections_exit(struct gb_bundle *bundle)
{
	struct gb_connection *connection;
	struct gb_connection *tmp;

	list_for_each_entry_safe(connection, tmp, &bundle->connections,
				 bundle_links) {
		gb_connection_exit(connection);
		gb_connection_destroy(connection);
	}
}