Skip to content

Commit 0de4267

Browse files
authored
Merge pull request #120 from TheBlueMatt/main
Update to 0.0.112 (with RGS)
2 parents 384d4ce + e0fec34 commit 0de4267

File tree

611 files changed

+26028
-9269
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

611 files changed

+26028
-9269
lines changed

.github/workflows/build.yml

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -39,11 +39,11 @@ jobs:
3939
# Note this is a different endpoint, as we need one non-upstream commit!
4040
git clone https://git.bitcoin.ninja/rust-lightning
4141
cd rust-lightning
42-
git checkout origin/2022-09-111-java-bindings
42+
git checkout origin/2022-10-112-java-bindings
4343
cd ..
4444
git clone https://github.com/lightningdevkit/ldk-c-bindings
4545
cd ldk-c-bindings
46-
git checkout 0.0.111
46+
git checkout 0.0.112
4747
- name: Rebuild C bindings without STD
4848
run: |
4949
cd ldk-c-bindings
@@ -130,11 +130,11 @@ jobs:
130130
# Note this is a different endpoint, as we need one non-upstream commit!
131131
git clone https://git.bitcoin.ninja/rust-lightning
132132
cd rust-lightning
133-
git checkout origin/2022-09-111-java-bindings
133+
git checkout origin/2022-10-112-java-bindings
134134
cd ..
135135
git clone https://github.com/lightningdevkit/ldk-c-bindings
136136
cd ldk-c-bindings
137-
git checkout 0.0.111
137+
git checkout 0.0.112
138138
- name: Rebuild C bindings, and check the sample app builds + links
139139
run: |
140140
cd ldk-c-bindings
@@ -185,11 +185,11 @@ jobs:
185185
# Note this is a different endpoint, as we need one non-upstream commit!
186186
git clone https://git.bitcoin.ninja/rust-lightning
187187
cd rust-lightning
188-
git checkout origin/2022-09-111-java-bindings
188+
git checkout origin/2022-10-112-java-bindings
189189
cd ..
190190
git clone https://github.com/lightningdevkit/ldk-c-bindings
191191
cd ldk-c-bindings
192-
git checkout 0.0.111
192+
git checkout 0.0.112
193193
- name: Rebuild C bindings, and check the sample app builds + links
194194
run: |
195195
cd ldk-c-bindings
@@ -288,11 +288,11 @@ jobs:
288288
# Note this is a different endpoint, as we need one non-upstream commit!
289289
git clone https://git.bitcoin.ninja/rust-lightning
290290
cd rust-lightning
291-
git checkout origin/2022-09-111-java-bindings
291+
git checkout origin/2022-10-112-java-bindings
292292
cd ..
293293
git clone https://github.com/lightningdevkit/ldk-c-bindings
294294
cd ldk-c-bindings
295-
git checkout 0.0.111
295+
git checkout 0.0.112
296296
- name: Checkout Android AAR binaries and artifacts
297297
run: |
298298
# Gitweb only allows snapshots of folders by providing the object hash, which we have to extract:
@@ -368,11 +368,11 @@ jobs:
368368
# Note this is a different endpoint, as we need one non-upstream commit!
369369
git clone https://git.bitcoin.ninja/rust-lightning
370370
cd rust-lightning
371-
git checkout origin/2022-09-111-java-bindings
371+
git checkout origin/2022-10-112-java-bindings
372372
cd ..
373373
git clone https://github.com/lightningdevkit/ldk-c-bindings
374374
cd ldk-c-bindings
375-
git checkout 0.0.111
375+
git checkout 0.0.112
376376
- name: Rebuild C bindings with upstream clang, and check the sample app builds + links
377377
run: |
378378
export PATH=`pwd`/clang+llvm-14.0.5-x86_64-apple-darwin/bin:$PATH

gen_type_mapping.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -190,10 +190,19 @@ def _do_map_type_with_info(self, ty_info, print_void, ret_arr_len, is_free, hold
190190
if subty.to_hu_conv is not None:
191191
to_hu_conv = self.consts.var_decl_statement(self.consts.c_type_map["uint32_t"][0], conv_name + "_len", self.consts.get_java_arr_len(arr_name)) + ";\n"
192192
to_hu_conv += self.consts.var_decl_statement(ty_info.java_hu_ty, conv_name + "_arr", self.consts.constr_hu_array(ty_info, conv_name + "_len"))
193-
to_hu_conv += ";\n" + self.consts.for_n_in_range(idxc, "0", conv_name + "_len") + "\n"
194-
to_hu_conv += "\t" + self.consts.var_decl_statement(subty.java_ty, conv_name, self.consts.get_java_arr_elem(subty, arr_name, idxc)) + ";\n"
195-
to_hu_conv += "\t" + subty.to_hu_conv.replace("\n", "\n\t") + "\n"
196-
to_hu_conv += "\t" + conv_name + "_arr[" + idxc + "] = " + subty.to_hu_conv_name + ";\n}"
193+
to_hu_conv += ";\n"
194+
pfx = ""
195+
if is_nullable:
196+
to_hu_conv += "if (" + arr_name + " != null) {\n"
197+
pfx = "\t"
198+
to_hu_conv += pfx + self.consts.for_n_in_range(idxc, "0", conv_name + "_len") + "\n"
199+
200+
to_hu_conv += pfx + "\t" + self.consts.var_decl_statement(subty.java_ty, conv_name, self.consts.get_java_arr_elem(subty, arr_name, idxc)) + ";\n"
201+
to_hu_conv += pfx + "\t" + subty.to_hu_conv.replace("\n", "\n\t" + pfx) + "\n"
202+
to_hu_conv += pfx + "\t" + conv_name + "_arr[" + idxc + "] = " + subty.to_hu_conv_name + ";\n"
203+
to_hu_conv += pfx + "}"
204+
if is_nullable:
205+
to_hu_conv += "\n}"
197206
cleanup = self.consts.cleanup_converted_native_array(ty_info, arr_name)
198207
if cleanup is not None:
199208
to_hu_conv += "\n" + cleanup
@@ -209,7 +218,10 @@ def _do_map_type_with_info(self, ty_info, print_void, ret_arr_len, is_free, hold
209218
hu_conv_b = ""
210219
if subty.from_hu_conv[1] != "":
211220
iterator = self.consts.for_n_in_arr(conv_name, arr_name, subty)
212-
hu_conv_b = iterator[0] + subty.from_hu_conv[1] + ";" + iterator[1]
221+
if is_nullable:
222+
hu_conv_b = "if (" + arr_name + " != null) { " + iterator[0] + subty.from_hu_conv[1] + ";" + iterator[1] + " }"
223+
else:
224+
hu_conv_b = iterator[0] + subty.from_hu_conv[1] + ";" + iterator[1]
213225
if from_hu_conv is not None:
214226
arr_conv = self.consts.primitive_arr_from_hu(ty_info.subty, None, self.consts.map_hu_array_elems(arr_name, conv_name, ty_info, subty))
215227
assert arr_conv[1] == ""

java_strings.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1013,8 +1013,8 @@ def native_c_map_trait(self, struct_name, field_vars, flattened_field_vars, fiel
10131013
if fn_line.ret_ty_info.c_ty.endswith("Array"):
10141014
out_c = out_c + "\t" + fn_line.ret_ty_info.c_ty + " ret = (*env)->CallObjectMethod(env, obj, j_calls->" + fn_line.fn_name + "_meth"
10151015
elif fn_line.ret_ty_info.c_ty == "void":
1016-
out_c += "\t(*env)->Call" + fn_line.ret_ty_info.java_ty.title() + "Method(env, obj, j_calls->" + fn_line.fn_name + "_meth"
1017-
elif fn_line.ret_ty_info.java_hu_ty == "String":
1016+
out_c += "\t(*env)->CallVoidMethod(env, obj, j_calls->" + fn_line.fn_name + "_meth"
1017+
elif fn_line.ret_ty_info.java_hu_ty == "String" or "org/ldk/enums" in fn_line.ret_ty_info.java_fn_ty_arg:
10181018
# Manually write out String methods as they're just an Object
10191019
out_c += "\t" + fn_line.ret_ty_info.c_ty + " ret = (*env)->CallObjectMethod(env, obj, j_calls->" + fn_line.fn_name + "_meth"
10201020
elif not fn_line.ret_ty_info.passed_as_ptr:

node-net/test/test.mts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ const chan_handler = ldk.ErroringMessageHandler.constructor_new().as_ChannelMess
2727
const cust_handler = ldk.IgnoringMessageHandler.constructor_new().as_CustomMessageHandler();
2828
const onion_handler = ldk.IgnoringMessageHandler.constructor_new().as_OnionMessageHandler();
2929

30-
const a_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_a_secret, 0xdeadbeefn, rng_seed, logger_a, cust_handler);
30+
const a_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_a_secret, 0xdeadbeef, rng_seed, logger_a, cust_handler);
3131
const a_net_handler = new node_net.NodeLDKNet(a_pm);
3232
var port = 10000;
3333
for (; port < 11000; port++) {
@@ -38,7 +38,7 @@ for (; port < 11000; port++) {
3838
} catch(_) {}
3939
}
4040

41-
const b_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_b_secret, 0xdeadbeefn, rng_seed, logger_b, cust_handler);
41+
const b_pm = ldk.PeerManager.constructor_new(chan_handler, routing_handler, onion_handler, node_b_secret, 0xdeadbeef, rng_seed, logger_b, cust_handler);
4242
const b_net_handler = new node_net.NodeLDKNet(b_pm);
4343
await b_net_handler.connect_peer("127.0.0.1", port, node_a_pk);
4444

src/main/java/org/ldk/batteries/ChannelManagerConstructor.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -132,15 +132,15 @@ public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] cha
132132
graph_msg_handler.as_RoutingMessageHandler(),
133133
ignoring_handler.as_OnionMessageHandler(),
134134
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
135-
System.currentTimeMillis() / 1000,
135+
(int)(System.currentTimeMillis() / 1000),
136136
random_data, logger, ignoring_handler.as_CustomMessageHandler());
137137
} else {
138138
this.graph_msg_handler = null;
139139
this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
140140
ignoring_handler.as_RoutingMessageHandler(),
141141
ignoring_handler.as_OnionMessageHandler(),
142142
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
143-
System.currentTimeMillis() / 1000,
143+
(int)(System.currentTimeMillis() / 1000),
144144
random_data, logger, ignoring_handler.as_CustomMessageHandler());
145145
}
146146
NioPeerHandler nio_peer_handler = null;
@@ -184,15 +184,15 @@ public ChannelManagerConstructor(Network network, UserConfig config, byte[] curr
184184
graph_msg_handler.as_RoutingMessageHandler(),
185185
ignoring_handler.as_OnionMessageHandler(),
186186
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
187-
System.currentTimeMillis() / 1000,
187+
(int)(System.currentTimeMillis() / 1000),
188188
random_data, logger, ignoring_handler.as_CustomMessageHandler());
189189
} else {
190190
this.graph_msg_handler = null;
191191
this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
192192
ignoring_handler.as_RoutingMessageHandler(),
193193
ignoring_handler.as_OnionMessageHandler(),
194194
((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
195-
System.currentTimeMillis() / 1000,
195+
(int)(System.currentTimeMillis() / 1000),
196196
random_data, logger, ignoring_handler.as_CustomMessageHandler());
197197
}
198198
NioPeerHandler nio_peer_handler = null;

src/main/java/org/ldk/enums/ChannelMonitorUpdateErr.java

Lines changed: 0 additions & 73 deletions
This file was deleted.
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
package org.ldk.enums;
2+
3+
/**
4+
* An enum representing the status of a channel monitor update persistence.
5+
*/
6+
public enum ChannelMonitorUpdateStatus {
7+
/**
8+
* The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
9+
* have been updated.
10+
*
11+
* This includes performing any `fsync()` calls required to ensure the update is guaranteed to
12+
* be available on restart even if the application crashes.
13+
*/
14+
LDKChannelMonitorUpdateStatus_Completed,
15+
/**
16+
* Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
17+
* our state failed, but is expected to succeed at some point in the future).
18+
*
19+
* Such a failure will \"freeze\" a channel, preventing us from revoking old states or
20+
* submitting new commitment transactions to the counterparty. Once the update(s) which failed
21+
* have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
22+
* channel to an operational state.
23+
*
24+
* Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
25+
* If you return this error you must ensure that it is written to disk safely before writing
26+
* the latest [`ChannelManager`] state, or you should return [`PermanentFailure`] instead.
27+
*
28+
* Even when a channel has been \"frozen\", updates to the [`ChannelMonitor`] can continue to
29+
* occur (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us
30+
* attempting to claim it on this channel) and those updates must still be persisted.
31+
*
32+
* No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
33+
* until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
34+
* monitor update for the same channel.
35+
*
36+
* For deployments where a copy of ChannelMonitors and other local state are backed up in a
37+
* remote location (with local copies persisted immediately), it is anticipated that all
38+
* updates will return [`InProgress`] until the remote copies could be updated.
39+
*
40+
* [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
41+
* [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
42+
* [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
43+
*/
44+
LDKChannelMonitorUpdateStatus_InProgress,
45+
/**
46+
* Used to indicate no further channel monitor updates will be allowed (likely a disk failure
47+
* or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
48+
*
49+
* When this is returned, [`ChannelManager`] will force-close the channel but *not* broadcast
50+
* our current commitment transaction. This avoids a dangerous case where a local disk failure
51+
* (e.g. the Linux-default remounting of the disk as read-only) causes [`PermanentFailure`]s
52+
* for all monitor updates. If we were to broadcast our latest commitment transaction and then
53+
* restart, we could end up reading a previous [`ChannelMonitor`] and [`ChannelManager`],
54+
* revoking our now-broadcasted state before seeing it confirm and losing all our funds.
55+
*
56+
* Note that this is somewhat of a tradeoff - if the disk is really gone and we may have lost
57+
* the data permanently, we really should broadcast immediately. If the data can be recovered
58+
* with manual intervention, we'd rather close the channel, rejecting future updates to it,
59+
* and broadcast the latest state only if we have HTLCs to claim which are timing out (which
60+
* we do as long as blocks are connected).
61+
*
62+
* In order to broadcast the latest local commitment transaction, you'll need to call
63+
* [`ChannelMonitor::get_latest_holder_commitment_txn`] and broadcast the resulting
64+
* transactions once you've safely ensured no further channel updates can be generated by your
65+
* [`ChannelManager`].
66+
*
67+
* Note that at least one final [`ChannelMonitorUpdate`] may still be provided, which must
68+
* still be processed by a running [`ChannelMonitor`]. This final update will mark the
69+
* [`ChannelMonitor`] as finalized, ensuring no further updates (e.g. revocation of the latest
70+
* commitment transaction) are allowed.
71+
*
72+
* Note that even if you return a [`PermanentFailure`] due to unavailability of secondary
73+
* [`ChannelMonitor`] copies, you should still make an attempt to store the update where
74+
* possible to ensure you can claim HTLC outputs on the latest commitment transaction
75+
* broadcasted later.
76+
*
77+
* In case of distributed watchtowers deployment, the new version must be written to disk, as
78+
* state may have been stored but rejected due to a block forcing a commitment broadcast. This
79+
* storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
80+
* lagging behind on block processing.
81+
*
82+
* [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
83+
* [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
84+
*/
85+
LDKChannelMonitorUpdateStatus_PermanentFailure,
86+
; static native void init();
87+
static { init(); }
88+
}

0 commit comments

Comments
 (0)