Coverage Report

Created: 2025-03-18 19:28

/root/bitcoin/src/test/fuzz/util/net.cpp
Source
(Per-line hit counts, branch counters, and the "jump to first uncovered line" control are omitted below: every line, branch, and template instantiation in this file is uncovered, i.e. all counts are 0.)

// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <test/fuzz/util/net.h>

#include <compat/compat.h>
#include <netaddress.h>
#include <node/protocol_version.h>
#include <protocol.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/util.h>
#include <test/util/net.h>
#include <util/sock.h>
#include <util/time.h>

#include <array>
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <thread>
#include <vector>

class CNode;

CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider, FastRandomContext* rand) noexcept
{
    struct NetAux {
        Network net;
        CNetAddr::BIP155Network bip155;
        size_t len;
    };

    static constexpr std::array<NetAux, 6> nets{
        NetAux{.net = Network::NET_IPV4, .bip155 = CNetAddr::BIP155Network::IPV4, .len = ADDR_IPV4_SIZE},
        NetAux{.net = Network::NET_IPV6, .bip155 = CNetAddr::BIP155Network::IPV6, .len = ADDR_IPV6_SIZE},
        NetAux{.net = Network::NET_ONION, .bip155 = CNetAddr::BIP155Network::TORV3, .len = ADDR_TORV3_SIZE},
        NetAux{.net = Network::NET_I2P, .bip155 = CNetAddr::BIP155Network::I2P, .len = ADDR_I2P_SIZE},
        NetAux{.net = Network::NET_CJDNS, .bip155 = CNetAddr::BIP155Network::CJDNS, .len = ADDR_CJDNS_SIZE},
        NetAux{.net = Network::NET_INTERNAL, .bip155 = CNetAddr::BIP155Network{0}, .len = 0},
    };

    const size_t nets_index{rand == nullptr
        ? fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, nets.size() - 1)
        : static_cast<size_t>(rand->randrange(nets.size()))};

    const auto& aux = nets[nets_index];

    CNetAddr addr;

    if (aux.net == Network::NET_INTERNAL) {
        if (rand == nullptr) {
            addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
        } else {
            const auto v = rand->randbytes(32);
            addr.SetInternal(std::string{v.begin(), v.end()});
        }
        return addr;
    }

    DataStream s;

    s << static_cast<uint8_t>(aux.bip155);

    std::vector<uint8_t> addr_bytes;
    if (rand == nullptr) {
        addr_bytes = fuzzed_data_provider.ConsumeBytes<uint8_t>(aux.len);
        addr_bytes.resize(aux.len);
    } else {
        addr_bytes = rand->randbytes(aux.len);
    }
    if (aux.net == NET_IPV6 && addr_bytes[0] == CJDNS_PREFIX) { // Avoid generating IPv6 addresses that look like CJDNS.
        addr_bytes[0] = 0x55; // Just an arbitrary number, anything != CJDNS_PREFIX would do.
    }
    if (aux.net == NET_CJDNS) { // Avoid generating CJDNS addresses that don't start with CJDNS_PREFIX because those are !IsValid().
        addr_bytes[0] = CJDNS_PREFIX;
    }
    s << addr_bytes;

    s >> CAddress::V2_NETWORK(addr);

    return addr;
}
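
A usage sketch for context: a fuzz target would normally call ConsumeNetAddr() with the provider that wraps the fuzz input. The target name and surrounding harness below are illustrative assumptions, not part of this file.

// Hypothetical fuzz target exercising ConsumeNetAddr(); only that function is taken from this file.
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util/net.h>

FUZZ_TARGET(netaddr_example) // illustrative target name
{
    FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
    // With rand == nullptr every byte of the address is drawn from the fuzz input.
    const CNetAddr addr{ConsumeNetAddr(fuzzed_data_provider, /*rand=*/nullptr)};
    // Cheap sanity checks; CJDNS addresses are forced above to start with CJDNS_PREFIX.
    (void)addr.IsValid();
    (void)addr.ToStringAddr();
}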

CAddress ConsumeAddress(FuzzedDataProvider& fuzzed_data_provider) noexcept
{
    return {ConsumeService(fuzzed_data_provider), ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS), NodeSeconds{std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<uint32_t>()}}};
}

template <typename P>
P ConsumeDeserializationParams(FuzzedDataProvider& fuzzed_data_provider) noexcept
{
    constexpr std::array ADDR_ENCODINGS{
        CNetAddr::Encoding::V1,
        CNetAddr::Encoding::V2,
    };
    constexpr std::array ADDR_FORMATS{
        CAddress::Format::Disk,
        CAddress::Format::Network,
    };
    if constexpr (std::is_same_v<P, CNetAddr::SerParams>) {
        return P{PickValue(fuzzed_data_provider, ADDR_ENCODINGS)};
    }
    if constexpr (std::is_same_v<P, CAddress::SerParams>) {
        return P{{PickValue(fuzzed_data_provider, ADDR_ENCODINGS)}, PickValue(fuzzed_data_provider, ADDR_FORMATS)};
    }
}
template CNetAddr::SerParams ConsumeDeserializationParams(FuzzedDataProvider&) noexcept;
template CAddress::SerParams ConsumeDeserializationParams(FuzzedDataProvider&) noexcept;
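
The params object returned here is meant to wrap the value being (de)serialized, much like CAddress::V2_NETWORK(addr) wraps addr in ConsumeNetAddr() above. A minimal round-trip sketch, assuming the params type supports that wrapping via operator() and that `buffer` comes from a surrounding fuzz harness:

// Sketch: serialize a fuzz-generated CAddress with fuzz-chosen params, then read it back.
FuzzedDataProvider provider{buffer.data(), buffer.size()}; // hypothetical harness variable
const auto ser_params{ConsumeDeserializationParams<CAddress::SerParams>(provider)};
const CAddress addr_out{ConsumeAddress(provider)};

DataStream stream;
stream << ser_params(addr_out); // encoding (V1/V2) and format (Disk/Network) chosen by the fuzzer

CAddress addr_in;
stream >> ser_params(addr_in);  // symmetric decode with the same params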

FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider)
    : Sock{fuzzed_data_provider.ConsumeIntegralInRange<SOCKET>(INVALID_SOCKET - 1, INVALID_SOCKET)},
      m_fuzzed_data_provider{fuzzed_data_provider},
      m_selectable{fuzzed_data_provider.ConsumeBool()},
      m_time{MockableSteadyClock::INITIAL_MOCK_TIME}
{
    ElapseTime(std::chrono::seconds(0)); // start mocking the steady clock.
}

FuzzedSock::~FuzzedSock()
{
    // Sock::~Sock() will be called after FuzzedSock::~FuzzedSock() and it will call
    // close(m_socket) if m_socket is not INVALID_SOCKET.
    // Avoid closing an arbitrary file descriptor (m_socket is just a random very high number which
    // theoretically may coincide with a real opened file descriptor).
    m_socket = INVALID_SOCKET;
}
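
FuzzedSock stands in for a real socket: every syscall-like method below returns values and errno codes drawn from the fuzz input, so networking code can be exercised without touching the OS. A minimal sketch of plugging it into code that expects a Sock (the harness variables are assumptions):

// Sketch: exercising Sock-consuming code with a FuzzedSock instead of a real socket.
FuzzedDataProvider provider{buffer.data(), buffer.size()};
auto sock{std::make_unique<FuzzedSock>(provider)};

std::string errmsg;
if (sock->IsConnected(errmsg)) {
    std::array<uint8_t, 64> buf;
    // Both the return value and any errno are chosen from the fuzz input.
    const ssize_t n{sock->Recv(buf.data(), buf.size(), /*flags=*/0)};
    (void)n;
}
// Destruction is safe: ~FuzzedSock() resets m_socket so no real descriptor is closed.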

void FuzzedSock::ElapseTime(std::chrono::milliseconds duration) const
{
    m_time += duration;
    MockableSteadyClock::SetMockTime(m_time);
}

FuzzedSock& FuzzedSock::operator=(Sock&& other)
{
    assert(false && "Move of Sock into FuzzedSock not allowed.");
    return *this;
}

ssize_t FuzzedSock::Send(const void* data, size_t len, int flags) const
{
    constexpr std::array send_errnos{
        EACCES,
        EAGAIN,
        EALREADY,
        EBADF,
        ECONNRESET,
        EDESTADDRREQ,
        EFAULT,
        EINTR,
        EINVAL,
        EISCONN,
        EMSGSIZE,
        ENOBUFS,
        ENOMEM,
        ENOTCONN,
        ENOTSOCK,
        EOPNOTSUPP,
        EPIPE,
        EWOULDBLOCK,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        return len;
    }
    const ssize_t r = m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(-1, len);
    if (r == -1) {
        SetFuzzedErrNo(m_fuzzed_data_provider, send_errnos);
    }
    return r;
}

ssize_t FuzzedSock::Recv(void* buf, size_t len, int flags) const
{
    // Have a permanent error at recv_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always return the first element and we want to avoid Recv()
    // returning -1 and setting errno to EAGAIN repeatedly.
    constexpr std::array recv_errnos{
        ECONNREFUSED,
        EAGAIN,
        EBADF,
        EFAULT,
        EINTR,
        EINVAL,
        ENOMEM,
        ENOTCONN,
        ENOTSOCK,
        EWOULDBLOCK,
    };
    assert(buf != nullptr || len == 0);

    // Do the latency before any of the "return" statements.
    if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) {
        std::this_thread::sleep_for(std::chrono::milliseconds{2});
    }

    if (len == 0 || m_fuzzed_data_provider.ConsumeBool()) {
        const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
        if (r == -1) {
            SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
        }
        return r;
    }

    size_t copied_so_far{0};

    if (!m_peek_data.empty()) {
        // `MSG_PEEK` was used in the preceding `Recv()` call, copy the first bytes from `m_peek_data`.
        const size_t copy_len{std::min(len, m_peek_data.size())};
        std::memcpy(buf, m_peek_data.data(), copy_len);
        copied_so_far += copy_len;
        if ((flags & MSG_PEEK) == 0) {
            m_peek_data.erase(m_peek_data.begin(), m_peek_data.begin() + copy_len);
        }
    }

    if (copied_so_far == len) {
        return copied_so_far;
    }

    auto new_data = ConsumeRandomLengthByteVector(m_fuzzed_data_provider, len - copied_so_far);
    if (new_data.empty()) return copied_so_far;

    std::memcpy(reinterpret_cast<uint8_t*>(buf) + copied_so_far, new_data.data(), new_data.size());
    copied_so_far += new_data.size();

    if ((flags & MSG_PEEK) != 0) {
        m_peek_data.insert(m_peek_data.end(), new_data.begin(), new_data.end());
    }

    if (copied_so_far == len || m_fuzzed_data_provider.ConsumeBool()) {
        return copied_so_far;
    }

    // Pad to len bytes.
    std::memset(reinterpret_cast<uint8_t*>(buf) + copied_so_far, 0x0, len - copied_so_far);

    return len;
}
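
Recv() honours MSG_PEEK by stashing peeked bytes in m_peek_data, so a later plain read sees the same prefix. A small illustration of that contract, reusing the `sock` from the earlier sketch and assuming <algorithm> and <array> are available:

// Sketch: bytes returned by a MSG_PEEK read reappear at the start of the next non-peek read.
std::array<uint8_t, 32> peeked{}, got{};
const ssize_t n_peek{sock->Recv(peeked.data(), peeked.size(), MSG_PEEK)};
if (n_peek > 0) {
    const ssize_t n_got{sock->Recv(got.data(), got.size(), /*flags=*/0)};
    if (n_got > 0) {
        // A positive non-peek read drains m_peek_data first, so the peeked prefix matches.
        assert(std::equal(peeked.begin(), peeked.begin() + std::min<ssize_t>(n_peek, n_got), got.begin()));
    }
}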

int FuzzedSock::Connect(const sockaddr*, socklen_t) const
{
    // Have a permanent error at connect_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always return the first element and we want to avoid Connect()
    // returning -1 and setting errno to EAGAIN repeatedly.
    constexpr std::array connect_errnos{
        ECONNREFUSED,
        EAGAIN,
        ECONNRESET,
        EHOSTUNREACH,
        EINPROGRESS,
        EINTR,
        ENETUNREACH,
        ETIMEDOUT,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, connect_errnos);
        return -1;
    }
    return 0;
}
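
The ordering of connect_errnos matters: once the fuzz input is exhausted, SetFuzzedErrNo() keeps picking the first entry, so putting a permanent error (ECONNREFUSED) in slot 0 stops callers that retry on transient errors from spinning forever. A sketch of the kind of caller loop being protected, not code from this file (FuzzedSock ignores the address arguments):

// Sketch: typical retry-on-transient-error loop on the caller side.
// With EAGAIN or EINTR in connect_errnos[0], exhausted fuzz input could make this loop endless.
int ret;
do {
    ret = sock->Connect(/*addr=*/nullptr, /*addr_len=*/0);
} while (ret == -1 && (errno == EAGAIN || errno == EINTR || errno == EINPROGRESS));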

int FuzzedSock::Bind(const sockaddr*, socklen_t) const
{
    // Have a permanent error at bind_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always set the global errno to bind_errnos[0]. We want to
    // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
    // repeatedly because proper code should retry on temporary errors, leading to an
    // infinite loop.
    constexpr std::array bind_errnos{
        EACCES,
        EADDRINUSE,
        EADDRNOTAVAIL,
        EAGAIN,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, bind_errnos);
        return -1;
    }
    return 0;
}

int FuzzedSock::Listen(int) const
{
    // Have a permanent error at listen_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always set the global errno to listen_errnos[0]. We want to
    // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
    // repeatedly because proper code should retry on temporary errors, leading to an
    // infinite loop.
    constexpr std::array listen_errnos{
        EADDRINUSE,
        EINVAL,
        EOPNOTSUPP,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, listen_errnos);
        return -1;
    }
    return 0;
}

std::unique_ptr<Sock> FuzzedSock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
    constexpr std::array accept_errnos{
        ECONNABORTED,
        EINTR,
        ENOMEM,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, accept_errnos);
        return std::unique_ptr<FuzzedSock>();
    }
    return std::make_unique<FuzzedSock>(m_fuzzed_data_provider);
}

int FuzzedSock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
{
    constexpr std::array getsockopt_errnos{
        ENOMEM,
        ENOBUFS,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, getsockopt_errnos);
        return -1;
    }
    if (opt_val == nullptr) {
        return 0;
    }
    std::memcpy(opt_val,
                ConsumeFixedLengthByteVector(m_fuzzed_data_provider, *opt_len).data(),
                *opt_len);
    return 0;
}

int FuzzedSock::SetSockOpt(int, int, const void*, socklen_t) const
{
    constexpr std::array setsockopt_errnos{
        ENOMEM,
        ENOBUFS,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, setsockopt_errnos);
        return -1;
    }
    return 0;
}

int FuzzedSock::GetSockName(sockaddr* name, socklen_t* name_len) const
{
    constexpr std::array getsockname_errnos{
        ECONNRESET,
        ENOBUFS,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, getsockname_errnos);
        return -1;
    }
    assert(name_len);
    const auto bytes{ConsumeRandomLengthByteVector(m_fuzzed_data_provider, *name_len)};
    if (bytes.size() < (int)sizeof(sockaddr)) return -1;
    std::memcpy(name, bytes.data(), bytes.size());
    *name_len = bytes.size();
    return 0;
}

bool FuzzedSock::SetNonBlocking() const
{
    constexpr std::array setnonblocking_errnos{
        EBADF,
        EPERM,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, setnonblocking_errnos);
        return false;
    }
    return true;
}

bool FuzzedSock::IsSelectable() const
{
    return m_selectable;
}

bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
{
    constexpr std::array wait_errnos{
        EBADF,
        EINTR,
        EINVAL,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, wait_errnos);
        return false;
    }
    if (occurred != nullptr) {
        // We simulate the requested event as occurred when ConsumeBool()
        // returns false. This avoids simulating endless waiting if the
        // FuzzedDataProvider runs out of data.
        *occurred = m_fuzzed_data_provider.ConsumeBool() ? 0 : requested;
    }
    ElapseTime(timeout);
    return true;
}

bool FuzzedSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
{
    for (auto& [sock, events] : events_per_sock) {
        (void)sock;
        // We simulate the requested event as occurred when ConsumeBool()
        // returns false. This avoids simulating endless waiting if the
        // FuzzedDataProvider runs out of data.
        events.occurred = m_fuzzed_data_provider.ConsumeBool() ? 0 : events.requested;
    }
    ElapseTime(timeout);
    return true;
}
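
Neither Wait() nor WaitMany() blocks: on the success path they advance the mocked steady clock by the full timeout via ElapseTime(), so timeout-driven code observes the time passing without the fuzz iteration actually sleeping. A sketch, assuming MockableSteadyClock::now() reflects the mock time set through SetMockTime():

// Sketch: waiting on a FuzzedSock advances mocked time instead of wall-clock time.
const auto before{MockableSteadyClock::now()};
Sock::Event occurred{0};
if (sock->Wait(std::chrono::milliseconds{250}, Sock::RECV, &occurred)) {
    // ElapseTime() pushed the mock clock forward by the whole timeout.
    assert(MockableSteadyClock::now() - before >= std::chrono::milliseconds{250});
}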

bool FuzzedSock::IsConnected(std::string& errmsg) const
{
    if (m_fuzzed_data_provider.ConsumeBool()) {
        return true;
    }
    errmsg = "disconnected at random by the fuzzer";
    return false;
}

void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node) noexcept
{
    auto successfully_connected = fuzzed_data_provider.ConsumeBool();
    auto remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
    auto local_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
    auto version = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max());
    auto relay_txs = fuzzed_data_provider.ConsumeBool();
    connman.Handshake(node, successfully_connected, remote_services, local_services, version, relay_txs);
}
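
FillNode() drives a fuzzed VERSION/VERACK handshake through ConnmanTestMsg so a freshly created CNode ends up in a plausible post-handshake state before a fuzz target starts feeding it messages. A sketch of that flow; ConsumeNodeAsUniquePtr() and AddTestNode() are assumed from the companion fuzz/test utilities, and `connman` is assumed to be set up by the harness:

// Sketch: preparing a peer for a message-processing fuzz target.
FuzzedDataProvider provider{buffer.data(), buffer.size()};
auto node{ConsumeNodeAsUniquePtr(provider)};
connman.AddTestNode(*node);
// Handshake with fuzz-chosen services, protocol version and relay preference.
FillNode(provider, connman, *node);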