Coverage Report

Created: 2024-09-19 18:47

/root/bitcoin/src/test/fuzz/util/net.cpp
Source:
// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <test/fuzz/util/net.h>

#include <compat/compat.h>
#include <netaddress.h>
#include <node/protocol_version.h>
#include <protocol.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/util.h>
#include <test/util/net.h>
#include <util/sock.h>
#include <util/time.h>

#include <array>
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <thread>
#include <vector>

class CNode;

CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider, FastRandomContext* rand) noexcept
{
    struct NetAux {
        Network net;
        CNetAddr::BIP155Network bip155;
        size_t len;
    };

    static constexpr std::array<NetAux, 6> nets{
        NetAux{.net = Network::NET_IPV4, .bip155 = CNetAddr::BIP155Network::IPV4, .len = ADDR_IPV4_SIZE},
        NetAux{.net = Network::NET_IPV6, .bip155 = CNetAddr::BIP155Network::IPV6, .len = ADDR_IPV6_SIZE},
        NetAux{.net = Network::NET_ONION, .bip155 = CNetAddr::BIP155Network::TORV3, .len = ADDR_TORV3_SIZE},
        NetAux{.net = Network::NET_I2P, .bip155 = CNetAddr::BIP155Network::I2P, .len = ADDR_I2P_SIZE},
        NetAux{.net = Network::NET_CJDNS, .bip155 = CNetAddr::BIP155Network::CJDNS, .len = ADDR_CJDNS_SIZE},
        NetAux{.net = Network::NET_INTERNAL, .bip155 = CNetAddr::BIP155Network{0}, .len = 0},
    };

    const size_t nets_index{rand == nullptr
        ? fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, nets.size() - 1)
        : static_cast<size_t>(rand->randrange(nets.size()))};

    const auto& aux = nets[nets_index];

    CNetAddr addr;

    if (aux.net == Network::NET_INTERNAL) {
        if (rand == nullptr) {
            addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
        } else {
            const auto v = rand->randbytes(32);
            addr.SetInternal(std::string{v.begin(), v.end()});
        }
        return addr;
    }

    DataStream s;

    s << static_cast<uint8_t>(aux.bip155);

    std::vector<uint8_t> addr_bytes;
    if (rand == nullptr) {
        addr_bytes = fuzzed_data_provider.ConsumeBytes<uint8_t>(aux.len);
        addr_bytes.resize(aux.len);
    } else {
        addr_bytes = rand->randbytes(aux.len);
    }
    if (aux.net == NET_IPV6 && addr_bytes[0] == CJDNS_PREFIX) { // Avoid generating IPv6 addresses that look like CJDNS.
        addr_bytes[0] = 0x55; // Just an arbitrary number, anything != CJDNS_PREFIX would do.
    }
    if (aux.net == NET_CJDNS) { // Avoid generating CJDNS addresses that don't start with CJDNS_PREFIX because those are !IsValid().
        addr_bytes[0] = CJDNS_PREFIX;
    }
    s << addr_bytes;

    s >> CAddress::V2_NETWORK(addr);

    return addr;
}

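// Illustrative usage sketch (not from net.cpp): a fuzz target might draw addresses
// either purely from the fuzz input or via a FastRandomContext. The target name
// "netaddr_example" is a placeholder, and FUZZ_TARGET would need <test/fuzz/fuzz.h>.
FUZZ_TARGET(netaddr_example)
{
    FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
    FastRandomContext rand{/*fDeterministic=*/true};

    // Network choice and address bytes taken from the fuzz input.
    const CNetAddr addr_from_fuzzer{ConsumeNetAddr(fuzzed_data_provider, /*rand=*/nullptr)};
    // Network choice and address bytes taken from the RNG, sparing fuzz input bytes.
    const CNetAddr addr_from_rng{ConsumeNetAddr(fuzzed_data_provider, &rand)};

    (void)addr_from_fuzzer.IsValid();
    (void)addr_from_rng.GetNetwork();
}
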
CAddress ConsumeAddress(FuzzedDataProvider& fuzzed_data_provider) noexcept
{
    return {ConsumeService(fuzzed_data_provider), ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS), NodeSeconds{std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<uint32_t>()}}};
}

template <typename P>
P ConsumeDeserializationParams(FuzzedDataProvider& fuzzed_data_provider) noexcept
{
    constexpr std::array ADDR_ENCODINGS{
        CNetAddr::Encoding::V1,
        CNetAddr::Encoding::V2,
    };
    constexpr std::array ADDR_FORMATS{
        CAddress::Format::Disk,
        CAddress::Format::Network,
    };
    if constexpr (std::is_same_v<P, CNetAddr::SerParams>) {
        return P{PickValue(fuzzed_data_provider, ADDR_ENCODINGS)};
    }
    if constexpr (std::is_same_v<P, CAddress::SerParams>) {
        return P{{PickValue(fuzzed_data_provider, ADDR_ENCODINGS)}, PickValue(fuzzed_data_provider, ADDR_FORMATS)};
    }
}
Unexecuted instantiation: _Z28ConsumeDeserializationParamsIN8CNetAddr9SerParamsEET_R18FuzzedDataProvider
Unexecuted instantiation: _Z28ConsumeDeserializationParamsIN8CAddress9SerParamsEET_R18FuzzedDataProvider
template CNetAddr::SerParams ConsumeDeserializationParams(FuzzedDataProvider&) noexcept;
template CAddress::SerParams ConsumeDeserializationParams(FuzzedDataProvider&) noexcept;

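// Illustrative usage sketch (not from net.cpp): the SerParams returned above can be
// applied with operator(), the same mechanism as CAddress::V2_NETWORK used earlier.
// The round-trip helper below is an assumption about typical usage, not an actual harness.
void AddressSerRoundTripExample(FuzzedDataProvider& fuzzed_data_provider)
{
    const CAddress addr{ConsumeAddress(fuzzed_data_provider)};
    const auto ser_params{ConsumeDeserializationParams<CAddress::SerParams>(fuzzed_data_provider)};

    DataStream s{};
    s << ser_params(addr); // serialize with the fuzzed encoding (V1/V2) and format (Disk/Network)

    CAddress decoded;
    s >> ser_params(decoded); // read it back with the same parameters
}
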
FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider)
    : Sock{fuzzed_data_provider.ConsumeIntegralInRange<SOCKET>(INVALID_SOCKET - 1, INVALID_SOCKET)},
      m_fuzzed_data_provider{fuzzed_data_provider},
      m_selectable{fuzzed_data_provider.ConsumeBool()}
{
}

FuzzedSock::~FuzzedSock()
{
    // Sock::~Sock() will be called after FuzzedSock::~FuzzedSock() and it will call
    // close(m_socket) if m_socket is not INVALID_SOCKET.
    // Avoid closing an arbitrary file descriptor (m_socket is just a random very high number which
    // theoretically may coincide with a real opened file descriptor).
    m_socket = INVALID_SOCKET;
}

FuzzedSock& FuzzedSock::operator=(Sock&& other)
{
    assert(false && "Move of Sock into FuzzedSock not allowed.");
    return *this;
}

ssize_t FuzzedSock::Send(const void* data, size_t len, int flags) const
{
    constexpr std::array send_errnos{
        EACCES,
        EAGAIN,
        EALREADY,
        EBADF,
        ECONNRESET,
        EDESTADDRREQ,
        EFAULT,
        EINTR,
        EINVAL,
        EISCONN,
        EMSGSIZE,
        ENOBUFS,
        ENOMEM,
        ENOTCONN,
        ENOTSOCK,
        EOPNOTSUPP,
        EPIPE,
        EWOULDBLOCK,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        return len;
    }
    const ssize_t r = m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(-1, len);
    if (r == -1) {
        SetFuzzedErrNo(m_fuzzed_data_provider, send_errnos);
    }
    return r;
}

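// Illustrative usage sketch (not from net.cpp): because Send() may report a short
// write (any value in [-1, len]), callers have to loop just as with a real
// non-blocking socket. SendAllExample() is a hypothetical helper, not Bitcoin Core API.
bool SendAllExample(const Sock& sock, const unsigned char* data, size_t len)
{
    size_t sent{0};
    while (sent < len) {
        const ssize_t n{sock.Send(data + sent, len - sent, 0)};
        if (n > 0) {
            sent += static_cast<size_t>(n);
            continue;
        }
        if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) {
            continue; // transient error injected by the fuzzer; retry
        }
        return false; // n == 0 or a permanent error such as ECONNRESET or EPIPE
    }
    return true;
}
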
ssize_t FuzzedSock::Recv(void* buf, size_t len, int flags) const
{
    // Have a permanent error at recv_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always set errno to the first element and we want to avoid Recv()
    // returning -1 and setting errno to EAGAIN repeatedly.
    constexpr std::array recv_errnos{
        ECONNREFUSED,
        EAGAIN,
        EBADF,
        EFAULT,
        EINTR,
        EINVAL,
        ENOMEM,
        ENOTCONN,
        ENOTSOCK,
        EWOULDBLOCK,
    };
    assert(buf != nullptr || len == 0);

    // Do the latency before any of the "return" statements.
    if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) {
        std::this_thread::sleep_for(std::chrono::milliseconds{2});
    }

    if (len == 0 || m_fuzzed_data_provider.ConsumeBool()) {
        const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
        if (r == -1) {
            SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
        }
        return r;
    }

    size_t copied_so_far{0};

    if (!m_peek_data.empty()) {
        // `MSG_PEEK` was used in the preceding `Recv()` call, copy the first bytes from `m_peek_data`.
        const size_t copy_len{std::min(len, m_peek_data.size())};
        std::memcpy(buf, m_peek_data.data(), copy_len);
        copied_so_far += copy_len;
        if ((flags & MSG_PEEK) == 0) {
            m_peek_data.erase(m_peek_data.begin(), m_peek_data.begin() + copy_len);
        }
    }

    if (copied_so_far == len) {
        return copied_so_far;
    }

    auto new_data = ConsumeRandomLengthByteVector(m_fuzzed_data_provider, len - copied_so_far);
    if (new_data.empty()) return copied_so_far;

    std::memcpy(reinterpret_cast<uint8_t*>(buf) + copied_so_far, new_data.data(), new_data.size());
    copied_so_far += new_data.size();

    if ((flags & MSG_PEEK) != 0) {
        m_peek_data.insert(m_peek_data.end(), new_data.begin(), new_data.end());
    }

    if (copied_so_far == len || m_fuzzed_data_provider.ConsumeBool()) {
        return copied_so_far;
    }

    // Pad to len bytes.
    std::memset(reinterpret_cast<uint8_t*>(buf) + copied_so_far, 0x0, len - copied_so_far);

    return len;
}

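// Illustrative usage sketch (not from net.cpp): bytes handed out under MSG_PEEK are
// kept in m_peek_data and re-delivered by the next plain Recv(), mimicking kernel
// socket semantics. The helper name below is a placeholder.
void PeekThenReadExample(FuzzedDataProvider& fuzzed_data_provider)
{
    FuzzedSock sock{fuzzed_data_provider};
    unsigned char buf[64];

    const ssize_t peeked{sock.Recv(buf, sizeof(buf), MSG_PEEK)}; // data stays queued
    const ssize_t got{sock.Recv(buf, sizeof(buf), 0)};           // peeked prefix is delivered again, then dropped
    (void)peeked;
    (void)got;
}
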
int FuzzedSock::Connect(const sockaddr*, socklen_t) const
{
    // Have a permanent error at connect_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always set errno to the first element and we want to avoid Connect()
    // returning -1 and setting errno to EAGAIN repeatedly.
    constexpr std::array connect_errnos{
        ECONNREFUSED,
        EAGAIN,
        ECONNRESET,
        EHOSTUNREACH,
        EINPROGRESS,
        EINTR,
        ENETUNREACH,
        ETIMEDOUT,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, connect_errnos);
        return -1;
    }
    return 0;
}

int FuzzedSock::Bind(const sockaddr*, socklen_t) const
{
    // Have a permanent error at bind_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always set the global errno to bind_errnos[0]. We want to
    // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
    // repeatedly because proper code should retry on temporary errors, leading to an
    // infinite loop.
    constexpr std::array bind_errnos{
        EACCES,
        EADDRINUSE,
        EADDRNOTAVAIL,
        EAGAIN,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, bind_errnos);
        return -1;
    }
    return 0;
}

int FuzzedSock::Listen(int) const
{
    // Have a permanent error at listen_errnos[0] because when the fuzzed data is exhausted
    // SetFuzzedErrNo() will always set the global errno to listen_errnos[0]. We want to
    // avoid this method returning -1 and setting errno to a temporary error (like EAGAIN)
    // repeatedly because proper code should retry on temporary errors, leading to an
    // infinite loop.
    constexpr std::array listen_errnos{
        EADDRINUSE,
        EINVAL,
        EOPNOTSUPP,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, listen_errnos);
        return -1;
    }
    return 0;
}

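// Illustrative usage sketch (not from net.cpp): code driven by FuzzedSock has to
// treat the injected errno values like real ones, e.g. EINPROGRESS/EAGAIN from
// Connect() means "not connected yet", not failure. The control flow below is an
// assumption about typical callers, not Bitcoin Core's actual connect logic.
bool ConnectExample(const Sock& sock, const sockaddr* addr, socklen_t addr_len)
{
    if (sock.Connect(addr, addr_len) == 0) {
        return true; // connected immediately
    }
    if (errno == EINPROGRESS || errno == EAGAIN || errno == EINTR) {
        // A real caller would now wait for writability, e.g. via sock.Wait(timeout, Sock::SEND).
        return true;
    }
    return false; // permanent failure (ECONNREFUSED, ENETUNREACH, ETIMEDOUT, ...)
}
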
std::unique_ptr<Sock> FuzzedSock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
    constexpr std::array accept_errnos{
        ECONNABORTED,
        EINTR,
        ENOMEM,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, accept_errnos);
        return std::unique_ptr<FuzzedSock>();
    }
    return std::make_unique<FuzzedSock>(m_fuzzed_data_provider);
}

int FuzzedSock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
{
    constexpr std::array getsockopt_errnos{
        ENOMEM,
        ENOBUFS,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, getsockopt_errnos);
        return -1;
    }
    if (opt_val == nullptr) {
        return 0;
    }
    std::memcpy(opt_val,
                ConsumeFixedLengthByteVector(m_fuzzed_data_provider, *opt_len).data(),
                *opt_len);
    return 0;
}

int FuzzedSock::SetSockOpt(int, int, const void*, socklen_t) const
{
    constexpr std::array setsockopt_errnos{
        ENOMEM,
        ENOBUFS,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, setsockopt_errnos);
        return -1;
    }
    return 0;
}

int FuzzedSock::GetSockName(sockaddr* name, socklen_t* name_len) const
{
    constexpr std::array getsockname_errnos{
        ECONNRESET,
        ENOBUFS,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, getsockname_errnos);
        return -1;
    }
    *name_len = m_fuzzed_data_provider.ConsumeData(name, *name_len);
    return 0;
}

bool FuzzedSock::SetNonBlocking() const
{
    constexpr std::array setnonblocking_errnos{
        EBADF,
        EPERM,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, setnonblocking_errnos);
        return false;
    }
    return true;
}

bool FuzzedSock::IsSelectable() const
{
    return m_selectable;
}

bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
{
    constexpr std::array wait_errnos{
        EBADF,
        EINTR,
        EINVAL,
    };
    if (m_fuzzed_data_provider.ConsumeBool()) {
        SetFuzzedErrNo(m_fuzzed_data_provider, wait_errnos);
        return false;
    }
    if (occurred != nullptr) {
        // We simulate the requested event as occurred when ConsumeBool()
        // returns false. This avoids simulating endless waiting if the
        // FuzzedDataProvider runs out of data.
        *occurred = m_fuzzed_data_provider.ConsumeBool() ? 0 : requested;
    }
    return true;
}

bool FuzzedSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
{
    for (auto& [sock, events] : events_per_sock) {
        (void)sock;
        // We simulate the requested event as occurred when ConsumeBool()
        // returns false. This avoids simulating endless waiting if the
        // FuzzedDataProvider runs out of data.
        events.occurred = m_fuzzed_data_provider.ConsumeBool() ? 0 : events.requested;
    }
    return true;
}

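// Illustrative usage sketch (not from net.cpp): callers request RECV and/or SEND and
// then inspect which events were reported; with FuzzedSock the outcome is decided by
// ConsumeBool() as described in the comments above.
void WaitExample(const Sock& sock)
{
    Sock::Event occurred{0};
    if (sock.Wait(std::chrono::milliseconds{10}, Sock::RECV | Sock::SEND, &occurred)) {
        if (occurred & Sock::RECV) {
            // reported readable: a subsequent Recv() would be attempted here
        }
        if (occurred & Sock::SEND) {
            // reported writable: a subsequent Send() would be attempted here
        }
    }
}
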
bool FuzzedSock::IsConnected(std::string& errmsg) const
{
    if (m_fuzzed_data_provider.ConsumeBool()) {
        return true;
    }
    errmsg = "disconnected at random by the fuzzer";
    return false;
}

void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node) noexcept
{
    auto successfully_connected = fuzzed_data_provider.ConsumeBool();
    auto remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
    auto local_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
    auto version = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max());
    auto relay_txs = fuzzed_data_provider.ConsumeBool();
    connman.Handshake(node, successfully_connected, remote_services, local_services, version, relay_txs);
}

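// Illustrative usage sketch (not from net.cpp): message-processing fuzz targets
// typically obtain a ConnmanTestMsg and a freshly created CNode from their test
// setup and then let FillNode() drive the VERSION/VERACK handshake with
// fuzzer-chosen parameters. The state check below is for illustration only.
void FillNodeExample(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node)
{
    FillNode(fuzzed_data_provider, connman, node);
    if (node.fSuccessfullyConnected) {
        // Handshake completed: a harness would now feed application-level messages.
    }
}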