Squashed 'third_party/allwpilib/' changes from 83f1860047..f1a82828fe
f1a82828fe [wpiutil] Add DataLog and DataLogManager Stop() (#5860)
2a04e12c6f [apriltag] AprilTagFieldLayout: Add accessors for origin and field dimensions (#5869)
33e0089afb Cleanup usages of std::function<void(void)> (#5864)
d06fa633d5 [build] Fix protobuf generation when building with make (#5867)
049732afb8 [cscore] Make camera connection logging clearer (#5866)
87f7c19f90 [wpimath] Make InterpolatingDoubleTreeMap constructor public (#5865)
6b53ef47cf [wpimath] Don't recreate TrapezoidProfile in ProfiledPIDController calculate() (#5863)
8a3a268ae6 [commands] Add finallyDo with zero-arg lambda (#5862)
1c35d42cd0 [wpilib] Pop diagnostic for deprecated function use (#5859)
ddc8db6c26 [wpimath] Add feedforward constant constructor to ElevatorSim (#5823)
c6aff2c431 [upstream_utils] Update to LLVM 17.0.4 (#5855)
a9c5b18a39 [build] Update OpenCV to 2024-4.8.0-2 (#5854)
9540b6922d [hal] Add CAN IDs for AndyMark and Vivid Hosting (#5852)
83a7d33c47 [glass] Improve display of protobuf/struct type strings (#5850)
a4a8ad9c75 [commands] Make Java SelectCommand generic (#5849)
9eecf2a456 [build] Add CMake option to build Java source jars (#5768)
9536a311cb [wpilib] Add support for the PS5 DualSense controller (#5257)
8d5e6737fc [wpilibc] SolenoidSim: Add virtual destructor (#5848)
07e13d60a2 [ntcore] Fix write_impl (#5847)
1713386869 [wpiutil] ProtobufMessageDatabase: Fix out-of-order Add() rebuild (#5845)
35472f5fc9 [ntcore] Fix a use-after-free in client close (#5844)
ed168b522c [ntcore] Disable buf pool when asan is enabled (#5843)
3e7ba2cc6f [wpinet] WebSocket: Fix write behavior (#5841)
80c47da237 [sim] Disable the robot program when DS disconnects (#5818)
abe1cec90c [wpilib] Update Usage Reporting ResourceType from NI Libraries (#5842)
cdf981abba [glass] Fix position of data type in NT view (#5840)
04dcd80adb [build] Publish unit tests for examples (#5838)
49920234ac [build] Fix checkstyle rules to allow Windows paths (#5839)
366b715942 [wpilib] Fix SendableChooser test (#5835)
3ba501f947 [commands] Java: Fix CommandXboxController.leftTrigger() parameter order (#5831)
ec569a58ef [wpimath] Make KalmanTypeFilter interface public (#5830)
b91317fd36 [wpiutil] DataLog.addSchema(): Don't add into a set view (#5829)
2ab4fcbc24 [wpiutil] ProtobufMessageDatabase: Clear messages first (#5827)
98c14f1692 [wpimath] Add EKF/UKF u-y-R correct overload (#5832)
60bcdeded9 [ci] Disable java in sanitizer builds (#5833)
c87f8fd538 [commands] Add DeferredCommand (#5566)
ad80eb3a0b [ci] Update actions for comment-command (#5824)
c7d6ad5a0b [ntcore] WebSocketConnection: Use weak capture (#5822)
8a8e220792 [simgui] Add 'Invalid' option for AllianceStation (#5820)
cfc6a47f76 [sim] DS plugin: Fix off-by-one error when setting alliance station (#5819)
8efa586ace [ntcore] Don't check type string on publishing an entry (#5816)
23ea188e60 [glass] Add protobuf decode error log message (#5812)
928e87b4f4 [build] Add combined test meta-task (#5813)
63ef585d4b [wpiutil] Fix compilation of MathExtras.h on Windows with /sdl (#5809)
b03a7668f9 [build] Windows CMake/vcpkg fixes (#5807)
3f08bcde54 [hal] Fix HAL AllianceStation on rio (#5811)
196d963dc4 [ntcore] Fix off-by-one error in stream write (#5810)
f4cbcbc984 Fix typos (NFC) (#5804)
ec0f7fefb0 [myrobot] Update the myRobot JRE (#5805)
3d618bdbfd [wpiutil] Fix Java struct array unpacking (#5801)
1fa7445667 [ntcore] Check for valid client in incoming text and binary (#5799)
269b9647da [ci] Update JDK for combine step (#5794)
bee32f080e [docs] Add wpiunits to JavaDocs (#5793)
25dad5a531 [wpinet] TCPConnector_parallel: Don't use thread_local (#5791)
4a93581f1a [build] cmake: use default library type for libglassnt, libglass, wpigui, and imgui (#5797)
abb2857e03 [wpilib] Counter: Fix default distance per pulse, add distance and rate to C++ (#5796)
b14a61e1c0 [readme] Add link to QuickBuffers release page (#5795)
cf54d9ccb7 [wpiutil, ntcore] Add structured data support (#5391)
ecb7cfa9ef [wpimath] Add Exponential motion profile (#5720)
7c6fe56cf2 [ntcore] Fix crash on disconnect (#5788)
85147bf69e [wpinet] WebSocketSerializer: Fix UB (#5787)
244163acad [wpinet] uv::Stream::TryWrite(): Return 0 on EAGAIN (#5784)
820728503d [hal] Remove extra semicolon in RoboRioData (#5786)
45f307d87e [upstream_utils] Upgrade to LLVM 17.0.3 (#5785)
4ce4d63efc [wpilibj] Fix RobotBase.isSimulation() (#5783)
579007ceb3 [commands] Add requirements parameter to Commands.idle() (#5774)
3f3a169149 [wpilib] Make physics sim setState() functions public (#5779)
7501e4ac88 [wpilib] Close sim device in ADIS IMUs (#5776)
99630d2e78 [wpimath] Upgrade to EJML 0.43.1 (#5778)
02cbbc997d [wpimath] Make Vector-Vector binary operators return Vector (#5772)
ed93889e17 [examples] Fix typo in TimesliceRobot example name (#5773)
da70e4c262 [docs] Add jinja2 to CMake prerequisites (#5771)
e814595ea7 [wpimath] Add ChassisSpeeds.fromRobotRelativeSpeeds() (#5744)
f98c943445 [wpimath] LinearSystemId: Add DCMotorSystem overload (#5770)
b3eb64b0f7 [wpiutil] ct_string: Use inline namespace for literals (#5767)
7d9ba256c2 Revert "[build] Add CMake option to build Java source jars (#5756)" (#5766)
1f6492e3d8 [sysid] Update JSON library usage (#5765)
638f04f626 [wpiutil] Add protobuf to thirdparty sources (#5746)
210255bfff [wpiutil] Update json to 3.11.2 (#5680)
896772c750 [wpimath] Add DCMotor functions for Kraken X60 and Neo Vortex (#5759)
fd427f6c82 [wpimath] Fix hardcoded module count in SwerveDriveKinematics.resetHeading() (#5762)
c0b4c6cce6 [wpimath] Add overloads for Transform2d and Transform3d (#5757)
9a0aafd8ab [examples] Make swerve examples multiply desired module speeds by cosine of heading error (#5758)
1c724884ca [build] Add CMake option to build Java source jars (#5756)
5b0db6b93e [ci] Forward CI as well (#5755)
f8cbbbac12 [ci] Take 2 on passing GITHUB_REF (#5754)
b9944be09c [ci] Pass GITHUB_REF to docker container (#5753)
de5e4eda6c [build] Update apriltag, libssh, googletest for 2024 (#5752)
227e660e20 [upstream_utils] Upgrade to LLVM 17.0.2 (#5750)
36f94c9f21 [commands,romi,xrp] Add frcYear to vendordep (#5747)
741d166457 [glass] NT view: enhance array support (#5732)
1d23513945 [ntcore] Fix string array value comparison (#5745)
ff1849052e [commands] Make command scheduling order consistent (#5470)
58e8474368 [build] Disable armsimulation unit test (#5739)
fb07b0da49 [examples] Add XRP C++ Examples and Templates (#5743)
81893ad73d Run wpiformat with clang-format 17 (#5740)
faa1e665ba [wpimath] Add ElevatorFeedforward.calculate(currentV, nextV) overload (#5715)
a789632052 [build] Update to native utils 2024.3.1 (#5738)
8f60ab5182 [build] Update OpenCV to 2024-4.8.0-1 (#5737)
33243f982b [wpimath] Expand Quaternion class with additional operators (#5600)
420f2f7c80 [ntcore] Add RTT-only subprotocol (#5731)
2b63e35ded [ntcore] Fix moving outgoing queue to new period (#5735)
be939cb636 [ntcore] Fix notification of SetDefaultEntryValue (#5733)
69a54de202 [build] Update enterprise plugin (#5730)
fef03a3ff5 [commands] Clean up C++ includes after Requirements was added (#5719)
8b7c6852cf [ntcore] Networking improvements (#5659)
1d19e09ca9 [wpiutil] Set WPI_{UN}IGNORE_DEPRECATED to empty when all else fails (#5728)
58141d6eb5 [wpilib] Make BooleanEvent more consistent (#5436)
6576d9b474 [wpilib] SendableChooser: implement Sendable instead of NTSendable (#5718)
a4030c670f [build] Update to gradle 8.4, enable win arm builds (#5727)
0960f11eba [wpinet] Revert removal of uv_clock_gettime() (#5723)
cb1bd0a3be [wpiutil] Get more precise system time on Windows (#5722)
4831277ffe [wpigui] Fix loading a maximized window on second monitor (#5721)
3eb372c25a [wpiutil] SendableBuilder: Add PublishConst methods (#5158)
1fec8596a4 [ci] Fix -dirty version (#5716)
f7e47d03f3 [build] Remove unnecessary CMake config installs (#5714)
a331ed2374 [sysid] Add SysId (#5672)
8d2cbfce16 [wpiutil] DataLog: Stop logging if insufficient free space (#5699)
48facb9cef [ntcoreffi] Add DataLogManager (#5702)
aecbcb08fc [ntcore] Correctly start DataLog for existing publishers (#5703)
5e295dfbda [wpiutil] DataLog: Limit total buffer allocation (#5700)
c7c7e05d9d [ci] Unbreak combiner (#5698)
c92bad52cb [wpilib] DataLogManager: Use system time valid function (#5697)
d404af5f24 [wpilib] RobotController: Add isSystemTimeValid() (#5696)
e56f1a3632 [ci] Run combine but skip all steps (#5695)
8f5bcad244 [ci] Use sccache for cmake builds (#5692)
703dedc4a6 [ci] Upgrade get-cmake action to fix node12 deprecation warning (#5694)
c69a0d7504 [ci] Don't run example unit test that segfaults (#5693)
66358d103e Add menu items for online docs to GUI tools (#5689)
4be8384a76 [ci] Disable combine on PR builds (#5691)
90288f06a6 [ci] Fix Gradle disk space issues (#5688)
9e9583412e [wpigui] Make wpi::gui::OpenURL() fork the process first (#5687)
d4fcd80b7b [ci] Gradle: Use container only for build step (#5684)
7b70e66772 [outlineviewer] Fix thirdparty library include sorting (#5683)
5f651df5d5 [build] Clean up Gradle configs (#5685)
65b26738d5 Add CMakeSettings.json to gitignore (#5682)
d0305951ad Fix GitHub inline warnings (#5681)
e8d4a20331 [build][cmake] Fix windows tests and re-enable CI tests (#5674)
2b58bbde0b [xrp] Add Reflectance sensor and rangefinder classes (#5673)
dd5612fbee [json] Add forward definition header (#5676)
eab44534c3 [wpimath] Remove unused SmallString include (#5677)
5ab54ff760 Replace wpi::raw_istream with wpi::MemoryBuffer (#5675)
1b6ec5a95d [wpiutil] Upgrade to LLVM 17.0.1 (#5482)
07a0d22fe6 [build] Build examples in CMake CI (#5667)
97021f074a [build] Upgrade imgui and implot (#5668)
87ce1e3761 [build] Fix wpilibNewCommands CMake install (#5671)
6ef94de9b5 [wpimath] Add tests for ArmFeedforward and ElevatorFeedforward (#5663)
c395b29fb4 [wpinet] Add WebSocket::TrySendFrames() (#5607)
c4643ba047 [romi/xrp] Fix version typo in vendordep json (#5664)
51dcb8b55a [examples] Make Romi/XRP Examples use appropriate vendordeps (#5665)
daf7702007 [build] Test each example in a new environment (#5662)
e67df8c180 [wpilib] Const-qualify EncoderSim getters (#5660)
7be290147c [wpiutil] Refactor SpanMatcher and TestPrinters from ntcore (#5658)
9fe258427a [commands] Add proxy factory to Commands (#5603)
633c5a8a22 [commands] Add C++ Requirements struct (#5504)
b265a68eea [commands] Add interruptor parameter to onCommandInterrupt callbacks (#5461)
e93c233d60 [ntcore] Compute Value memory size when creating value (#5657)
5383589f99 [wpinet] uv::Request: Return shared_ptr from Release() (#5656)
40b552be4a [wpinet] uv::Stream: Return error from TryWrite() (#5655)
202a75fe08 [wpinet] RequestImpl: Avoid infinite loop in shared_from_this() (#5654)
8896515eb7 [wpinet] uv::Buffer: Add bytes() accessor (#5653)
ae59a2fba2 [wpinet] uv::Error: Change default error to 0 (#5652)
3b51ecc35b [wpiutil] SpanExtras: Add take_back and take_front (#5651)
17f1062885 Replace std::snprintf() with wpi::format_to_n_c_str() (#5645)
bb39900353 [romi/xrp] Add Romi and XRP Vendordeps (#5644)
cb99517838 [build] cmake: Use default install location on windows for dlls (#5580)
25b0622d4c [build] Add Windows CMake CI (#5516)
34e7849605 Add warning to development builds instructions (NFC) (#5646)
e9e611c9d8 [cameraserver] Remove CameraServer.SetSize() (#5650)
94f58cc536 [wpilib] Remove Compressor.Enabled() (#5649)
4da5aee88a [wpimath] Remove SlewRateLimiter 2 argument constructor (#5648)
2e3ddf5502 Update versions in development builds instructions to 2024 (#5647)
19a8850fb1 [examples] Add TimesliceRobot templates (#3683)
9047682202 [sim] Add XRP-specific plugin (#5631)
575348b81c [wpilib] Use IsSimulation() consistently (#3534)
12e2043b77 [wpilib] Clean up Notifier (#5630)
4bac4dd0f4 [wpimath] Move PIDController from frc2 to frc namespace (#5640)
494cfd78c1 [wpiutil] Fix deprecation warning in LLVM for C++23 (#5642)
43a727e868 [apriltag] Make loadAprilTagFieldLayout throw an unchecked exception instead (#5629)
ad4b017321 [ci] Use Ninja for faster builds (#5626)
4f2114d6f5 Fix warnings from GCC 13 release build (#5637)
e7e927fe26 [build] Also compress debug info for CMake RelWithDebInfo build type (#5638)
205a40c895 [build] Specify zlib for debug info compression (#5636)
707444f000 [apriltag] Suppress -Wtype-limits warning in asserts from GCC 13 (#5635)
3b79cb6ed3 [commands] Revert SubsystemBase deprecation/removal (#5634)
bc7f23a632 [build] Compress Linux debug info (#5633)
57b2d6f254 [build] Update to image 2024 v1.0 (#5625)
339ef1ea39 [wpilib] DataLogManager: Warn user if logging to RoboRIO 1 internal storage (#5617)
7a9a901a73 [build] Fix cmake config files (#5624)
298f8a6e33 [wpilib] Add Mechanism2d tests and make Java impl match C++ (#5527)
d7ef817bae [apriltag] Update apriltag library (#5619)
c3fb31fd0e [docs] Switch to Java 17 api docs (#5613)
bd64f81cf9 [build] Run Google tests in release mode in CI (#5615)
66e6bd81ea [wpimath] Cleanup wpimath/algorithms.md (NFC) (#5621)
4fa56fd884 [build] Add missing find_dependency call (#5623)
f63d958995 [build] Update to native utils 2024.2.0 (#5601)
a9ab08f48b [wpimath] Rename ChassisSpeeds.fromDiscreteSpeeds() to discretize() (#5616)
8e05983a4a [wpimath] Add math docs to plant inversion feedforward internals (NFC) (#5618)
3a33ce918b [ntcore] Add missing StringMap include (#5620)
a6157f184d [wpiutil] timestamp: Add ShutdownNowRio (#5610)
e9f612f581 [build] Guard policy setting for CMake versions below 3.24 (#5612)
1a6df6fec6 [wpimath] Fix DARE Q decomposition (#5611)
9b3f7fb548 [build] Exclude IntelliJ folders from spotless XML (#5602)
814f18c7f5 [wpimath] Fix computation of C for DARE (A, C) detectability check (#5609)
ac23f92451 [hal] Add GetTeamNumber (#5596)
a750bee54d [wpimath] Use std::norm() in IsStabilizable() (#5599)
8e2465f8a0 [wpimath] Add arithmetic functions to wheel speeds classes (#5465)
10d4f5b5df [wpimath] Clean up notation in DARE precondition docs (#5595)
b2dd59450b [hal] Fix unfinished/incorrect GetCPUTemp functions (#5598)
99f66b1e24 [wpimath] Replace frc/EigenCore.h typedefs with Eigen's where possible (#5597)
383289bc4b [build] Make custom CMake macros use lowercase (#5594)
45e7720ec1 [build] Add error message when downloading files in CMake (#5593)
4e0d785356 [wpimath] ChassisSpeeds: document that values aren't relative to the robot (NFC) (#5551)
3c04580a57 [commands] ProxyCommand: Use inner command name in unique_ptr constructor (#5570)
cf19102c4a [commands] SelectCommand: Fix leakage and multiple composition bug (#5571)
171375f440 [ntcoreffi] Link to NI libraries (#5589)
89add5d05b Disable flaky tests (#5591)
a8d4b162ab [ntcore] Remove RPC manual tests (#5590)
39a73b5b58 [commands] C++: Add CommandPtr supplier constructor to ProxyCommand (#5572)
36d514eae7 [commands] Refactor C++ ScheduleCommand to use SmallSet (#5568)
52297ffe29 [commands] Add idle command (#5555)
67043a8eeb [wpimath] Add angular jerk unit (#5582)
51b0fb1492 [wpimath] Fix incorrect header inclusion in angular_acceleration.h (#5587)
b7657a8e28 [wpimath] Split WPIMathJNI into logical chunks (#5552)
ea17f90f87 [build] Fix tool builds with multiple arm platforms installed (#5586)
f1d7b05723 [wpimath] Clean up unit formatter (#5584)
d7264ff597 Replace wpi::errs() usage with fmtlib (#5560)
ab3bf39e0e [wpiutil] Upgrade to fmt 10.1.1 (#5585)
165ebe4c79 Upgrade to fmt 10.1.0 (#5326)
8e2a7fd306 Include thirdparty libraries with angle brackets (#5578)
e322ab8e46 [wpimath] Fix docs for DARE ABQRN stabilizability check (NFC) (#5579)
360fb835f4 [upstream_utils] Handle edge case in filename matches (#5576)
9d86624c00 [build] Fix CMake configure warnings (#5577)
969979d6c7 [wpiutil] Update to foonathan memory 0.7-3 (#5573)
0d2d989e84 [wpimath] Update to gcem 1.17.0 (#5575)
cf86af7166 [wpiutil] Update to mpack 1.1.1 (#5574)
a0c029a35b [commands] Fix dangling SelectCommand documentation (NFC) (#5567)
349141b91b [upstream_utils] Document adding a patch (NFC) (#5432)
7889b35b67 [wpimath] Add RamseteController comparison to LTV controller docs (NFC) (#5559)
b3ef536677 [build] Ignore nt/sim json files in spotless (#5565)
ed895815b5 [build] Compile Java with UTF-8 encoding (#5564)
2e4ad35e36 [wpiutil] jni_util: Add JSpan and CriticalJSpan (#5554)
8f3d6a1d4b [wpimath] Remove discretizeAQTaylor() (#5562)
7c20fa1b18 [wpimath] Refactor DARE tests to reduce RAM usage at compile time (#5557)
89e738262c [ntcore] Limit buffer pool size to 64KB per connection (#5485)
96f7fa662e Upgrade Maven dependencies (#5553)
7a2d336d52 [wpinet] Leak multicast handles during windows shutdown (#5550)
f9e2757d8f [wpimath] Use JDoubleArrayRef in all JNI functions (#5546)
0cf6e37dc1 [wpimath] Make LTV controller constructors use faster DARE solver (#5543)
6953a303b3 [build] Fix the windows build with fmt (#5544)
7a37e3a496 [wpimath] Correct Rotation3d::RotateBy doc comment (NFC) (#5541)
186b409e16 [wpimath] Remove internal Eigen header include (#5539)
03764dfe93 [wpimath] Add static matrix support to DARE solver (#5536)
394cfeadbd [wpimath] Use SDA algorithm instead of SSCA for DARE solver (#5526)
a4b7fde767 [wpilib] Add mechanism specific SetState overloads to physics sims (#5534)
8121566258 [wpimath] Fix CoordinateSystem.convert() Transform3d overload (#5532)
b542e01a0b [glass] Fix array crash when clearing existing workspace (#5535)
e2e1b763b2 [wpigui] Fix PFD file dialogs not closing after window closing (#5530)
86d7bbc4e4 [examples] Add Java Examples and Templates for the XRP (#5529)
e8b5d44752 [wpimath] Make Java Quaternion use doubles instead of Vector (#5525)
38c198fa64 [myRobot] Add apriltags to myRobot build (#5528)
00450c3548 [wpimath] Upgrade to EJML 0.42 (#5531)
faf3cecd83 [wpimath] Don't copy Matrix and underlying storage in VecBuilder (#5524)
6b896a38dc [build] Don't enforce WITH_FLAT_INSTALL with MSVC (part 2) (#5517)
c01814b80e [wpiutil] Add C API for DataLog (#5509)
b5bd0771eb [wpimath] Document extrinsic vs intrinsic rotations (NFC) (#5508)
84ed8aec05 [build] Don't enforce WITH_FLAT_INSTALL with MSVC (#5515)
999f677d8c [ntcoreffi] Add WPI_Impl_SetupNowRio to exported symbols (#5510)
338f37d302 Fix header sorting of libssh (#5507)
75cbd9d6d0 [glass] Add background color selector to glass plots (#5506)
e2c190487b [examples] Add flywheel bang-bang controller example (#4071)
c52dad609e [wpinet] WebSocket: Send pong in response to ping (#5498)
e2d17a24a6 [hal] Expose power rail disable and cpu temp functionality (#5477)
3ad5d2e42d [hal,wpiutil] Use HMB for FPGA Timestamps (#5499)
b46a872494 [ntcore] Remove pImpl from implementation (#5480)
d8c59ccc71 [wpimath] Add tests for MathUtil clamp() and interpolate() (#5501)
0552c8621d [glass,ov] Improve Glass and OutlineViewer title bar message (#5502)
90e37a129f [wpiutil,wpimath] Add generic InterpolatingTreeMap (#5372)
d83a6edc20 [wpilib] Update GetMatchTime docs and units (#5232)
6db2c42966 [wpimath] Trajectory: Throw on empty lists of States (#5497)
21439b606c [wpimath] Disallow LTV controller max velocities above 15 m/s (#5495)
7496e0d208 [ntcore] Value: More efficiently store arrays (#5484)
0c93aded8a [wpimath] Change kinematics.ToTwist2d(end - start) to kinematics.ToTwist2d(start, end) (#5493)
815a8403e5 [wpimath] Give infeasible trajectory constraints a better exception message (#5492)
35a8b129d9 [wpimath] Add RotateBy() function to pose classes (#5491)
26d6e68c8f [upstream_utils] Add GCEM to CI (#5483)
6aa469ae45 [wpilib] Document how to create LinearSystem object for physics sim classes (NFC) (#5488)
a01b6467d3 [wpimath] Link to docs on LQR and KF tolerances (#5486)
d814f1d123 [wpimath] Fix copy-paste error from Pose2d docs (NFC) (#5490)
98f074b072 [wpimath] Add folder prefix to geometry includes (#5489)
e9858c10e9 [glass] Add tooltips for NT settings (#5476)
12dda24f06 [examples] Fix C robot template not correctly looping (#5474)
fc75d31755 [apriltag] Update apriltaglib (#5475)
a95994fff6 [wpiutil] timestamp: Call FPGA functions directly (#5235)
2ba8fbb6f4 [wpimath] Improve documentation for SwerveModulePosition::operator- (#5468)
b8cdf97621 [build] Prepare for Windows arm64 builds (#5390)
552f4b76b5 [wpimath] Add FOC-enabled Falcon constants to the DCMotor class (#5469)
1938251436 [examples] Add Feedforward to ElevatorProfiledPid (#5300)
873c2a6c10 [examples] Update ElevatorTrapezoidProfile example (#5466)
99b88be4f3 [wpilib] Reduce usage of NTSendable (#5434)
d125711023 [hal] Fix Java REVPH faults bitfield (take 2) (#5464)
c3fab7f1f2 [ntcore] Don't update timestamp when value is unchanged (#5356)
5ec7f18bdc [wpilib] EventLoop docs: Remove BooleanEvent references (NFC) (#5463)
c065ae1fcf [wpiunits] Add subproject for a Java typesafe unit system (#5371)
44acca7c00 [wpiutil] Add ClassPreloader (#5365)
88b11832ec [hal] Fix Java REVPH faults bitfield (#5148)
fb57d82e52 [ntcore] Enhance Java raw value support
3a6e40a44b [wpiutil] Enhance DataLog Java raw value support
8dae5af271 [wpiutil] Add compile-time string utilities (ct_string) (#5462)
fc56f8049a [wpilib] DriverStation: Change alliance station to use optional (#5229)
ef155438bd [build] Consume libuv via cmake config instead of via pkg-config (#5438)
86e91e6724 [wpimath] Refactor TrapezoidProfile API (#5457)
72a4543493 [wpilib] DutyCycleEncoderSim: Expand API (#5443)
657338715d [wpimath] Add ChassisSpeeds method to fix drifting during compound swerve drive maneuvers (#5425)
1af224c21b Add missing <functional> includes (#5459)
0b91ca6d5a [wpilib] SendableChooser: Add onChange listener (#5458)
6f7cdd460e [wpimath] Pose3d: Switch to JNI for exp and log (#5444)
c69e34c80c [wpimath] ChassisSpeeds: Add arithmetic functions (#5293)
335e7dd89d [wpilib] Simulation: Add ctor parameter to set starting state of mechanism sims (#5288)
14f30752ab [wpilib] Deprecate Accelerometer and Gyro interfaces (#5445)
70b60e3a74 [commands] Trigger: Fix method names in requireNonNullParam (#5454)
593767c8c7 [wpimath] Improve Euler angle calculations in gimbal lock (#5437)
daf022d3da [build] Make devImplementation inherit from implementation (#5450)
9b8d90b852 [examples] Convert the unitless joystick inputs to actual physical units (#5451)
1f6428ab63 [ntcore] Fix undefined comparison behavior when array is empty (#5448)
17eb9161cd Update code owners for removal of old commands (#5447)
3c4b58ae1e [wpinet] Upgrade to libuv 1.46.0 (#5446)
aaea85ff16 [commands] Merge CommandBase into Command and SubsystemBase into Subsystem (#5392)
7ac932996a [ci] Use PAT for workflow dispatch (#5442)
efe1987e8b [ci] Trigger pages repo workflow (#5441)
828bc5276f [wpiutil] Upgrade to LLVM 16.0.6 (#5435)
701df9eb87 [ci] Change documentation publish to single-commit (#5440)
e5452e3f69 [wpiutil] Add WPICleaner and an example how to use it (#4850)
7a099cb02a [commands] Remove deprecated classes and functions (#5409)
b250a03944 [wpilib] Add function to wait for DS Connection (#5230)
a6463ed761 [wpiutil] Fix unused variable warning in release build (#5430)
f031513470 [ntcore] NetworkTable::GetSubTables(): Remove duplicates (#5076)
f8e74e2f7c [hal] Unify PWM simulation Speed, Position, and Raw (#5277)
fd5699b240 Remove references to Drake (#5427)
e2d385d80a [build] cmake: Respect USE_SYSTEM_FMTLIB (#5429)
d37f990ce3 [hal] Fix HAL Relay/Main doc module (NFC) (#5422)
a7a8b874ac [docs] Expand HAL_ENUM in doxygen docs (#5421)
3a61deedde [wpimath] Rotation2d: Only use gcem::hypot when constexpr evaluated (#5419)
96145de7db [examples] Fix formatting (NFC) (#5420)
fffe6a7b9a [examples] Improve Pneumatics example coverage in Solenoid and RapidReactCmdBot examples (#4998)
6b5817836d [wpimath] Add tolerance for some tests (#5416)
3233883f3e [cscore] Fix warnings on macos arm (#5415)
c4fc21838f [commands] Add ConditionalCommand getInterruptionBehavior (#5161)
89fc51f0d4 Add tests for SendableChooser and Command Sendable functionality (#5179)
663bf25aaf [docs] Generate docs for symbols in __cplusplus (#5412)
fe32127ea8 [command] Clean up Command doc comments (NFC) (#5321)
c1a01569b4 [wpilib][hal] PWM Raw using microseconds (#5283)
1fca519fb4 [wpiutil] Remove remnants of ghc fs and tcb_span libraries (#5411)
90602cc135 [github] Update issue template to collect more project info (#5090)
34412ac57e [build] Exclude files in bin from Spotless (#5410)
61aa60f0e3 [wpilib] Add robot callback that is called when the DS is initially connected (#5231)
ebae341a91 [commands] Add test for subsystem registration and periodic (#5408)
5d3a133f9f Remove spaces in NOLINT comments (#5407)
3a0e484691 [wpimath] Fix clang-tidy warnings (#5403)
eb3810c765 [wpiutil] Fix clang-tidy warnings (#5406)
c4dc697192 [hal] WS Simulation: Add message filtering capability (#5395)
0eccc3f247 [ntcore] Fix clang-tidy warnings (#5405)
f4dda4bac0 [hal] Add javadocs for JNI (NFC) (#5298)
1c20c69793 [cscore] Fix clang-tidy warnings (#5404)
1501607e48 [commands] Fix clang-tidy warnings (#5402)
991f4b0f62 [wpimath] PIDController: Add IZone (#5315)
f5b0d1484b [wpimath] Add isNear method to MathUtil (#5353)
2ce248f66c [hal] Fix clang-tidy warnings (#5401)
5fc4aee2d2 [wpimath] SwerveDriveKinematics: Rename currentChassisSpeed to desiredChassisSpeed (#5393)
50b90ceb54 [wpimath] SwerveDriveKinematics: Add reset method (#5398)
316cd2a453 [commands] Notify DriverStationSim in CommandTestBaseWithParam (#5400)
d4ea5fa902 [cscore] VideoMode: Add equals override (Java) (#5397)
d6bd72d738 [wpimath] ProfiledPIDController: Add getConstraints (#5399)
25ad5017a9 [wpimath] Refactor kinematics, odometry, and pose estimator (#5355)
5c2addda0f [doc] Add missing pneumatics docs (NFC) (#5389)
c3e04a6ea2 Fix loading tests on macos 12 (#5388)
d5ed9fb859 [wpimath] Create separate archive with just units headers (#5383)
901ab693d4 [wpimath] Use UtilityClassTest for more utility classes (#5384)
9d53231b01 [wpilib] DataLogManager: Add warning for low storage space (#5364)
d466933963 [wpiutil] Group doxygen into MPack module (#5380)
652d1c44e3 [wpiutil] Upgrade to macOS 12 to remove concept shims (#5379)
6414be0e5d [wpimath] Group units doxygen modules (#5382)
7ab5800487 [wpiutil] Fix docs typo in SmallVector (#5381)
59905ea721 Replace WPI_DEPRECATED() macro with [[deprecated]] attribute (#5373)
753cb49a5e [ntcore] Fix doxygen module in generated C types (NFC) (#5374)
1c00a52b67 [hal] Expose CAN timestamp base clock (#5357)
91cbcea841 Replace SFINAE with concepts (#5361)
d57d1a4598 [wpimath] Remove unnecessary template argument from unit formatter (#5367)
5acc5e22aa [wpimath] Only compute eigenvalues with EigenSolvers (#5369)
d3c9316a97 extend shuffleboard test timeout (#5377)
1ea868081a [ci] Fix /format command (#5376)
5fac18ff4a Update formatting to clang-format 16 (#5370)
a94a998002 [wpimath] Generalize Eigen formatter (#5360)
125f6ea101 [wpimath] Make SwerveDriveKinematics::ToChassisSpeeds() take const-ref argument (#5363)
51066a5a8a [wpimath] Move unit formatters into units library (#5358)
282c032b60 [wpilibc] Add unit-aware Joystick.GetDirection() (#5319)
073d19cb69 [build] Fix CMake warning (#5359)
01490fc77b [wpiutil] DataLog: Add documentation for append methods (NFC) (#5348)
c9b612c986 [wpilibcExamples] Make C++ state-space elevator KF and LQR match Java (#5346)
eed1e6e3cb [wpimath] Replace DiscretizeAQTaylor() with DiscretizeAQ() (#5344)
c976f40364 [readme] Document how to run examples in simulation (#5340)
4d28bdc19e [ci] Update Github Pages deploy action parameters (#5343)
e0f851871f [ci] Fix github pages deploy version (#5342)
063c8cbedc Run wpiformat (NFC) (#5341)
96e41c0447 [ci] Update deploy and sshagent actions (#5338)
fd294bdd71 [build] Fix compilation with GCC 13 (#5322)
d223e4040b [dlt] Add delete without download functionality (#5329)
abc19bcb43 [upstream_utils] Zero out commit hashes and show 40 digits in index hashes (#5336)
e909f2e687 [build] Update gradle cache repo name (#5334)
52bd5b972d [wpimath] Rewrite DARE solver (#5328)
3876a2523a [wpimath] Remove unused MatrixImpl() function (#5330)
c82fcb1975 [wpiutil] Add reflection based cleanup helper (#4919)
15ba95df7e [wpiutil] Use std::filesystem (#4941)
77c2124fc5 [wpimath] Remove Eigen's custom STL types (#4945)
27fb47ab10 [glass] Field2D: Embed standard field images (#5159)
102e4f2566 [wpilib] Remove deprecated and broken SPI methods (#5249)
463a90f1df [wpilib, hal] Add function to read the RSL state (#5312)
7a90475eec [wpilib] Update RobotBase documentation (NFC) (#5320)
218cfea16b [wpilib] DutyCycleEncoder: Fix reset behavior (#5287)
91392823ff [build] Update to gradle 8.1 (#5303)
258b7cc48b [wpilibj] Filesystem.getDeployDirectory(): Strip JNI path from user.dir (#5317)
26cc43bee1 [wpilib] Add documentation to SPI mode enum (NFC) (#5324)
ac4da9b1cb [hal] Add HAL docs for Addressable LED (NFC) (#5304)
21d4244cf7 [wpimath] Fix DCMotor docs (NFC) (#5309)
1dff81bea7 [hal] Miscellaneous HAL doc fixes (NFC) (#5306)
7ce75574bf [wpimath] Upgrade to Drake v1.15.0 (#5310)
576bd646ae [hal] Add CANManufacturer for Redux Robotics (#5305)
ee3b4621e5 [commands] Add onlyWhile and onlyIf (#5291)
40ca094686 [commands] Fix RepeatCommand calling end() twice (#5261)
9cbeb841f5 [rtns] Match imaging tool capitalization (#5265)
a63d06ff77 [examples] Add constants to java gearsbot example (#5248)
b6c43322a3 [wpilibc] XboxController: Add return tag to docs (NFC) (#5246)
5162d0001c [hal] Fix and document addressable LED timings (#5272)
90fabe9651 [wpilibj] Use method references in drive class initSendable() (#5251)
24828afd11 [wpimath] Fix desaturateWheelSpeeds to account for negative speeds (#5269)
e099948a77 [wpimath] Clean up rank notation in docs (NFC) (#5274)
fd2d8cb9c1 [hal] Use std::log2() for base-2 logarithm (#5278)
ba8c64bcff [wpimath] Fix misspelled Javadoc parameters in pose estimators (NFC) (#5292)
f53c6813d5 [wpimath] Patch Eigen warnings (#5290)
663703d370 [gitattributes] Mark json files as lf text files (#5256)
aa34aacf6e [wpilib] Shuffleboard: Keep duplicates on SelectTab() (#5198)
63512bbbb8 [wpimath] Fix potential divide-by-zero in RKDP (#5242)
9227b2166e [wpilibj] DriverStation: Fix joystick data logs (#5240)
fbf92e9190 [wpinet] ParallelTcpConnector: don't connect to duplicate addresses (#5169)
2108a61362 [ntcore] NT4 client: close timed-out connections (#5175)
0a66479693 [ntcore] Optimize scan of outgoing messages (#5227)
b510c17ef6 [hal] Fix RobotController.getComments() mishandling quotes inside the comments string (#5197)
e7a7eb2e93 [commands] WaitCommand: Remove subclass doc note (NFC) (#5200)
a465f2d8f0 [examples] Shuffleboard: Correct parameter order (#5204)
a3364422fa LICENSE.md: Bump year to 2023 (#5195)
df3242a40a [wpimath] Fix NaN in C++ MakeCostMatrix() that takes an array (#5194)
00abb8c1e0 [commands] RamseteCommand: default-initialize m_prevSpeeds (#5188)
c886273fd7 [wpilibj] DutyCycleEncoder.setDistancePerRotation(): fix simulation (#5147)
53b5fd2ace [ntcore] Use int64 for datalog type string (#5186)
56b758320f [wpilib] DataLogManager: increase time for datetime to be valid (#5185)
08f298e4cd [wpimath] Fix Pose3d log returning Twist3d NaN for theta between 1E-8 and 1E-7 (#5168)
6d0c5b19db [commands] CommandScheduler.isComposed: Remove incorrect throws clause (NFC) (#5183)
0d22cf5ff7 [wpilib] Fix enableLiveWindowInTest crashing in disabled (#5173)
32ec5b3f75 [wpilib] Add isTestEnabled and minor docs cleanup (#5172)
e5c4c6b1a7 [wpimath] Fix invalid iterator access in TimeInterpolatableBuffer (#5138)
099d048d9e [wpimath] Fix Pose3d log returning Twist3d NaN for theta between 1E-9 and 1E-8 (#5143)
4af84a1c12 Fix Typos (NFC) (#5137)
ce3686b80d [wpimath] Check LTV controller max velocity precondition (#5142)
4b0eecaee0 [commands] Subsystem: Add default command removal method (#5064)
edf4ded412 [wpilib] PH: Revert to 5V rail being fixed 5V (#5122)
4c46b6aff9 [wpilibc] Fix DataLogManager crash on exit in sim (#5125)
490ca4a68a [wpilibc] Fix XboxController::GetBackButton doc (NFC) (#5131)
cbb5b0b802 [hal] Simulation: Fix REV PH solenoids 8+ (#5132)
bb7053d9ee [hal] Fix HAL_GetRuntimeType being slow on the roboRIO (#5130)
9efed9a533 Update .clang-format to c++20 (#5121)
dbbfe1aed2 [wpilib] Use PH voltage to calc Analog pressure switch threshold (#5115)
de65a135c3 [wpilib] DutyCycleEncoderSim: Add channel number constructor (#5118)
3e9788cdff [docs] Strip path from generated NT docs (#5119)
ecb072724d [ntcore] Client::Disconnect(): actually close connection (#5113)
0d462a4561 [glass] NT view: Change string/string array to quoted (#5111)
ba37986561 [ntcore] NetworkClient::Disconnect: Add null check (#5112)
25ab9cda92 [glass,ov] Provide menu item to create topic from root (#5110)
2f6251d4a6 [glass] Set default value when publishing new topic (#5109)
e9a7bed988 [wpimath] Add timestamp getter to MathShared (#5091)
9cc14bbb43 [ntcore] Add stress test to dev executable (#5107)
8068369542 [wpinet] uv: Stop creating handles when closing loop (#5102)
805c837a42 [ntcore] Fix use-after-free in server (#5101)
fd18577ba0 [commands] Improve documentation of addRequirements (NFC) (#5103)
74dea9f05e [wpimath] Fix exception for empty pose buffer in pose estimators (#5106)
9eef79d638 [wpilib] PneumaticHub: Document range of enableCompressorAnalog (NFC) (#5099)
843574a810 [ntcore] Use wpi::Now instead of loop time for transmit time
226ef35212 [wpinet] WebSocket: Reduce server send frame overhead
b30664d630 [ntcore] Reduce initial connection overhead
804e5ce236 [examples] MecanumDrive: Fix axis comment in C++ example (NFC) (#5096)
49af88f2bb [examples] ArmSimulation: Fix flaky test (#5093)
d56314f866 [wpiutil] Disable mock time on the Rio (#5092)
43975ac7cc [examples] ArmSimulation, ElevatorSimulation: Extract mechanism to class (#5052)
5483464158 [examples, templates] Improve descriptions (NFC) (#5051)
785e7dd85c [wpilibc] SendableChooser: static_assert copy- and default-constructibility (#5078)
e57ded8c39 [ntcore] Improve disconnect error reporting (#5085)
01f0394419 [wpinet] Revert WebSocket: When Close() is called, call closed immediately (#5084)
59be120982 [wpimath] Fix Pose3d exp()/log() and add rotation vector constructor to Rotation3d (#5072)
37f065032f [wpilib] Refactor TimedRobot tests (#5068)
22a170bee7 [wpilib] Add Notifier test (#5070)
2f310a748c [wpimath] Fix DCMotor.getSpeed() (#5061)
b43ec87f57 [wpilib] ElevatorSim: Fix WouldHitLimit methods (#5057)
19267bef0c [ntcore] Output warning on property set on unpublished topic (#5059)
84cbd48d84 [ntcore] Handle excludeSelf on SetDefault (#5058)
1f35750865 [cameraserver] Add GetInstance() to all functions (#5054)
8230fc631d [wpilib] Revert throw on nonexistent SimDevice name in SimDeviceSim (#5053)
b879a6f8c6 [wpinet] WebSocket: When Close() is called, call closed immediately (#5047)
49459d3e45 [ntcore] Change wire timeout to fixed 1 second (#5048)
4079eabe9b [wpimath] Discard stale pose estimates (#5045)
fe5d226a19 [glass] Fix option for debug-level NT logging (#5049)
b7535252c2 [ntcore] Don't leak buffers in rare WS shutdown case (#5046)
b61ac6db33 [ntcore] Add client disconnect function (#5022)
7b828ce84f [wpimath] Add nearest to Pose2d and Translation2d (#4882)
08a536291b [examples] Improvements to Elevator Simulation Example (#4937)
193a10d020 [wpigui] Limit frame rate to 120 fps by default (#5030)
7867bbde0e [wpilib] Clarify DS functions provided by FMS (NFC) (#5043)
fa7c01b598 [glass] Add option for debug-level NT logging (#5007)
2b81610248 [wpiutil] Add msgpack to datalog Python example (#5032)
a4a369b8da CONTRIBUTING.md: Add unicodeit CLI to math docs guidelines (#5031)
d991f6e435 [wpilib] Throw on nonexistent SimDevice name in SimDeviceSim constructor (#5041)
a27a047ae8 [hal] Check for null in getSimDeviceName JNI (#5038)
2f96cae31a [examples] Hatchbots: Add telemetry (#5011)
83ef8f9658 [simulation] GUI: Fix buffer overflow in joystick axes copy (#5036)
4054893669 [commands] Fix C++ Select() factory (#5024)
f75acd11ce [commands] Use Timer.restart() (#5023)
8bf67b1b33 [wpimath] PIDController::Calculate(double, double): update setpoint flag (#5021)
49bb1358d8 [wpiutil] MemoryBuffer: Fix GetMemoryBufferForStream (#5017)
9c4c07c0f9 [wpiutil] Remove NDEBUG check for debug-level logging (#5018)
1a47cc2e86 [ntcore] Use full handle when subscribing (#5013)
7cd30cffbc Ignore networktables.json (#5006)
92aecab2ef [commands] Command controllers are not subclasses (NFC) (#5000)
8785bba080 [ntcore] Special-case default timestamps (#5003)
9e5b7b8040 [ntcore] Handle topicsonly followed by value subscribe (#4991)
917906530a [wpilib] Add Timer::Restart() (#4963)
00aa66e4fd [wpimath] Remove extraneous assignments from DiscretizeAB() (#4967)
893320544a [examples] C++ RamseteCommand: Fix units (#4954)
b95d0e060d [wpilib] XboxController: Fix docs discrepancy (NFC) (#4993)
008232b43c [ntcore] Write empty persistent file if none found (#4996)
522be348f4 [examples] Rewrite tags (NFC) (#4961)
d48a83dee2 [wpimath] Update Wikipedia links for quaternion to Euler angle conversion (NFC) (#4995)
504fa22143 [wpimath] Workaround intellisense Eigen issue (#4992)
b2b25bf09f [commands] Fix docs inconsistency for toggleOnFalse(Command) (NFC) (#4978)
ce3dc4eb3b [hal] Properly use control word that is in sync with DS data (#4989)
1ea48caa7d [wpilib] Fix C++ ADXRS450 and Java SPI gyro defs (#4988)
fb101925a7 [build] Include wpimathjni in commands binaries (#4981)
657951f6dd [starter] Add a process starter for use by the installer for launching tools (#4931)
a60ca9d71c [examples] Update AprilTag field load API usage (#4975)
f8a45f1558 [wpimath] Remove print statements from tests (#4977)
ecba8b99a8 [examples] Fix swapped arguments in MecanumControllerCommand example (#4976)
e95e88fdf9 [examples] Add comment to drivedistanceoffboard example (#4877)
371d15dec3 [examples] Add Computer Vision Pose Estimation and Latency Compensation Example (#4901)
cb9b8938af [sim] Enable docking in the GUI (#4960)
3b084ecbe0 [apriltag] AprilTagFieldLayout: Improve API shape for loading builtin JSONs (#4949)
27ba096ea1 [wpilib] Fix MOI calculation error in SingleJointedArmSim (#4968)
42c997a3c4 [wpimath] Fix Pose3d exponential and clean up Pose3d logarithm (#4970)
5f1a025f27 [wpilibj] Fix typo in MecanumDrive docs (NFC) (#4969)
0ebf79b54c [wpimath] Fix typo in Pose3d::Exp() docs (NFC) (#4966)
a8c465f3fb [wpimath] HolonomicDriveController: Add getters for the controllers (#4948)
a7b1ab683d [wpilibc] Add unit test for fast deconstruction of GenericHID (#4953)
bd6479dc29 [build] Add Spotless for JSON (#4956)
5cb0340a8c [hal, wpilib] Load joystick values upon code initialization (#4950)
ab0e8c37a7 [readme] Update build requirements (NFC) (#4947)
b74ac1c645 [build] Add apriltag to C++ cmake example builds (#4944)
cf1a411acf [examples] Add example programs for AprilTags detection (#4932)
1e05b21ab5 [wpimath] Fix PID atSetpoint to not return true prematurely (#4906)
e5a6197633 [wpimath] Fix SwerveDriveKinematics not initializing a new array each time (#4942)
039edcc23f [ntcore] Queue current value on subscriber creation (#4938)
f7f19207e0 [wpimath] Allow multiple vision measurements from same timestamp (#4917)
befd12911c [commands] Delete UB-causing rvalue variants of CommandPtr methods (#4923)
34519de60a [commands] Fix spacing in command composition exception (#4924)
dc4355c031 [hal] Add handle constructor and name getters for sim devices (#4925)
53d8d33bca [hal, wpilibj] Add missing distance per pulse functions to EncoderSim (#4928)
530ae40614 [apriltag] Explain what April tag poses represent (NFC) (#4930)
79f565191e [examples] DigitalCommunication, I2CCommunication: Add tests (#4865)
2cd9be413f [wpilib, examples] Cleanup PotentiometerPID, Ultrasonic, UltrasonicPID examples (#4893)
babb0c1fcf [apriltag] Add 2023 field layout JSON (#4912)
330ba45f9c [wpimath] Fix swerve kinematics util classes equals function (#4907)
51272ef6b3 [fieldImages] Add 2023 field (#4915)
0d105ab771 [commands] Deduplicate command test utils (#4897)
cf4235ea36 [wpiutil] Guard MSVC pragma in SymbolExports.h (#4911)
2d4b7b9147 [build] Update opencv version in opencv.gradle (#4909)
aec6f3d506 [ntcore] Fix client flush behavior (#4903)
bfe346c76a [build] Fix cmake java resources (#4898)
Change-Id: Ia1dd90fe42c6cd5df281b8a5b710e136f54355f4
git-subtree-dir: third_party/allwpilib
git-subtree-split: f1a82828fed8950f9a3f1586c44327027627a0c8
Signed-off-by: James Kuszmaul <jabukuszmaul+collab@gmail.com>
diff --git a/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceAnnouncer.java b/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceAnnouncer.java
index 6dff454..9ccb322 100644
--- a/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceAnnouncer.java
+++ b/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceAnnouncer.java
@@ -4,11 +4,18 @@
package edu.wpi.first.net;
+import edu.wpi.first.util.WPICleaner;
+import java.lang.ref.Cleaner.Cleanable;
import java.util.Map;
/** Class to announce over mDNS that a service is available. */
public class MulticastServiceAnnouncer implements AutoCloseable {
private final int m_handle;
+ private final Cleanable m_cleanable;
+
+ private static Runnable cleanupAction(int handle) {
+ return () -> WPINetJNI.freeMulticastServiceAnnouncer(handle);
+ }
/**
* Creates a MulticastServiceAnnouncer.
@@ -24,6 +31,7 @@
String[] values = txt.values().toArray(String[]::new);
m_handle =
WPINetJNI.createMulticastServiceAnnouncer(serviceName, serviceType, port, keys, values);
+ m_cleanable = WPICleaner.register(this, cleanupAction(m_handle));
}
/**
@@ -36,11 +44,12 @@
public MulticastServiceAnnouncer(String serviceName, String serviceType, int port) {
m_handle =
WPINetJNI.createMulticastServiceAnnouncer(serviceName, serviceType, port, null, null);
+ m_cleanable = WPICleaner.register(this, cleanupAction(m_handle));
}
@Override
public void close() {
- WPINetJNI.freeMulticastServiceAnnouncer(m_handle);
+ m_cleanable.clean();
}
public void start() {
diff --git a/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceResolver.java b/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceResolver.java
index 2426de2..b676f4c 100644
--- a/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceResolver.java
+++ b/wpinet/src/main/java/edu/wpi/first/net/MulticastServiceResolver.java
@@ -4,9 +4,17 @@
package edu.wpi.first.net;
+import edu.wpi.first.util.WPICleaner;
+import java.lang.ref.Cleaner.Cleanable;
+
/** Class to resolve a service over mDNS. */
public class MulticastServiceResolver implements AutoCloseable {
private final int m_handle;
+ private final Cleanable m_cleanable;
+
+ private static Runnable cleanupAction(int handle) {
+ return () -> WPINetJNI.freeMulticastServiceResolver(handle);
+ }
/**
* Creates a MulticastServiceResolver.
@@ -15,11 +23,12 @@
*/
public MulticastServiceResolver(String serviceType) {
m_handle = WPINetJNI.createMulticastServiceResolver(serviceType);
+ m_cleanable = WPICleaner.register(this, cleanupAction(m_handle));
}
@Override
public void close() {
- WPINetJNI.freeMulticastServiceResolver(m_handle);
+ m_cleanable.clean();
}
public void start() {
diff --git a/wpinet/src/main/native/cpp/DsClient.cpp b/wpinet/src/main/native/cpp/DsClient.cpp
index 86f8f00..97509cb 100644
--- a/wpinet/src/main/native/cpp/DsClient.cpp
+++ b/wpinet/src/main/native/cpp/DsClient.cpp
@@ -21,6 +21,9 @@
: m_logger{logger},
m_tcp{uv::Tcp::Create(loop)},
m_timer{uv::Timer::Create(loop)} {
+ if (!m_tcp || !m_timer) {
+ return;
+ }
m_tcp->end.connect([this] {
WPI_DEBUG4(m_logger, "DS connection closed");
clearIp();
diff --git a/wpinet/src/main/native/cpp/EventLoopRunner.cpp b/wpinet/src/main/native/cpp/EventLoopRunner.cpp
index 7c7e79c..6c143ac 100644
--- a/wpinet/src/main/native/cpp/EventLoopRunner.cpp
+++ b/wpinet/src/main/native/cpp/EventLoopRunner.cpp
@@ -59,6 +59,7 @@
h.SetLoopClosing(true);
h.Close();
});
+ loop.SetClosing();
});
m_owner.Join();
}
diff --git a/wpinet/src/main/native/cpp/MulticastHandleManager.cpp b/wpinet/src/main/native/cpp/MulticastHandleManager.cpp
index d249a1c..ab44da8 100644
--- a/wpinet/src/main/native/cpp/MulticastHandleManager.cpp
+++ b/wpinet/src/main/native/cpp/MulticastHandleManager.cpp
@@ -10,3 +10,17 @@
static MulticastHandleManager manager;
return manager;
}
+
+#ifdef _WIN32
+MulticastHandleManager::~MulticastHandleManager() {
+ // Multicast handles cannot be safely destructed on windows during shutdown.
+ // Just leak all handles.
+ for (auto&& i : resolvers) {
+ i.second.release();
+ }
+
+ for (auto&& i : announcers) {
+ i.second.release();
+ }
+}
+#endif
diff --git a/wpinet/src/main/native/cpp/MulticastHandleManager.h b/wpinet/src/main/native/cpp/MulticastHandleManager.h
index 8c070f7..9925e84 100644
--- a/wpinet/src/main/native/cpp/MulticastHandleManager.h
+++ b/wpinet/src/main/native/cpp/MulticastHandleManager.h
@@ -20,6 +20,9 @@
resolvers;
wpi::DenseMap<size_t, std::unique_ptr<wpi::MulticastServiceAnnouncer>>
announcers;
+#ifdef _WIN32
+ ~MulticastHandleManager();
+#endif
};
MulticastHandleManager& GetMulticastManager();
diff --git a/wpinet/src/main/native/cpp/ParallelTcpConnector.cpp b/wpinet/src/main/native/cpp/ParallelTcpConnector.cpp
index 317f0a2..5fb1dd5 100644
--- a/wpinet/src/main/native/cpp/ParallelTcpConnector.cpp
+++ b/wpinet/src/main/native/cpp/ParallelTcpConnector.cpp
@@ -4,6 +4,8 @@
#include "wpinet/ParallelTcpConnector.h"
+#include <cstring>
+
#include <fmt/format.h>
#include <wpi/Logger.h>
@@ -24,6 +26,9 @@
m_reconnectRate{reconnectRate},
m_connected{std::move(connected)},
m_reconnectTimer{uv::Timer::Create(loop)} {
+ if (!m_reconnectTimer) {
+ return;
+ }
m_reconnectTimer->timeout.connect([this] {
if (!IsConnected()) {
WPI_DEBUG1(m_logger, "timed out, reconnecting");
@@ -62,6 +67,29 @@
}
}
+static bool AddressEquals(const sockaddr& a, const sockaddr& b) {
+ if (a.sa_family != b.sa_family) {
+ return false;
+ }
+ if (a.sa_family == AF_INET) {
+ return reinterpret_cast<const sockaddr_in&>(a).sin_addr.s_addr ==
+ reinterpret_cast<const sockaddr_in&>(b).sin_addr.s_addr;
+ }
+ if (a.sa_family == AF_INET6) {
+ return std::memcmp(&(reinterpret_cast<const sockaddr_in6&>(a).sin6_addr),
+ &(reinterpret_cast<const sockaddr_in6&>(b).sin6_addr),
+ sizeof(in6_addr)) == 0;
+ }
+ return false;
+}
+
+static inline sockaddr_storage CopyAddress(const sockaddr& addr,
+ socklen_t len) {
+ sockaddr_storage storage;
+ std::memcpy(&storage, &addr, len);
+ return storage;
+}
+
void ParallelTcpConnector::Connect() {
if (IsConnected()) {
return;
@@ -85,8 +113,25 @@
// kick off parallel connection attempts
for (auto ai = &addrinfo; ai; ai = ai->ai_next) {
+ // check for duplicates
+ bool duplicate = false;
+ for (auto&& attempt : m_attempts) {
+ if (AddressEquals(*ai->ai_addr, reinterpret_cast<const sockaddr&>(
+ attempt.first))) {
+ duplicate = true;
+ break;
+ }
+ }
+ if (duplicate) {
+ continue;
+ }
+
auto tcp = uv::Tcp::Create(m_loop);
- m_attempts.emplace_back(tcp);
+ if (!tcp) {
+ continue;
+ }
+ m_attempts.emplace_back(CopyAddress(*ai->ai_addr, ai->ai_addrlen),
+ tcp);
auto connreq = std::make_shared<uv::TcpConnectReq>();
connreq->connected.connect(
@@ -164,8 +209,8 @@
}
m_resolvers.clear();
- for (auto&& tcpWeak : m_attempts) {
- if (auto tcp = tcpWeak.lock()) {
+ for (auto&& attempt : m_attempts) {
+ if (auto tcp = attempt.second.lock()) {
if (tcp.get() != except) {
WPI_DEBUG4(m_logger, "canceling connection attempt ({})",
static_cast<void*>(tcp.get()));
diff --git a/wpinet/src/main/native/cpp/PortForwarder.cpp b/wpinet/src/main/native/cpp/PortForwarder.cpp
index 257b620..67cd806 100644
--- a/wpinet/src/main/native/cpp/PortForwarder.cpp
+++ b/wpinet/src/main/native/cpp/PortForwarder.cpp
@@ -49,6 +49,9 @@
unsigned int remotePort) {
m_impl->runner.ExecSync([&](uv::Loop& loop) {
auto server = uv::Tcp::Create(loop);
+ if (!server) {
+ return;
+ }
// bind to local port
server->Bind("", port);
@@ -71,6 +74,10 @@
client->SetData(connected);
auto remote = uv::Tcp::Create(loop);
+ if (!remote) {
+ client->Close();
+ return;
+ }
remote->error.connect(
[remotePtr = remote.get(),
clientWeak = std::weak_ptr<uv::Tcp>(client)](uv::Error err) {
diff --git a/wpinet/src/main/native/cpp/WebSocket.cpp b/wpinet/src/main/native/cpp/WebSocket.cpp
index ba57925..43b901e 100644
--- a/wpinet/src/main/native/cpp/WebSocket.cpp
+++ b/wpinet/src/main/native/cpp/WebSocket.cpp
@@ -5,6 +5,9 @@
#include "wpinet/WebSocket.h"
#include <random>
+#include <span>
+#include <string>
+#include <string_view>
#include <fmt/format.h>
#include <wpi/Base64.h>
@@ -14,34 +17,91 @@
#include <wpi/raw_ostream.h>
#include <wpi/sha1.h>
+#include "WebSocketDebug.h"
+#include "WebSocketSerializer.h"
#include "wpinet/HttpParser.h"
#include "wpinet/raw_uv_ostream.h"
#include "wpinet/uv/Stream.h"
using namespace wpi;
-namespace {
-class WebSocketWriteReq : public uv::WriteReq {
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG
+static std::string DebugBinary(std::span<const uint8_t> val) {
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG_CONTENT
+ std::string str;
+ wpi::raw_string_ostream stros{str};
+ for (auto ch : val) {
+ stros << fmt::format("{:02x},", static_cast<unsigned int>(ch) & 0xff);
+ }
+ return str;
+#else
+ return "";
+#endif
+}
+
+static inline std::string_view DebugText(std::string_view val) {
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG_CONTENT
+ return val;
+#else
+ return "";
+#endif
+}
+#endif // WPINET_WEBSOCKET_VERBOSE_DEBUG
+
+class WebSocket::WriteReq : public uv::WriteReq,
+ public detail::WebSocketWriteReqBase {
public:
- explicit WebSocketWriteReq(
+ explicit WriteReq(
+ std::weak_ptr<WebSocket> ws,
std::function<void(std::span<uv::Buffer>, uv::Error)> callback)
- : m_callback{std::move(callback)} {
- finish.connect([this](uv::Error err) {
- for (auto&& buf : m_internalBufs) {
- buf.Deallocate();
- }
- m_callback(m_userBufs, err);
- });
+ : m_ws{std::move(ws)}, m_callback{std::move(callback)} {
+ finish.connect([this](uv::Error err) { Send(err); });
}
+ void Send(uv::Error err) {
+ auto ws = m_ws.lock();
+ if (!ws || err) {
+ WS_DEBUG("no WS or error, calling callback\n");
+ m_frames.ReleaseBufs();
+ m_callback(m_userBufs, err);
+ return;
+ }
+
+ // Continue() is designed so this is *only* called on frame boundaries
+ if (m_controlCont) {
+ // We have a control frame; switch to it. We will come back here via
+ // the control frame's m_cont when it's done.
+ WS_DEBUG("Continuing with a control write\n");
+ auto controlCont = std::move(m_controlCont);
+ m_controlCont.reset();
+ return controlCont->Send({});
+ }
+ int result = Continue(ws->m_stream, shared_from_this());
+ WS_DEBUG("Continue() -> {}\n", result);
+ if (result <= 0) {
+ m_frames.ReleaseBufs();
+ m_callback(m_userBufs, uv::Error{result});
+ if (result == 0 && m_cont) {
+ WS_DEBUG("Continuing with another write\n");
+ ws->m_curWriteReq = m_cont;
+ return m_cont->Send({});
+ } else {
+ ws->m_writeInProgress = false;
+ ws->m_curWriteReq.reset();
+ ws->m_lastWriteReq.reset();
+ }
+ }
+ }
+
+ std::weak_ptr<WebSocket> m_ws;
std::function<void(std::span<uv::Buffer>, uv::Error)> m_callback;
- SmallVector<uv::Buffer, 4> m_internalBufs;
- SmallVector<uv::Buffer, 4> m_userBufs;
+ std::shared_ptr<WriteReq> m_cont;
+ std::shared_ptr<WriteReq> m_controlCont;
};
-} // namespace
static constexpr uint8_t kFlagMasking = 0x80;
static constexpr uint8_t kLenMask = 0x7f;
+static constexpr size_t kWriteAllocSize = 4096;
class WebSocket::ClientHandshakeData {
public:
@@ -154,7 +214,7 @@
// Build client request
SmallVector<uv::Buffer, 4> bufs;
- raw_uv_ostream os{bufs, 4096};
+ raw_uv_ostream os{bufs, kWriteAllocSize};
os << "GET " << uri << " HTTP/1.1\r\n";
os << "Host: " << host << "\r\n";
@@ -258,11 +318,12 @@
// Start handshake timer if a timeout was specified
if (options.handshakeTimeout != (uv::Timer::Time::max)()) {
- auto timer = uv::Timer::Create(m_stream.GetLoopRef());
- timer->timeout.connect(
- [this]() { Terminate(1006, "connection timed out"); });
- timer->Start(options.handshakeTimeout);
- m_clientHandshake->timer = timer;
+ if (auto timer = uv::Timer::Create(m_stream.GetLoopRef())) {
+ timer->timeout.connect(
+ [this]() { Terminate(1006, "connection timed out"); });
+ timer->Start(options.handshakeTimeout);
+ m_clientHandshake->timer = timer;
+ }
}
}
@@ -272,7 +333,7 @@
// Build server response
SmallVector<uv::Buffer, 4> bufs;
- raw_uv_ostream os{bufs, 4096};
+ raw_uv_ostream os{bufs, kWriteAllocSize};
// Handle unsupported version
if (version != "13") {
@@ -320,13 +381,13 @@
void WebSocket::SendClose(uint16_t code, std::string_view reason) {
SmallVector<uv::Buffer, 4> bufs;
if (code != 1005) {
- raw_uv_ostream os{bufs, 4096};
+ raw_uv_ostream os{bufs, kWriteAllocSize};
const uint8_t codeMsb[] = {static_cast<uint8_t>((code >> 8) & 0xff),
static_cast<uint8_t>(code & 0xff)};
os << std::span{codeMsb};
os << reason;
}
- Send(kFlagFin | kOpClose, bufs, [](auto bufs, uv::Error) {
+ SendControl(kFlagFin | kOpClose, bufs, [](auto bufs, uv::Error) {
for (auto&& buf : bufs) {
buf.Deallocate();
}
@@ -345,6 +406,17 @@
m_stream.Shutdown([this] { m_stream.Close(); });
}
+static inline void Unmask(std::span<uint8_t> data,
+ std::span<const uint8_t, 4> key) {
+ int n = 0;
+ for (uint8_t& ch : data) {
+ ch ^= key[n++];
+ if (n >= 4) {
+ n = 0;
+ }
+ }
+}
+
void WebSocket::HandleIncoming(uv::Buffer& buf, size_t size) {
// ignore incoming data if we're failed or closed
if (m_state == FAILED || m_state == CLOSED) {
@@ -445,32 +517,37 @@
}
// limit maximum size
- if ((m_payload.size() + m_frameSize) > m_maxMessageSize) {
+ bool control = (m_header[0] & kFlagControl) != 0;
+ if (((control ? m_controlPayload.size() : m_payload.size()) +
+ m_frameSize) > m_maxMessageSize) {
return Fail(1009, "message too large");
}
}
}
if (m_frameSize != UINT64_MAX) {
- size_t need = m_frameStart + m_frameSize - m_payload.size();
+ bool control = (m_header[0] & kFlagControl) != 0;
+ size_t need;
+ if (control) {
+ need = m_frameSize - m_controlPayload.size();
+ } else {
+ need = m_frameStart + m_frameSize - m_payload.size();
+ }
size_t toCopy = (std::min)(need, data.size());
- m_payload.append(data.data(), data.data() + toCopy);
+ if (control) {
+ m_controlPayload.append(data.data(), data.data() + toCopy);
+ } else {
+ m_payload.append(data.data(), data.data() + toCopy);
+ }
data.remove_prefix(toCopy);
need -= toCopy;
if (need == 0) {
// We have a complete frame
// If the message had masking, unmask it
if ((m_header[1] & kFlagMasking) != 0) {
- uint8_t key[4] = {
- m_header[m_headerSize - 4], m_header[m_headerSize - 3],
- m_header[m_headerSize - 2], m_header[m_headerSize - 1]};
- int n = 0;
- for (uint8_t& ch : std::span{m_payload}.subspan(m_frameStart)) {
- ch ^= key[n++];
- if (n >= 4) {
- n = 0;
- }
- }
+ Unmask(control ? std::span{m_controlPayload}
+ : std::span{m_payload}.subspan(m_frameStart),
+ std::span<const uint8_t, 4>{&m_header[m_headerSize - 4], 4});
}
// Handle message
@@ -478,17 +555,23 @@
uint8_t opcode = m_header[0] & kOpMask;
switch (opcode) {
case kOpCont:
+ WS_DEBUG("WS Fragment {} [{}]\n", m_payload.size(),
+ DebugBinary(m_payload));
switch (m_fragmentOpcode) {
case kOpText:
if (!m_combineFragments || fin) {
- text(std::string_view{reinterpret_cast<char*>(
- m_payload.data()),
- m_payload.size()},
- fin);
+ std::string_view content{
+ reinterpret_cast<char*>(m_payload.data()),
+ m_payload.size()};
+ WS_DEBUG("WS RecvText(Defrag) {} ({})\n", m_payload.size(),
+ DebugText(content));
+ text(content, fin);
}
break;
case kOpBinary:
if (!m_combineFragments || fin) {
+ WS_DEBUG("WS RecvBinary(Defrag) {} ({})\n", m_payload.size(),
+ DebugBinary(m_payload));
binary(m_payload, fin);
}
break;
@@ -500,42 +583,38 @@
m_fragmentOpcode = 0;
}
break;
- case kOpText:
+ case kOpText: {
+ std::string_view content{reinterpret_cast<char*>(m_payload.data()),
+ m_payload.size()};
if (m_fragmentOpcode != 0) {
+ WS_DEBUG("WS RecvText {} ({}) -> INCOMPLETE FRAGMENT\n",
+ m_payload.size(), DebugText(content));
return Fail(1002, "incomplete fragment");
}
if (!m_combineFragments || fin) {
-#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG
- fmt::print(
- "WS RecvText({})\n",
- std::string_view{reinterpret_cast<char*>(m_payload.data()),
- m_payload.size()});
-#endif
- text(std::string_view{reinterpret_cast<char*>(m_payload.data()),
- m_payload.size()},
- fin);
+ WS_DEBUG("WS RecvText {} ({})\n", m_payload.size(),
+ DebugText(content));
+ text(content, fin);
}
if (!fin) {
+ WS_DEBUG("WS RecvText {} StartFrag\n", m_payload.size());
m_fragmentOpcode = opcode;
}
break;
+ }
case kOpBinary:
if (m_fragmentOpcode != 0) {
+ WS_DEBUG("WS RecvBinary {} ({}) -> INCOMPLETE FRAGMENT\n",
+ m_payload.size(), DebugBinary(m_payload));
return Fail(1002, "incomplete fragment");
}
if (!m_combineFragments || fin) {
-#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG
- SmallString<128> str;
- raw_svector_ostream stros{str};
- for (auto ch : m_payload) {
- stros << fmt::format("{:02x},",
- static_cast<unsigned int>(ch) & 0xff);
- }
- fmt::print("WS RecvBinary({})\n", str.str());
-#endif
+ WS_DEBUG("WS RecvBinary {} ({})\n", m_payload.size(),
+ DebugBinary(m_payload));
binary(m_payload, fin);
}
if (!fin) {
+ WS_DEBUG("WS RecvBinary {} StartFrag\n", m_payload.size());
m_fragmentOpcode = opcode;
}
break;
@@ -545,14 +624,15 @@
if (!fin) {
code = 1002;
reason = "cannot fragment control frames";
- } else if (m_payload.size() < 2) {
+ } else if (m_controlPayload.size() < 2) {
code = 1005;
} else {
- code = (static_cast<uint16_t>(m_payload[0]) << 8) |
- static_cast<uint16_t>(m_payload[1]);
- reason = drop_front(
- {reinterpret_cast<char*>(m_payload.data()), m_payload.size()},
- 2);
+ code = (static_cast<uint16_t>(m_controlPayload[0]) << 8) |
+ static_cast<uint16_t>(m_controlPayload[1]);
+ reason =
+ drop_front({reinterpret_cast<char*>(m_controlPayload.data()),
+ m_controlPayload.size()},
+ 2);
}
// Echo the close if we didn't previously send it
if (m_state != CLOSING) {
@@ -569,13 +649,30 @@
if (!fin) {
return Fail(1002, "cannot fragment control frames");
}
- ping(m_payload);
+ // If the connection is open, send a Pong in response
+ if (m_state == OPEN) {
+ SmallVector<uv::Buffer, 4> bufs;
+ {
+ raw_uv_ostream os{bufs, kWriteAllocSize};
+ os << m_controlPayload;
+ }
+ SendPong(bufs, [](auto bufs, uv::Error) {
+ for (auto&& buf : bufs) {
+ buf.Deallocate();
+ }
+ });
+ }
+ WS_DEBUG("WS RecvPing() {} ({})\n", m_controlPayload.size(),
+ DebugBinary(m_controlPayload));
+ ping(m_controlPayload);
break;
case kOpPong:
if (!fin) {
return Fail(1002, "cannot fragment control frames");
}
- pong(m_payload);
+ WS_DEBUG("WS RecvPong() {} ({})\n", m_controlPayload.size(),
+ DebugBinary(m_controlPayload));
+ pong(m_controlPayload);
break;
default:
return Fail(1002, "invalid message opcode");
@@ -585,7 +682,11 @@
m_header.clear();
m_headerSize = 0;
if (!m_combineFragments || fin) {
- m_payload.clear();
+ if (control) {
+ m_controlPayload.clear();
+ } else {
+ m_payload.clear();
+ }
}
m_frameStart = m_payload.size();
m_frameSize = UINT64_MAX;
@@ -594,114 +695,157 @@
}
}
-static void WriteFrame(WebSocketWriteReq& req,
- SmallVectorImpl<uv::Buffer>& bufs, bool server,
- uint8_t opcode, std::span<const uv::Buffer> data) {
- SmallVector<uv::Buffer, 4> internalBufs;
- raw_uv_ostream os{internalBufs, 4096};
-
+static void VerboseDebug(const WebSocket::Frame& frame) {
#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG
- if ((opcode & 0x7f) == 0x01) {
+ if ((frame.opcode & 0x7f) == 0x01) {
SmallString<128> str;
- for (auto&& d : data) {
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG_CONTENT
+ for (auto&& d : frame.data) {
str.append(std::string_view(d.base, d.len));
}
+#endif
fmt::print("WS SendText({})\n", str.str());
- } else if ((opcode & 0x7f) == 0x02) {
+ } else if ((frame.opcode & 0x7f) == 0x02) {
SmallString<128> str;
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG_CONTENT
raw_svector_ostream stros{str};
- for (auto&& d : data) {
+ for (auto&& d : frame.data) {
for (auto ch : d.data()) {
stros << fmt::format("{:02x},", static_cast<unsigned int>(ch) & 0xff);
}
}
- fmt::print("WS SendBinary({})\n", str.str());
- }
#endif
-
- // opcode (includes FIN bit)
- os << static_cast<unsigned char>(opcode);
-
- // payload length
- uint64_t size = 0;
- for (auto&& buf : data) {
- size += buf.len;
- }
- if (size < 126) {
- os << static_cast<unsigned char>((server ? 0x00 : kFlagMasking) | size);
- } else if (size <= 0xffff) {
- os << static_cast<unsigned char>((server ? 0x00 : kFlagMasking) | 126);
- const uint8_t sizeMsb[] = {static_cast<uint8_t>((size >> 8) & 0xff),
- static_cast<uint8_t>(size & 0xff)};
- os << std::span{sizeMsb};
+ fmt::print("WS SendBinary({})\n", str.str());
} else {
- os << static_cast<unsigned char>((server ? 0x00 : kFlagMasking) | 127);
- const uint8_t sizeMsb[] = {static_cast<uint8_t>((size >> 56) & 0xff),
- static_cast<uint8_t>((size >> 48) & 0xff),
- static_cast<uint8_t>((size >> 40) & 0xff),
- static_cast<uint8_t>((size >> 32) & 0xff),
- static_cast<uint8_t>((size >> 24) & 0xff),
- static_cast<uint8_t>((size >> 16) & 0xff),
- static_cast<uint8_t>((size >> 8) & 0xff),
- static_cast<uint8_t>(size & 0xff)};
- os << std::span{sizeMsb};
- }
-
- // clients need to mask the input data
- if (!server) {
- // generate masking key
- static std::random_device rd;
- static std::default_random_engine gen{rd()};
- std::uniform_int_distribution<unsigned int> dist(0, 255);
- uint8_t key[4];
- for (uint8_t& v : key) {
- v = dist(gen);
- }
- os << std::span<const uint8_t>{key, 4};
- // copy and mask data
- int n = 0;
- for (auto&& buf : data) {
- for (auto&& ch : buf.data()) {
- os << static_cast<unsigned char>(static_cast<uint8_t>(ch) ^ key[n++]);
- if (n >= 4) {
- n = 0;
- }
+ SmallString<128> str;
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG_CONTENT
+ raw_svector_ostream stros{str};
+ for (auto&& d : frame.data) {
+ for (auto ch : d.data()) {
+ stros << fmt::format("{:02x},", static_cast<unsigned int>(ch) & 0xff);
}
}
- bufs.append(internalBufs.begin(), internalBufs.end());
- // don't send the user bufs as we copied their data
- } else {
- bufs.append(internalBufs.begin(), internalBufs.end());
- // servers can just send the buffers directly without masking
- bufs.append(data.begin(), data.end());
+#endif
+ fmt::print("WS SendOp({}, {})\n", frame.opcode, str.str());
}
- req.m_internalBufs.append(internalBufs.begin(), internalBufs.end());
- req.m_userBufs.append(data.begin(), data.end());
+#endif
}
void WebSocket::SendFrames(
std::span<const Frame> frames,
std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
// If we're not open, emit an error and don't send the data
+ WS_DEBUG("SendFrames({})\n", frames.size());
if (m_state != OPEN) {
- int err;
- if (m_state == CONNECTING) {
- err = UV_EAGAIN;
- } else {
- err = UV_ESHUTDOWN;
- }
- SmallVector<uv::Buffer, 4> bufs;
- for (auto&& frame : frames) {
- bufs.append(frame.data.begin(), frame.data.end());
- }
- callback(bufs, uv::Error{err});
+ SendError(frames, callback);
return;
}
- auto req = std::make_shared<WebSocketWriteReq>(std::move(callback));
+ // Build request
+ auto req = std::make_shared<WriteReq>(weak_from_this(), std::move(callback));
+ int numBytes = 0;
+ for (auto&& frame : frames) {
+ VerboseDebug(frame);
+ numBytes += req->m_frames.AddFrame(frame, m_server);
+ req->m_continueFrameOffs.emplace_back(numBytes);
+ req->m_userBufs.append(frame.data.begin(), frame.data.end());
+ }
+
+ if (m_writeInProgress) {
+ if (auto lastReq = m_lastWriteReq.lock()) {
+ // if write currently in progress, process as a continuation of that
+ m_lastWriteReq = req;
+ // make sure we're really at the end
+ while (lastReq->m_cont) {
+ lastReq = lastReq->m_cont;
+ }
+ lastReq->m_cont = std::move(req);
+ return;
+ }
+ }
+
+ m_writeInProgress = true;
+ m_curWriteReq = req;
+ m_lastWriteReq = req;
+ req->Send({});
+}
+
+std::span<const WebSocket::Frame> WebSocket::TrySendFrames(
+ std::span<const Frame> frames,
+ std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
+ // If we're not open, emit an error and don't send the data
+ if (m_state != WebSocket::OPEN) {
+ SendError(frames, callback);
+ return {};
+ }
+
+ // If something else is still in flight, don't send anything
+ if (m_writeInProgress) {
+ return frames;
+ }
+
+ return detail::TrySendFrames(
+ m_server, m_stream, frames,
+ [this](std::function<void(std::span<uv::Buffer>, uv::Error)>&& cb) {
+ auto req = std::make_shared<WriteReq>(weak_from_this(), std::move(cb));
+ m_writeInProgress = true;
+ m_curWriteReq = req;
+ m_lastWriteReq = req;
+ return req;
+ },
+ std::move(callback));
+}
+
+void WebSocket::SendControl(
+ uint8_t opcode, std::span<const uv::Buffer> data,
+ std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
+ Frame frame{opcode, data};
+ // If we're not open, emit an error and don't send the data
+ if (m_state != WebSocket::OPEN) {
+ SendError({{frame}}, callback);
+ return;
+ }
+
+ // If nothing else is in flight, just use SendFrames()
+ std::shared_ptr<WriteReq> curReq = m_curWriteReq.lock();
+ if (!m_writeInProgress || !curReq) {
+ return SendFrames({{frame}}, std::move(callback));
+ }
+
+ // There's a write request in flight, but since this is a control frame, we
+ // want to send it as soon as we can, without waiting for all frames in that
+ // request (or any continuations) to be sent.
+ auto req = std::make_shared<WriteReq>(weak_from_this(), std::move(callback));
+ VerboseDebug(frame);
+ size_t numBytes = req->m_frames.AddFrame(frame, m_server);
+ req->m_userBufs.append(frame.data.begin(), frame.data.end());
+ req->m_continueFrameOffs.emplace_back(numBytes);
+ req->m_cont = curReq;
+  // There may be multiple control packets in flight; maintain in-order
+  // transmission. The linear search makes repeated insertion O(n^2), but
+  // having multiple control frames in flight should be rare.
+ if (!curReq->m_controlCont) {
+ curReq->m_controlCont = std::move(req);
+ } else {
+ curReq = curReq->m_controlCont;
+ while (curReq->m_cont != req->m_cont) {
+ curReq = curReq->m_cont;
+ }
+ curReq->m_cont = std::move(req);
+ }
+}
+
+void WebSocket::SendError(
+ std::span<const Frame> frames,
+ const std::function<void(std::span<uv::Buffer>, uv::Error)>& callback) {
+ int err;
+ if (m_state == WebSocket::CONNECTING) {
+ err = UV_EAGAIN;
+ } else {
+ err = UV_ESHUTDOWN;
+ }
SmallVector<uv::Buffer, 4> bufs;
for (auto&& frame : frames) {
- WriteFrame(*req, bufs, m_server, frame.opcode, frame.data);
+ bufs.append(frame.data.begin(), frame.data.end());
}
- m_stream.Write(bufs, req);
+ callback(bufs, uv::Error{err});
}
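
With the change above, an incoming ping on an open connection is answered with a pong internally (SendPong on the control payload), so the public ping signal is purely a notification. A minimal caller-side sketch, assuming ws is an already-established wpi::WebSocket (the logging body is illustrative):

#include <cstdint>
#include <span>

#include <fmt/format.h>

#include "wpinet/WebSocket.h"

// Illustrative only: no SendPong() is needed here, the library already replied.
void AttachPingLogger(wpi::WebSocket& ws) {
  ws.ping.connect([](std::span<const uint8_t> data) {
    fmt::print("saw ping with {} bytes of payload\n", data.size());
  });
}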
diff --git a/wpinet/src/main/native/cpp/WebSocketDebug.h b/wpinet/src/main/native/cpp/WebSocketDebug.h
new file mode 100644
index 0000000..5653b5f
--- /dev/null
+++ b/wpinet/src/main/native/cpp/WebSocketDebug.h
@@ -0,0 +1,21 @@
+// Copyright (c) FIRST and other WPILib contributors.
+// Open Source Software; you can modify and/or share it under the terms of
+// the WPILib BSD license file in the root directory of this project.
+
+#pragma once
+
+#include <fmt/format.h>
+
+// #define WPINET_WEBSOCKET_VERBOSE_DEBUG
+// #define WPINET_WEBSOCKET_VERBOSE_DEBUG_CONTENT
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#endif
+
+#ifdef WPINET_WEBSOCKET_VERBOSE_DEBUG
+#define WS_DEBUG(format, ...) \
+ ::fmt::print(FMT_STRING(format) __VA_OPT__(, ) __VA_ARGS__)
+#else
+#define WS_DEBUG(format, ...)
+#endif
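
WS_DEBUG compiles away entirely unless WPINET_WEBSOCKET_VERBOSE_DEBUG is defined, so the call sites added throughout WebSocket.cpp cost nothing in normal builds. A small sketch of a call site (the function below is a made-up example, not part of the library):

#include <cstddef>

#include "WebSocketDebug.h"

static void LogPayloadSize(std::size_t n) {
  // With WPINET_WEBSOCKET_VERBOSE_DEBUG defined, this expands to
  //   ::fmt::print(FMT_STRING("payload size {}\n"), n);
  // with it left commented out (the default), it expands to nothing and the
  // arguments are not even evaluated.
  WS_DEBUG("payload size {}\n", n);
}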
diff --git a/wpinet/src/main/native/cpp/WebSocketSerializer.cpp b/wpinet/src/main/native/cpp/WebSocketSerializer.cpp
new file mode 100644
index 0000000..c5d9548
--- /dev/null
+++ b/wpinet/src/main/native/cpp/WebSocketSerializer.cpp
@@ -0,0 +1,108 @@
+// Copyright (c) FIRST and other WPILib contributors.
+// Open Source Software; you can modify and/or share it under the terms of
+// the WPILib BSD license file in the root directory of this project.
+
+#include "WebSocketSerializer.h"
+
+#include <random>
+
+using namespace wpi::detail;
+
+static constexpr uint8_t kFlagMasking = 0x80;
+static constexpr size_t kWriteAllocSize = 4096;
+
+static std::span<uint8_t> BuildHeader(std::span<uint8_t, 10> header,
+ bool server,
+ const wpi::WebSocket::Frame& frame) {
+ uint8_t* pHeader = header.data();
+
+ // opcode (includes FIN bit)
+ *pHeader++ = frame.opcode;
+
+ // payload length
+ uint64_t size = 0;
+ for (auto&& buf : frame.data) {
+ size += buf.len;
+ }
+ if (size < 126) {
+ *pHeader++ = (server ? 0x00 : kFlagMasking) | size;
+ } else if (size <= 0xffff) {
+ *pHeader++ = (server ? 0x00 : kFlagMasking) | 126;
+ *pHeader++ = (size >> 8) & 0xff;
+ *pHeader++ = size & 0xff;
+ } else {
+ *pHeader++ = (server ? 0x00 : kFlagMasking) | 127;
+ *pHeader++ = (size >> 56) & 0xff;
+ *pHeader++ = (size >> 48) & 0xff;
+ *pHeader++ = (size >> 40) & 0xff;
+ *pHeader++ = (size >> 32) & 0xff;
+ *pHeader++ = (size >> 24) & 0xff;
+ *pHeader++ = (size >> 16) & 0xff;
+ *pHeader++ = (size >> 8) & 0xff;
+ *pHeader++ = size & 0xff;
+ }
+ return header.subspan(0, pHeader - header.data());
+}
+
+size_t SerializedFrames::AddClientFrame(const WebSocket::Frame& frame) {
+ uint8_t headerBuf[10];
+ auto header = BuildHeader(headerBuf, false, frame);
+
+ // allocate a buffer per frame
+ size_t size = header.size() + 4;
+ for (auto&& buf : frame.data) {
+ size += buf.len;
+ }
+ m_allocBufs.emplace_back(uv::Buffer::Allocate(size));
+ m_bufs.emplace_back(m_allocBufs.back());
+
+ char* internalBuf = m_allocBufs.back().data().data();
+ std::memcpy(internalBuf, header.data(), header.size());
+ internalBuf += header.size();
+
+ // generate masking key
+ static std::random_device rd;
+ static std::default_random_engine gen{rd()};
+ std::uniform_int_distribution<unsigned int> dist(0, 255);
+ uint8_t key[4];
+ for (uint8_t& v : key) {
+ v = dist(gen);
+ }
+ std::memcpy(internalBuf, key, 4);
+ internalBuf += 4;
+
+ // copy and mask data
+ int n = 0;
+ for (auto&& buf : frame.data) {
+ for (auto&& ch : buf.data()) {
+ *internalBuf++ = static_cast<uint8_t>(ch) ^ key[n++];
+ if (n >= 4) {
+ n = 0;
+ }
+ }
+ }
+ return size;
+}
+
+size_t SerializedFrames::AddServerFrame(const WebSocket::Frame& frame) {
+ uint8_t headerBuf[10];
+ auto header = BuildHeader(headerBuf, true, frame);
+
+ // manage allocBufs to efficiently store header
+ if (m_allocBufs.empty() ||
+ (m_allocBufPos + header.size()) > kWriteAllocSize) {
+ m_allocBufs.emplace_back(uv::Buffer::Allocate(kWriteAllocSize));
+ m_allocBufPos = 0;
+ }
+ char* internalBuf = m_allocBufs.back().data().data() + m_allocBufPos;
+ std::memcpy(internalBuf, header.data(), header.size());
+ m_bufs.emplace_back(internalBuf, header.size());
+ m_allocBufPos += header.size();
+ // servers can just send the buffers directly without masking
+ m_bufs.append(frame.data.begin(), frame.data.end());
+ size_t sent = header.size();
+ for (auto&& buf : frame.data) {
+ sent += buf.len;
+ }
+ return sent;
+}
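
For reference, BuildHeader() above follows the standard WebSocket length encoding: payloads under 126 bytes fit in the second header byte, payloads up to 0xffff use a 2-byte extension after a 126 marker, and larger payloads use an 8-byte extension after a 127 marker, which is why the scratch buffer is uint8_t[10]. A standalone sketch of just that size rule (clients add 4 more bytes for the masking key, as AddClientFrame does):

#include <cstddef>
#include <cstdint>

// Header bytes produced by BuildHeader() for a given payload length,
// excluding the 4-byte client masking key.
constexpr std::size_t WireHeaderSize(uint64_t payloadLen) {
  if (payloadLen < 126) {
    return 2;      // opcode/FIN byte + 7-bit length
  } else if (payloadLen <= 0xffff) {
    return 2 + 2;  // 126 marker + 16-bit big-endian length
  } else {
    return 2 + 8;  // 127 marker + 64-bit big-endian length
  }
}
static_assert(WireHeaderSize(125) == 2);
static_assert(WireHeaderSize(126) == 4);
static_assert(WireHeaderSize(70000) == 10);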
diff --git a/wpinet/src/main/native/cpp/WebSocketSerializer.h b/wpinet/src/main/native/cpp/WebSocketSerializer.h
new file mode 100644
index 0000000..264b8f5
--- /dev/null
+++ b/wpinet/src/main/native/cpp/WebSocketSerializer.h
@@ -0,0 +1,304 @@
+// Copyright (c) FIRST and other WPILib contributors.
+// Open Source Software; you can modify and/or share it under the terms of
+// the WPILib BSD license file in the root directory of this project.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <utility>
+
+#include <wpi/SmallVector.h>
+#include <wpi/SpanExtras.h>
+
+#include "WebSocketDebug.h"
+#include "wpinet/WebSocket.h"
+#include "wpinet/uv/Buffer.h"
+
+namespace wpi::detail {
+
+class SerializedFrames {
+ public:
+ SerializedFrames() = default;
+ SerializedFrames(const SerializedFrames&) = delete;
+ SerializedFrames& operator=(const SerializedFrames&) = delete;
+ ~SerializedFrames() { ReleaseBufs(); }
+
+ size_t AddFrame(const WebSocket::Frame& frame, bool server) {
+ if (server) {
+ return AddServerFrame(frame);
+ } else {
+ return AddClientFrame(frame);
+ }
+ }
+
+ size_t AddClientFrame(const WebSocket::Frame& frame);
+ size_t AddServerFrame(const WebSocket::Frame& frame);
+
+ void ReleaseBufs() {
+ for (auto&& buf : m_allocBufs) {
+ buf.Deallocate();
+ }
+ m_allocBufs.clear();
+ }
+
+ SmallVector<uv::Buffer, 4> m_allocBufs;
+ SmallVector<uv::Buffer, 4> m_bufs;
+ size_t m_allocBufPos = 0;
+};
+
+class WebSocketWriteReqBase {
+ public:
+ template <typename Stream, typename Req>
+ int Continue(Stream& stream, std::shared_ptr<Req> req);
+
+ SmallVector<uv::Buffer, 4> m_userBufs;
+ SerializedFrames m_frames;
+ SmallVector<int, 0> m_continueFrameOffs;
+ size_t m_continueBufPos = 0;
+ size_t m_continueFramePos = 0;
+};
+
+template <typename Stream, typename Req>
+int WebSocketWriteReqBase::Continue(Stream& stream, std::shared_ptr<Req> req) {
+ if (m_continueBufPos >= m_frames.m_bufs.size()) {
+ return 0; // nothing more to send
+ }
+
+ // try writing everything remaining
+ std::span bufs = std::span{m_frames.m_bufs}.subspan(m_continueBufPos);
+ int numBytes = 0;
+ for (auto&& buf : bufs) {
+ numBytes += buf.len;
+ }
+
+ int sentBytes = stream.TryWrite(bufs);
+ WS_DEBUG("TryWrite({}) -> {} (expected {})\n", bufs.size(), sentBytes,
+ numBytes);
+ if (sentBytes < 0) {
+ return sentBytes; // error
+ }
+
+ if (sentBytes == numBytes) {
+ m_continueBufPos = m_frames.m_bufs.size();
+ return 0; // nothing more to send
+ }
+
+ // we didn't send everything; deal with the leftovers
+
+  // figure out which frame was the last one (partially) sent
+ auto offIt = m_continueFrameOffs.begin() + m_continueFramePos;
+ auto offEnd = m_continueFrameOffs.end();
+ while (offIt != offEnd && *offIt < sentBytes) {
+ ++offIt;
+ }
+ assert(offIt != offEnd);
+
+ // build a list of buffers to send as a normal write:
+ SmallVector<uv::Buffer, 4> writeBufs;
+ auto bufIt = bufs.begin();
+ auto bufEnd = bufs.end();
+
+ // start with the remaining portion of the last buffer actually sent
+ int pos = 0;
+ while (bufIt != bufEnd && pos < sentBytes) {
+ pos += (bufIt++)->len;
+ }
+ if (bufIt != bufs.begin() && pos != sentBytes) {
+ writeBufs.emplace_back(
+ wpi::take_back((bufIt - 1)->bytes(), pos - sentBytes));
+ }
+
+ // continue through the last buffer of the last partial frame
+ while (bufIt != bufEnd && offIt != offEnd && pos < *offIt) {
+ pos += bufIt->len;
+ writeBufs.emplace_back(*bufIt++);
+ }
+ if (offIt != offEnd) {
+ ++offIt;
+ }
+
+ // if writeBufs is still empty, write all of the next frame
+ if (writeBufs.empty()) {
+ while (bufIt != bufEnd && offIt != offEnd && pos < *offIt) {
+ pos += bufIt->len;
+ writeBufs.emplace_back(*bufIt++);
+ }
+ if (offIt != offEnd) {
+ ++offIt;
+ }
+ }
+
+ m_continueFramePos = offIt - m_continueFrameOffs.begin();
+ m_continueBufPos += bufIt - bufs.begin();
+
+ if (writeBufs.empty()) {
+ WS_DEBUG("Write Done\n");
+ return 0;
+ }
+ WS_DEBUG("Write({})\n", writeBufs.size());
+ stream.Write(writeBufs, req);
+ return 1;
+}
+
+template <typename MakeReq, typename Stream>
+std::span<const WebSocket::Frame> TrySendFrames(
+ bool server, Stream& stream, std::span<const WebSocket::Frame> frames,
+ MakeReq&& makeReq,
+ std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
+ WS_DEBUG("TrySendFrames({})\n", frames.size());
+ auto frameIt = frames.begin();
+ auto frameEnd = frames.end();
+ while (frameIt != frameEnd) {
+ auto frameStart = frameIt;
+
+ // build buffers to send
+ SerializedFrames sendFrames;
+ SmallVector<int, 32> frameOffs;
+ int numBytes = 0;
+ while (frameIt != frameEnd) {
+ frameOffs.emplace_back(numBytes);
+ numBytes += sendFrames.AddFrame(*frameIt++, server);
+ if ((server && (numBytes >= 65536 || frameOffs.size() > 32)) ||
+ (!server && numBytes >= 8192)) {
+ // don't waste too much memory or effort on header generation or masking
+ break;
+ }
+ }
+
+ // try to send
+ int sentBytes = stream.TryWrite(sendFrames.m_bufs);
+ WS_DEBUG("TryWrite({}) -> {} (expected {})\n", sendFrames.m_bufs.size(),
+ sentBytes, numBytes);
+
+ if (sentBytes == 0) {
+ // we haven't started a frame yet; clean up any bufs that have actually
+ // sent, and return unsent frames
+ SmallVector<uv::Buffer, 4> bufs;
+ for (auto it = frames.begin(); it != frameStart; ++it) {
+ bufs.append(it->data.begin(), it->data.end());
+ }
+ callback(bufs, {});
+#ifdef __clang__
+ // work around clang bug
+ return {frames.data() + (frameStart - frames.begin()),
+ frames.data() + (frameEnd - frames.begin())};
+#else
+ return {frameStart, frameEnd};
+#endif
+ } else if (sentBytes < 0) {
+ // error
+ SmallVector<uv::Buffer, 4> bufs;
+ for (auto&& frame : frames) {
+ bufs.append(frame.data.begin(), frame.data.end());
+ }
+ callback(bufs, uv::Error{sentBytes});
+ return frames;
+ } else if (sentBytes != numBytes) {
+ // we didn't send everything; deal with the leftovers
+
+      // figure out which frame was the last one (partially) sent
+ auto offIt = frameOffs.begin();
+ auto offEnd = frameOffs.end();
+ bool isFin = true;
+ while (offIt != offEnd && *offIt < sentBytes) {
+ ++offIt;
+ isFin = (frameStart->opcode & WebSocket::kFlagFin) != 0;
+ ++frameStart;
+ }
+
+ if (offIt != offEnd && *offIt == sentBytes && isFin) {
+ // we finished at a normal FIN frame boundary; no need for a Write()
+ SmallVector<uv::Buffer, 4> bufs;
+ for (auto it = frames.begin(); it != frameStart; ++it) {
+ bufs.append(it->data.begin(), it->data.end());
+ }
+ callback(bufs, {});
+#ifdef __clang__
+ // work around clang bug
+ return {frames.data() + (frameStart - frames.begin()),
+ frames.data() + (frameEnd - frames.begin())};
+#else
+ return {frameStart, frameEnd};
+#endif
+ }
+
+ // build a list of buffers to send as a normal write:
+ SmallVector<uv::Buffer, 4> writeBufs;
+ auto bufIt = sendFrames.m_bufs.begin();
+ auto bufEnd = sendFrames.m_bufs.end();
+
+ // start with the remaining portion of the last buffer actually sent
+ int pos = 0;
+ while (bufIt != bufEnd && pos < sentBytes) {
+ pos += (bufIt++)->len;
+ }
+ if (bufIt != sendFrames.m_bufs.begin() && pos != sentBytes) {
+ writeBufs.emplace_back(
+ wpi::take_back((bufIt - 1)->bytes(), pos - sentBytes));
+ }
+
+ // continue through the last buffer of the last partial frame
+ while (bufIt != bufEnd && offIt != offEnd && pos < *offIt) {
+ pos += bufIt->len;
+ writeBufs.emplace_back(*bufIt++);
+ }
+ if (offIt != offEnd) {
+ ++offIt;
+ }
+
+ // move allocated buffers into request
+ auto req = makeReq(std::move(callback));
+ req->m_frames.m_allocBufs = std::move(sendFrames.m_allocBufs);
+ req->m_frames.m_allocBufPos = sendFrames.m_allocBufPos;
+
+ // if partial frame was non-FIN, put any additional non-FIN frames into
+ // continuation (so the caller isn't responsible for doing this)
+ size_t continuePos = 0;
+ while (frameStart != frameEnd && !isFin) {
+ if (offIt != offEnd) {
+ // we already generated the wire buffers for this frame, use them
+ while (pos < *offIt && bufIt != bufEnd) {
+ pos += bufIt->len;
+ continuePos += bufIt->len;
+ req->m_frames.m_bufs.emplace_back(*bufIt++);
+ }
+ ++offIt;
+ } else {
+ // WS_DEBUG("generating frame for continuation {} {}\n",
+ // frameStart->opcode, frameStart->data.size());
+ // need to generate and add this frame
+ continuePos += req->m_frames.AddFrame(*frameStart, server);
+ }
+ req->m_continueFrameOffs.emplace_back(continuePos);
+ isFin = (frameStart->opcode & WebSocket::kFlagFin) != 0;
+ ++frameStart;
+ }
+
+ // only the non-returned user buffers are added to the request
+ for (auto it = frames.begin(); it != frameStart; ++it) {
+ req->m_userBufs.append(it->data.begin(), it->data.end());
+ }
+
+ WS_DEBUG("Write({})\n", writeBufs.size());
+ stream.Write(writeBufs, req);
+#ifdef __clang__
+ // work around clang bug
+ return {frames.data() + (frameStart - frames.begin()),
+ frames.data() + (frameEnd - frames.begin())};
+#else
+ return {frameStart, frameEnd};
+#endif
+ }
+ }
+
+ // nothing left to send
+ SmallVector<uv::Buffer, 4> bufs;
+ for (auto&& frame : frames) {
+ bufs.append(frame.data.begin(), frame.data.end());
+ }
+ callback(bufs, {});
+ return {};
+}
+
+} // namespace wpi::detail
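
Putting the serializer pieces together: SerializedFrames owns the header (and, on the client side, masked-copy) allocations in m_allocBufs, while m_bufs is the complete buffer list handed to TryWrite()/Write(); ReleaseBufs() frees only the internal allocations, never the caller's payload buffers. A minimal synchronous sketch for a single server-side text frame (illustrative; a real caller must also handle a short write, as TrySendFrames() above does):

#include "WebSocketSerializer.h"
#include "wpinet/WebSocket.h"
#include "wpinet/uv/Buffer.h"
#include "wpinet/uv/Stream.h"

// Serialize one FIN text frame as a server and attempt a synchronous write.
// Returns bytes accepted (possibly short), 0 if the stream would block, or a
// negative uv error code.
int TrySendOneTextFrame(wpi::uv::Stream& stream, wpi::uv::Buffer payload) {
  wpi::WebSocket::Frame frame{
      wpi::WebSocket::kFlagFin | wpi::WebSocket::kOpText, {&payload, 1}};
  wpi::detail::SerializedFrames frames;
  frames.AddServerFrame(frame);           // header + payload appended to m_bufs
  return stream.TryWrite(frames.m_bufs);  // header bufs freed on scope exit
}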
diff --git a/wpinet/src/main/native/cpp/http_parser.cpp b/wpinet/src/main/native/cpp/http_parser.cpp
index 2bec4a7..66fff6a 100644
--- a/wpinet/src/main/native/cpp/http_parser.cpp
+++ b/wpinet/src/main/native/cpp/http_parser.cpp
@@ -1859,7 +1859,7 @@
&& parser->content_length != ULLONG_MAX);
/* The difference between advancing content_length and p is because
- * the latter will automaticaly advance on the next loop iteration.
+ * the latter will automatically advance on the next loop iteration.
* Further, if content_length ends up at 0, we want to see the last
* byte again for our message complete callback.
*/
@@ -2347,7 +2347,7 @@
case s_dead:
return 1;
- /* Skip delimeters */
+ /* Skip delimiters */
case s_req_schema_slash:
case s_req_schema_slash_slash:
case s_req_server_start:
diff --git a/wpinet/src/main/native/cpp/uv/Async.cpp b/wpinet/src/main/native/cpp/uv/Async.cpp
index f84bb9b..58ef5f3 100644
--- a/wpinet/src/main/native/cpp/uv/Async.cpp
+++ b/wpinet/src/main/native/cpp/uv/Async.cpp
@@ -17,6 +17,9 @@
}
std::shared_ptr<Async<>> Async<>::Create(const std::shared_ptr<Loop>& loop) {
+ if (loop->IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Async>(loop, private_init{});
int err = uv_async_init(loop->GetRaw(), h->GetRaw(), [](uv_async_t* handle) {
Async& h = *static_cast<Async*>(handle->data);
diff --git a/wpinet/src/main/native/cpp/uv/Check.cpp b/wpinet/src/main/native/cpp/uv/Check.cpp
index 13c2229..75ff47c 100644
--- a/wpinet/src/main/native/cpp/uv/Check.cpp
+++ b/wpinet/src/main/native/cpp/uv/Check.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Check> Check::Create(Loop& loop) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Check>(private_init{});
int err = uv_check_init(loop.GetRaw(), h->GetRaw());
if (err < 0) {
@@ -20,6 +23,9 @@
}
void Check::Start() {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_check_start, GetRaw(), [](uv_check_t* handle) {
Check& h = *static_cast<Check*>(handle->data);
h.check();
diff --git a/wpinet/src/main/native/cpp/uv/FsEvent.cpp b/wpinet/src/main/native/cpp/uv/FsEvent.cpp
index 044390e..d77bf37 100644
--- a/wpinet/src/main/native/cpp/uv/FsEvent.cpp
+++ b/wpinet/src/main/native/cpp/uv/FsEvent.cpp
@@ -13,6 +13,9 @@
namespace wpi::uv {
std::shared_ptr<FsEvent> FsEvent::Create(Loop& loop) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<FsEvent>(private_init{});
int err = uv_fs_event_init(loop.GetRaw(), h->GetRaw());
if (err < 0) {
diff --git a/wpinet/src/main/native/cpp/uv/GetAddrInfo.cpp b/wpinet/src/main/native/cpp/uv/GetAddrInfo.cpp
index 14721f2..c3ec000 100644
--- a/wpinet/src/main/native/cpp/uv/GetAddrInfo.cpp
+++ b/wpinet/src/main/native/cpp/uv/GetAddrInfo.cpp
@@ -18,6 +18,9 @@
void GetAddrInfo(Loop& loop, const std::shared_ptr<GetAddrInfoReq>& req,
std::string_view node, std::string_view service,
const addrinfo* hints) {
+ if (loop.IsClosing()) {
+ return;
+ }
SmallString<128> nodeStr{node};
SmallString<128> serviceStr{service};
int err = uv_getaddrinfo(
diff --git a/wpinet/src/main/native/cpp/uv/GetNameInfo.cpp b/wpinet/src/main/native/cpp/uv/GetNameInfo.cpp
index a6ad36d..9720cc3 100644
--- a/wpinet/src/main/native/cpp/uv/GetNameInfo.cpp
+++ b/wpinet/src/main/native/cpp/uv/GetNameInfo.cpp
@@ -15,6 +15,9 @@
void GetNameInfo(Loop& loop, const std::shared_ptr<GetNameInfoReq>& req,
const sockaddr& addr, int flags) {
+ if (loop.IsClosing()) {
+ return;
+ }
int err = uv_getnameinfo(
loop.GetRaw(), req->GetRaw(),
[](uv_getnameinfo_t* req, int status, const char* hostname,
diff --git a/wpinet/src/main/native/cpp/uv/Idle.cpp b/wpinet/src/main/native/cpp/uv/Idle.cpp
index 452bc7e..7b94b3f 100644
--- a/wpinet/src/main/native/cpp/uv/Idle.cpp
+++ b/wpinet/src/main/native/cpp/uv/Idle.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Idle> Idle::Create(Loop& loop) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Idle>(private_init{});
int err = uv_idle_init(loop.GetRaw(), h->GetRaw());
if (err < 0) {
@@ -20,6 +23,9 @@
}
void Idle::Start() {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_idle_start, GetRaw(), [](uv_idle_t* handle) {
Idle& h = *static_cast<Idle*>(handle->data);
h.idle();
diff --git a/wpinet/src/main/native/cpp/uv/NetworkStream.cpp b/wpinet/src/main/native/cpp/uv/NetworkStream.cpp
index 3538596..12750b2 100644
--- a/wpinet/src/main/native/cpp/uv/NetworkStream.cpp
+++ b/wpinet/src/main/native/cpp/uv/NetworkStream.cpp
@@ -11,6 +11,9 @@
}
void NetworkStream::Listen(int backlog) {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_listen, GetRawStream(), backlog,
[](uv_stream_t* handle, int status) {
auto& h = *static_cast<NetworkStream*>(handle->data);
diff --git a/wpinet/src/main/native/cpp/uv/Pipe.cpp b/wpinet/src/main/native/cpp/uv/Pipe.cpp
index 9548874..7993604 100644
--- a/wpinet/src/main/native/cpp/uv/Pipe.cpp
+++ b/wpinet/src/main/native/cpp/uv/Pipe.cpp
@@ -11,6 +11,9 @@
namespace wpi::uv {
std::shared_ptr<Pipe> Pipe::Create(Loop& loop, bool ipc) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Pipe>(private_init{});
int err = uv_pipe_init(loop.GetRaw(), h->GetRaw(), ipc ? 1 : 0);
if (err < 0) {
@@ -22,7 +25,7 @@
}
void Pipe::Reuse(std::function<void()> callback, bool ipc) {
- if (IsClosing()) {
+ if (IsLoopClosing() || IsClosing()) {
return;
}
if (!m_reuseData) {
@@ -69,6 +72,9 @@
void Pipe::Connect(std::string_view name,
const std::shared_ptr<PipeConnectReq>& req) {
+ if (IsLoopClosing()) {
+ return;
+ }
SmallString<128> nameBuf{name};
uv_pipe_connect(req->GetRaw(), GetRaw(), nameBuf.c_str(),
[](uv_connect_t* req, int status) {
diff --git a/wpinet/src/main/native/cpp/uv/Poll.cpp b/wpinet/src/main/native/cpp/uv/Poll.cpp
index 3713453..7d35615 100644
--- a/wpinet/src/main/native/cpp/uv/Poll.cpp
+++ b/wpinet/src/main/native/cpp/uv/Poll.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Poll> Poll::Create(Loop& loop, int fd) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Poll>(private_init{});
int err = uv_poll_init(loop.GetRaw(), h->GetRaw(), fd);
if (err < 0) {
@@ -20,6 +23,9 @@
}
std::shared_ptr<Poll> Poll::CreateSocket(Loop& loop, uv_os_sock_t sock) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Poll>(private_init{});
int err = uv_poll_init_socket(loop.GetRaw(), h->GetRaw(), sock);
if (err < 0) {
@@ -31,7 +37,7 @@
}
void Poll::Reuse(int fd, std::function<void()> callback) {
- if (IsClosing()) {
+ if (IsLoopClosing() || IsClosing()) {
return;
}
if (!m_reuseData) {
@@ -56,7 +62,7 @@
}
void Poll::ReuseSocket(uv_os_sock_t sock, std::function<void()> callback) {
- if (IsClosing()) {
+ if (IsLoopClosing() || IsClosing()) {
return;
}
if (!m_reuseData) {
@@ -81,6 +87,9 @@
}
void Poll::Start(int events) {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_poll_start, GetRaw(), events,
[](uv_poll_t* handle, int status, int events) {
Poll& h = *static_cast<Poll*>(handle->data);
diff --git a/wpinet/src/main/native/cpp/uv/Prepare.cpp b/wpinet/src/main/native/cpp/uv/Prepare.cpp
index e4ca160..aa1a89d 100644
--- a/wpinet/src/main/native/cpp/uv/Prepare.cpp
+++ b/wpinet/src/main/native/cpp/uv/Prepare.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Prepare> Prepare::Create(Loop& loop) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Prepare>(private_init{});
int err = uv_prepare_init(loop.GetRaw(), h->GetRaw());
if (err < 0) {
@@ -20,6 +23,9 @@
}
void Prepare::Start() {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_prepare_start, GetRaw(), [](uv_prepare_t* handle) {
Prepare& h = *static_cast<Prepare*>(handle->data);
h.prepare();
diff --git a/wpinet/src/main/native/cpp/uv/Process.cpp b/wpinet/src/main/native/cpp/uv/Process.cpp
index 3c10db6..c872ff9 100644
--- a/wpinet/src/main/native/cpp/uv/Process.cpp
+++ b/wpinet/src/main/native/cpp/uv/Process.cpp
@@ -13,6 +13,10 @@
std::shared_ptr<Process> Process::SpawnArray(Loop& loop, std::string_view file,
std::span<const Option> options) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
+
// convert Option array to libuv structure
uv_process_options_t coptions;
diff --git a/wpinet/src/main/native/cpp/uv/Signal.cpp b/wpinet/src/main/native/cpp/uv/Signal.cpp
index 10dd7b4..8f998e2 100644
--- a/wpinet/src/main/native/cpp/uv/Signal.cpp
+++ b/wpinet/src/main/native/cpp/uv/Signal.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Signal> Signal::Create(Loop& loop) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Signal>(private_init{});
int err = uv_signal_init(loop.GetRaw(), h->GetRaw());
if (err < 0) {
@@ -20,6 +23,9 @@
}
void Signal::Start(int signum) {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(
&uv_signal_start, GetRaw(),
[](uv_signal_t* handle, int signum) {
diff --git a/wpinet/src/main/native/cpp/uv/Stream.cpp b/wpinet/src/main/native/cpp/uv/Stream.cpp
index e7f6031..e054003 100644
--- a/wpinet/src/main/native/cpp/uv/Stream.cpp
+++ b/wpinet/src/main/native/cpp/uv/Stream.cpp
@@ -35,6 +35,9 @@
}
void Stream::Shutdown(const std::shared_ptr<ShutdownReq>& req) {
+ if (IsLoopClosing()) {
+ return;
+ }
if (Invoke(&uv_shutdown, req->GetRaw(), GetRawStream(),
[](uv_shutdown_t* req, int status) {
auto& h = *static_cast<ShutdownReq*>(req->data);
@@ -50,6 +53,9 @@
}
void Stream::Shutdown(std::function<void()> callback) {
+ if (IsLoopClosing()) {
+ return;
+ }
auto req = std::make_shared<ShutdownReq>();
if (callback) {
req->complete.connect(std::move(callback));
@@ -58,6 +64,9 @@
}
void Stream::StartRead() {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_read_start, GetRawStream(), &Handle::AllocBuf,
[](uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
auto& h = *static_cast<Stream*>(stream->data);
@@ -79,14 +88,17 @@
void Stream::Write(std::span<const Buffer> bufs,
const std::shared_ptr<WriteReq>& req) {
+ if (IsLoopClosing()) {
+ return;
+ }
if (Invoke(&uv_write, req->GetRaw(), GetRawStream(), bufs.data(), bufs.size(),
[](uv_write_t* r, int status) {
auto& h = *static_cast<WriteReq*>(r->data);
if (status < 0) {
h.ReportError(status);
}
+ auto ptr = h.Release(); // one-shot, but finish() may Keep()
h.finish(Error(status));
- h.Release(); // this is always a one-shot
})) {
req->Keep();
}
@@ -98,20 +110,32 @@
}
int Stream::TryWrite(std::span<const Buffer> bufs) {
+ if (IsLoopClosing()) {
+ return UV_ECANCELED;
+ }
int val = uv_try_write(GetRawStream(), bufs.data(), bufs.size());
+ if (val == UV_EAGAIN) {
+ return 0;
+ }
if (val < 0) {
this->ReportError(val);
- return 0;
+ return val;
}
return val;
}
int Stream::TryWrite2(std::span<const Buffer> bufs, Stream& send) {
+ if (IsLoopClosing()) {
+ return UV_ECANCELED;
+ }
int val = uv_try_write2(GetRawStream(), bufs.data(), bufs.size(),
send.GetRawStream());
+ if (val == UV_EAGAIN) {
+ return 0;
+ }
if (val < 0) {
this->ReportError(val);
- return 0;
+ return val;
}
return val;
}
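
The TryWrite()/TryWrite2() change above alters the return contract: UV_EAGAIN is now reported as 0 (nothing accepted, not an error), real failures come back as the negative uv error code, and a closing loop yields UV_ECANCELED. A caller-side sketch of the three cases (illustrative; the fallback/queueing logic is omitted):

#include <span>

#include "wpinet/uv/Buffer.h"
#include "wpinet/uv/Stream.h"

void TryWriteOnce(wpi::uv::Stream& stream,
                  std::span<const wpi::uv::Buffer> bufs) {
  int sent = stream.TryWrite(bufs);
  if (sent < 0) {
    // hard error (e.g. UV_ECANCELED while the loop is closing); give up
  } else if (sent == 0) {
    // would block; nothing was accepted, fall back to Write() or retry later
  } else {
    // sent bytes were accepted; any remainder still needs another write
  }
}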
diff --git a/wpinet/src/main/native/cpp/uv/Tcp.cpp b/wpinet/src/main/native/cpp/uv/Tcp.cpp
index ae01683..b163a0e 100644
--- a/wpinet/src/main/native/cpp/uv/Tcp.cpp
+++ b/wpinet/src/main/native/cpp/uv/Tcp.cpp
@@ -11,6 +11,9 @@
namespace wpi::uv {
std::shared_ptr<Tcp> Tcp::Create(Loop& loop, unsigned int flags) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Tcp>(private_init{});
int err = uv_tcp_init_ex(loop.GetRaw(), h->GetRaw(), flags);
if (err < 0) {
@@ -22,7 +25,7 @@
}
void Tcp::Reuse(std::function<void()> callback, unsigned int flags) {
- if (IsClosing()) {
+ if (IsLoopClosing() || IsClosing()) {
return;
}
if (!m_reuseData) {
@@ -103,6 +106,9 @@
void Tcp::Connect(const sockaddr& addr,
const std::shared_ptr<TcpConnectReq>& req) {
+ if (IsLoopClosing()) {
+ return;
+ }
if (Invoke(&uv_tcp_connect, req->GetRaw(), GetRaw(), &addr,
[](uv_connect_t* req, int status) {
auto& h = *static_cast<TcpConnectReq*>(req->data);
@@ -118,6 +124,9 @@
}
void Tcp::Connect(const sockaddr& addr, std::function<void()> callback) {
+ if (IsLoopClosing()) {
+ return;
+ }
auto req = std::make_shared<TcpConnectReq>();
req->connected.connect(std::move(callback));
Connect(addr, req);
diff --git a/wpinet/src/main/native/cpp/uv/Timer.cpp b/wpinet/src/main/native/cpp/uv/Timer.cpp
index 9d52173..e9b33fc 100644
--- a/wpinet/src/main/native/cpp/uv/Timer.cpp
+++ b/wpinet/src/main/native/cpp/uv/Timer.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Timer> Timer::Create(Loop& loop) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Timer>(private_init{});
int err = uv_timer_init(loop.GetRaw(), h->GetRaw());
if (err < 0) {
@@ -32,6 +35,9 @@
}
void Timer::Start(Time timeout, Time repeat) {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(
&uv_timer_start, GetRaw(),
[](uv_timer_t* handle) {
diff --git a/wpinet/src/main/native/cpp/uv/Tty.cpp b/wpinet/src/main/native/cpp/uv/Tty.cpp
index 6043a93..5e5756c 100644
--- a/wpinet/src/main/native/cpp/uv/Tty.cpp
+++ b/wpinet/src/main/native/cpp/uv/Tty.cpp
@@ -9,6 +9,9 @@
namespace wpi::uv {
std::shared_ptr<Tty> Tty::Create(Loop& loop, uv_file fd, bool readable) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Tty>(private_init{});
int err = uv_tty_init(loop.GetRaw(), h->GetRaw(), fd, readable ? 1 : 0);
if (err < 0) {
diff --git a/wpinet/src/main/native/cpp/uv/Udp.cpp b/wpinet/src/main/native/cpp/uv/Udp.cpp
index 689d5a7..1922c57 100644
--- a/wpinet/src/main/native/cpp/uv/Udp.cpp
+++ b/wpinet/src/main/native/cpp/uv/Udp.cpp
@@ -38,6 +38,9 @@
}
std::shared_ptr<Udp> Udp::Create(Loop& loop, unsigned int flags) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Udp>(private_init{});
int err = uv_udp_init_ex(loop.GetRaw(), h->GetRaw(), flags);
if (err < 0) {
@@ -135,6 +138,9 @@
void Udp::Send(const sockaddr& addr, std::span<const Buffer> bufs,
const std::shared_ptr<UdpSendReq>& req) {
+ if (IsLoopClosing()) {
+ return;
+ }
if (Invoke(&uv_udp_send, req->GetRaw(), GetRaw(), bufs.data(), bufs.size(),
&addr, [](uv_udp_send_t* r, int status) {
auto& h = *static_cast<UdpSendReq*>(r->data);
@@ -150,12 +156,18 @@
void Udp::Send(const sockaddr& addr, std::span<const Buffer> bufs,
std::function<void(std::span<Buffer>, Error)> callback) {
+ if (IsLoopClosing()) {
+ return;
+ }
Send(addr, bufs,
std::make_shared<CallbackUdpSendReq>(bufs, std::move(callback)));
}
void Udp::Send(std::span<const Buffer> bufs,
const std::shared_ptr<UdpSendReq>& req) {
+ if (IsLoopClosing()) {
+ return;
+ }
if (Invoke(&uv_udp_send, req->GetRaw(), GetRaw(), bufs.data(), bufs.size(),
nullptr, [](uv_udp_send_t* r, int status) {
auto& h = *static_cast<UdpSendReq*>(r->data);
@@ -171,10 +183,16 @@
void Udp::Send(std::span<const Buffer> bufs,
std::function<void(std::span<Buffer>, Error)> callback) {
+ if (IsLoopClosing()) {
+ return;
+ }
Send(bufs, std::make_shared<CallbackUdpSendReq>(bufs, std::move(callback)));
}
void Udp::StartRecv() {
+ if (IsLoopClosing()) {
+ return;
+ }
Invoke(&uv_udp_recv_start, GetRaw(), &AllocBuf,
[](uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
const sockaddr* addr, unsigned flags) {
diff --git a/wpinet/src/main/native/cpp/uv/Work.cpp b/wpinet/src/main/native/cpp/uv/Work.cpp
index 818a93b..d94619f 100644
--- a/wpinet/src/main/native/cpp/uv/Work.cpp
+++ b/wpinet/src/main/native/cpp/uv/Work.cpp
@@ -13,6 +13,9 @@
}
void QueueWork(Loop& loop, const std::shared_ptr<WorkReq>& req) {
+ if (loop.IsClosing()) {
+ return;
+ }
int err = uv_queue_work(
loop.GetRaw(), req->GetRaw(),
[](uv_work_t* req) {
@@ -37,6 +40,9 @@
void QueueWork(Loop& loop, std::function<void()> work,
std::function<void()> afterWork) {
+ if (loop.IsClosing()) {
+ return;
+ }
auto req = std::make_shared<WorkReq>();
if (work) {
req->work.connect(std::move(work));
diff --git a/wpinet/src/main/native/include/wpinet/ParallelTcpConnector.h b/wpinet/src/main/native/include/wpinet/ParallelTcpConnector.h
index e7bc953..c3c869c 100644
--- a/wpinet/src/main/native/include/wpinet/ParallelTcpConnector.h
+++ b/wpinet/src/main/native/include/wpinet/ParallelTcpConnector.h
@@ -59,6 +59,9 @@
static std::shared_ptr<ParallelTcpConnector> Create(
wpi::uv::Loop& loop, wpi::uv::Timer::Time reconnectRate,
wpi::Logger& logger, std::function<void(wpi::uv::Tcp& tcp)> connected) {
+ if (loop.IsClosing()) {
+ return nullptr;
+ }
return std::make_shared<ParallelTcpConnector>(
loop, reconnectRate, logger, std::move(connected), private_init{});
}
@@ -112,7 +115,8 @@
std::shared_ptr<wpi::uv::Timer> m_reconnectTimer;
std::vector<std::pair<std::string, unsigned int>> m_servers;
std::vector<std::weak_ptr<wpi::uv::GetAddrInfoReq>> m_resolvers;
- std::vector<std::weak_ptr<wpi::uv::Tcp>> m_attempts;
+ std::vector<std::pair<sockaddr_storage, std::weak_ptr<wpi::uv::Tcp>>>
+ m_attempts;
bool m_isConnected{false};
};
diff --git a/wpinet/src/main/native/include/wpinet/WebSocket.h b/wpinet/src/main/native/include/wpinet/WebSocket.h
index 1f295c9..297c123 100644
--- a/wpinet/src/main/native/include/wpinet/WebSocket.h
+++ b/wpinet/src/main/native/include/wpinet/WebSocket.h
@@ -34,6 +34,7 @@
class WebSocket : public std::enable_shared_from_this<WebSocket> {
struct private_init {};
+ public:
static constexpr uint8_t kOpCont = 0x00;
static constexpr uint8_t kOpText = 0x01;
static constexpr uint8_t kOpBinary = 0x02;
@@ -42,8 +43,8 @@
static constexpr uint8_t kOpPong = 0x0A;
static constexpr uint8_t kOpMask = 0x0F;
static constexpr uint8_t kFlagFin = 0x80;
+ static constexpr uint8_t kFlagControl = 0x08;
- public:
WebSocket(uv::Stream& stream, bool server, const private_init&);
WebSocket(const WebSocket&) = delete;
WebSocket(WebSocket&&) = delete;
@@ -93,7 +94,7 @@
static constexpr uint8_t kPing = kFlagFin | kOpPing;
static constexpr uint8_t kPong = kFlagFin | kOpPong;
- Frame(uint8_t opcode, std::span<const uv::Buffer> data)
+ constexpr Frame(uint8_t opcode, std::span<const uv::Buffer> data)
: opcode{opcode}, data{data} {}
uint8_t opcode;
@@ -339,7 +340,7 @@
void SendPing(
std::span<const uv::Buffer> data,
std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
- Send(kFlagFin | kOpPing, data, std::move(callback));
+ SendControl(kFlagFin | kOpPing, data, std::move(callback));
}
/**
@@ -376,7 +377,7 @@
void SendPong(
std::span<const uv::Buffer> data,
std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
- Send(kFlagFin | kOpPong, data, std::move(callback));
+ SendControl(kFlagFin | kOpPong, data, std::move(callback));
}
/**
@@ -402,6 +403,31 @@
std::function<void(std::span<uv::Buffer>, uv::Error)> callback);
/**
+ * Try to send multiple frames. Tries to send as many frames as possible
+ * immediately, and only queues the "last" frame it can (as the network queue
+ * will almost always fill partway through a frame). The frames following
+   * the last frame will NOT be queued for transmission; the caller is
+   * responsible for handling (e.g. re-sending) those frames, for example
+   * when the callback is called.
+ *
+ * @param frames Frame type/data pairs
+   * @param callback Callback which is invoked when the write of the last
+   *                 frame that is not returned completes.
+ * @return Remaining frames that will not be sent
+ */
+ std::span<const Frame> TrySendFrames(
+ std::span<const Frame> frames,
+ std::function<void(std::span<uv::Buffer>, uv::Error)> callback);
+
+ /**
+ * Returns whether or not a previous TrySendFrames is still in progress.
+   * Calling TrySendFrames while this is true simply returns all frames
+   * unsent.
+ *
+ * @return True if a TryWrite is in progress
+ */
+ bool IsWriteInProgress() const { return m_writeInProgress; }
+
+ /**
* Fail the connection.
*/
void Fail(uint16_t code = 1002, std::string_view reason = "protocol error");
@@ -460,7 +486,8 @@
sig::Signal<std::span<const uint8_t>, bool> binary;
/**
- * Ping event. Emitted when a ping message is received.
+ * Ping event. Emitted when a ping message is received. A pong message is
+ * automatically sent in response, so this is simply a notification.
*/
sig::Signal<std::span<const uint8_t>> ping;
@@ -484,6 +511,12 @@
size_t m_maxMessageSize = 128 * 1024;
bool m_combineFragments = true;
+ // outgoing write request
+ bool m_writeInProgress = false;
+ class WriteReq;
+ std::weak_ptr<WriteReq> m_curWriteReq;
+ std::weak_ptr<WriteReq> m_lastWriteReq;
+
// operating state
State m_state = CONNECTING;
@@ -491,6 +524,7 @@
SmallVector<uint8_t, 14> m_header;
size_t m_headerSize = 0;
SmallVector<uint8_t, 1024> m_payload;
+ SmallVector<uint8_t, 64> m_controlPayload;
size_t m_frameStart = 0;
uint64_t m_frameSize = UINT64_MAX;
uint8_t m_fragmentOpcode = 0;
@@ -507,10 +541,16 @@
void SendClose(uint16_t code, std::string_view reason);
void SetClosed(uint16_t code, std::string_view reason, bool failed = false);
void HandleIncoming(uv::Buffer& buf, size_t size);
+ void SendControl(
+ uint8_t opcode, std::span<const uv::Buffer> data,
+ std::function<void(std::span<uv::Buffer>, uv::Error)> callback);
void Send(uint8_t opcode, std::span<const uv::Buffer> data,
std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {
SendFrames({{Frame{opcode, data}}}, std::move(callback));
}
+ void SendError(
+ std::span<const Frame> frames,
+ const std::function<void(std::span<uv::Buffer>, uv::Error)>& callback);
};
} // namespace wpi
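
A caller-side sketch of the TrySendFrames()/IsWriteInProgress() pattern documented above: try to flush a queue of frames and keep whatever comes back unsent for a later attempt (the queue container and retry policy are illustrative, not part of the API):

#include <span>
#include <vector>

#include "wpinet/WebSocket.h"
#include "wpinet/uv/Buffer.h"

void FlushQueued(wpi::WebSocket& ws,
                 std::vector<wpi::WebSocket::Frame>& queue) {
  if (queue.empty() || ws.IsWriteInProgress()) {
    return;  // TrySendFrames() would just hand every frame back
  }
  auto unsent = ws.TrySendFrames(
      queue, [](std::span<wpi::uv::Buffer>, wpi::uv::Error) {
        // the write of the last accepted frame finished; a retry could go here
      });
  // The returned span is the unsent tail of the input; drop the sent prefix.
  queue.erase(queue.begin(), queue.end() - unsent.size());
}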
diff --git a/wpinet/src/main/native/include/wpinet/http_parser.h b/wpinet/src/main/native/include/wpinet/http_parser.h
index 2189b8f..3993a4f 100644
--- a/wpinet/src/main/native/include/wpinet/http_parser.h
+++ b/wpinet/src/main/native/include/wpinet/http_parser.h
@@ -36,7 +36,7 @@
# define HTTP_PARSER_STRICT 1
#endif
-/* Maximium header size allowed. If the macro is not defined
+/* Maximum header size allowed. If the macro is not defined
* before including this header then the default is used. To
* change the maximum header size, define the macro in the build
* environment (e.g. -DHTTP_MAX_HEADER_SIZE=<value>). To remove
diff --git a/wpinet/src/main/native/include/wpinet/uv/Async.h b/wpinet/src/main/native/include/wpinet/uv/Async.h
index eb3a005..2eb13d7 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Async.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Async.h
@@ -62,6 +62,9 @@
* @param loop Loop object where this handle runs.
*/
static std::shared_ptr<Async> Create(const std::shared_ptr<Loop>& loop) {
+ if (loop->IsClosing()) {
+ return nullptr;
+ }
auto h = std::make_shared<Async>(loop, private_init{});
int err =
uv_async_init(loop->GetRaw(), h->GetRaw(), [](uv_async_t* handle) {
@@ -89,6 +92,9 @@
template <typename... U>
void Send(U&&... u) {
auto loop = m_loop.lock();
+    if (loop && loop->IsClosing()) {
+ return;
+ }
if (loop && loop->GetThreadId() == std::this_thread::get_id()) {
// called from within the loop, just call the function directly
wakeup(std::forward<U>(u)...);
@@ -161,6 +167,9 @@
*/
void Send() {
if (auto loop = m_loop.lock()) {
+ if (loop->IsClosing()) {
+ return;
+ }
if (loop->GetThreadId() == std::this_thread::get_id()) {
// called from within the loop, just call the function directly
wakeup();
diff --git a/wpinet/src/main/native/include/wpinet/uv/AsyncFunction.h b/wpinet/src/main/native/include/wpinet/uv/AsyncFunction.h
index 82a5913..f59875a 100644
--- a/wpinet/src/main/native/include/wpinet/uv/AsyncFunction.h
+++ b/wpinet/src/main/native/include/wpinet/uv/AsyncFunction.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <uv.h>
+#include <concepts>
#include <functional>
#include <memory>
#include <thread>
@@ -74,6 +75,9 @@
static std::shared_ptr<AsyncFunction> Create(
const std::shared_ptr<Loop>& loop,
std::function<void(promise<R>, T...)> func = nullptr) {
+ if (loop->IsClosing()) {
+ return nullptr;
+ }
auto h =
std::make_shared<AsyncFunction>(loop, std::move(func), private_init{});
int err =
@@ -123,6 +127,13 @@
uint64_t req = m_promises.CreateRequest();
auto loop = m_loop.lock();
+    if (loop && loop->IsClosing()) {
+ if constexpr (std::same_as<R, void>) {
+ return m_promises.MakeReadyFuture();
+ } else {
+ return m_promises.MakeReadyFuture({});
+ }
+ }
if (loop && loop->GetThreadId() == std::this_thread::get_id()) {
// called from within the loop, just call the function directly
wakeup(m_promises.CreatePromise(req), std::forward<U>(u)...);
diff --git a/wpinet/src/main/native/include/wpinet/uv/Buffer.h b/wpinet/src/main/native/include/wpinet/uv/Buffer.h
index 4b58b0f..01dc881 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Buffer.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Buffer.h
@@ -42,10 +42,23 @@
base = const_cast<char*>(base_);
len = static_cast<decltype(len)>(len_);
}
+ Buffer(uint8_t* base_, size_t len_) {
+ base = reinterpret_cast<char*>(base_);
+ len = static_cast<decltype(len)>(len_);
+ }
+ Buffer(const uint8_t* base_, size_t len_) {
+ base = reinterpret_cast<char*>(const_cast<uint8_t*>(base_));
+ len = static_cast<decltype(len)>(len_);
+ }
std::span<const char> data() const { return {base, len}; }
std::span<char> data() { return {base, len}; }
+ std::span<const uint8_t> bytes() const {
+ return {reinterpret_cast<const uint8_t*>(base), len};
+ }
+ std::span<uint8_t> bytes() { return {reinterpret_cast<uint8_t*>(base), len}; }
+
operator std::span<const char>() const { return data(); } // NOLINT
operator std::span<char>() { return data(); } // NOLINT
diff --git a/wpinet/src/main/native/include/wpinet/uv/Error.h b/wpinet/src/main/native/include/wpinet/uv/Error.h
index cc2a5d5..f3f8d24 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Error.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Error.h
@@ -38,7 +38,7 @@
const char* name() const { return uv_err_name(m_err); }
private:
- int m_err{UV_UNKNOWN};
+ int m_err{0};
};
} // namespace wpi::uv
diff --git a/wpinet/src/main/native/include/wpinet/uv/Idle.h b/wpinet/src/main/native/include/wpinet/uv/Idle.h
index 4ed6d07..5cb4625 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Idle.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Idle.h
@@ -28,7 +28,7 @@
* for I/O.
*
* @warning Despite the name, idle handles will signal every loop iteration,
- * not when the loop is actually "idle". This also means they can easly become
+ * not when the loop is actually "idle". This also means they can easily become
* CPU hogs.
*/
class Idle final : public HandleImpl<Idle, uv_idle_t> {
diff --git a/wpinet/src/main/native/include/wpinet/uv/Loop.h b/wpinet/src/main/native/include/wpinet/uv/Loop.h
index 129faf5..0897c87 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Loop.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Loop.h
@@ -71,6 +71,20 @@
static std::shared_ptr<Loop> GetDefault();
/**
+ * Set the loop closing flag.
+ *
+ * This will prevent new handles from being created on the loop.
+ */
+ void SetClosing() { m_closing = true; }
+
+ /**
+   * Return the loop closing flag.
+   *
+   * @return True if SetClosing() has been called.
+ */
+ bool IsClosing() const { return m_closing; }
+
+ /**
* Release all internal loop resources.
*
* Call this function only when the loop has finished executing and all open
@@ -247,6 +261,7 @@
uv_loop_t* m_loop;
uv_loop_t m_loopStruct;
std::atomic<std::thread::id> m_tid;
+ bool m_closing = false;
};
} // namespace wpi::uv
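
SetClosing() is the flag that the IsClosing()/IsLoopClosing() guards added throughout this change key off of: once it is set, the handle Create() factories return nullptr and Start()/Send()-style calls become no-ops. A teardown sketch (illustrative ordering; the Timer::Start() call is just an example of a guarded operation):

#include "wpinet/uv/Loop.h"
#include "wpinet/uv/Timer.h"

void BeginShutdown(wpi::uv::Loop& loop) {
  loop.SetClosing();
  // Late handle creation is now refused instead of racing loop teardown, so
  // callers must be prepared for a null return.
  if (auto timer = wpi::uv::Timer::Create(loop)) {
    timer->Start(wpi::uv::Timer::Time{100}, wpi::uv::Timer::Time{0});
  }
}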
diff --git a/wpinet/src/main/native/include/wpinet/uv/Poll.h b/wpinet/src/main/native/include/wpinet/uv/Poll.h
index 1e63836..3ba9a2c 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Poll.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Poll.h
@@ -7,6 +7,7 @@
#include <uv.h>
+#include <functional>
#include <memory>
#include <wpi/Signal.h>
diff --git a/wpinet/src/main/native/include/wpinet/uv/Request.h b/wpinet/src/main/native/include/wpinet/uv/Request.h
index 3fbec6f..d16d289 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Request.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Request.h
@@ -9,6 +9,7 @@
#include <functional>
#include <memory>
+#include <utility>
#include "wpinet/uv/Error.h"
@@ -92,8 +93,12 @@
*
* Derived classes can override this method for different memory management
* approaches (e.g. pooled storage of requests).
+ *
+ * @return Previous shared pointer
*/
- virtual void Release() noexcept { m_self.reset(); }
+ virtual std::shared_ptr<Request> Release() noexcept {
+ return std::move(m_self);
+ }
/**
* Error callback. By default, this is set up to report errors to the handle
@@ -130,11 +135,11 @@
class RequestImpl : public Request {
public:
std::shared_ptr<T> shared_from_this() {
- return std::static_pointer_cast<T>(this->shared_from_this());
+ return std::static_pointer_cast<T>(Request::shared_from_this());
}
std::shared_ptr<const T> shared_from_this() const {
- return std::static_pointer_cast<const T>(this->shared_from_this());
+ return std::static_pointer_cast<const T>(Request::shared_from_this());
}
/**
diff --git a/wpinet/src/main/native/include/wpinet/uv/Stream.h b/wpinet/src/main/native/include/wpinet/uv/Stream.h
index 9568455..29e5811 100644
--- a/wpinet/src/main/native/include/wpinet/uv/Stream.h
+++ b/wpinet/src/main/native/include/wpinet/uv/Stream.h
@@ -194,8 +194,9 @@
* An error signal will be emitted in case of errors.
*
* @param bufs The buffers to be written to the stream.
- * @return Number of bytes written.
+ * @return Number of bytes written, or negative (error code) on error
*/
+ [[nodiscard]]
int TryWrite(std::span<const Buffer> bufs);
/**
@@ -206,8 +207,9 @@
* An error signal will be emitted in case of errors.
*
* @param bufs The buffers to be written to the stream.
- * @return Number of bytes written.
+ * @return Number of bytes written, or negative (error code) on error
*/
+ [[nodiscard]]
int TryWrite(std::initializer_list<Buffer> bufs) {
return TryWrite({bufs.begin(), bufs.end()});
}
@@ -221,8 +223,9 @@
*
* @param bufs The buffers to be written to the stream.
* @param send send stream
- * @return Number of bytes written.
+ * @return Number of bytes written, or negative (error code) on error
*/
+ [[nodiscard]]
int TryWrite2(std::span<const Buffer> bufs, Stream& send);
/**
@@ -234,8 +237,9 @@
*
* @param bufs The buffers to be written to the stream.
* @param send send stream
- * @return Number of bytes written.
+ * @return Number of bytes written, or negative (error code) on error
*/
+ [[nodiscard]]
int TryWrite2(std::initializer_list<Buffer> bufs, Stream& send) {
return TryWrite2({bufs.begin(), bufs.end()}, send);
}
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv.h b/wpinet/src/main/native/thirdparty/libuv/include/uv.h
index dbaeb1e..d5342b0 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv.h
@@ -28,6 +28,7 @@
#error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both."
#endif
+#ifndef UV_EXTERN
#ifdef _WIN32
/* Windows - set up dll import/export decorators. */
# if defined(BUILDING_UV_SHARED)
@@ -47,14 +48,20 @@
#else
# define UV_EXTERN /* nothing */
#endif
+#endif /* UV_EXTERN */
#include "uv/errno.h"
#include "uv/version.h"
#include <stddef.h>
#include <stdio.h>
-
#include <stdint.h>
+/* Internal type, do not use. */
+struct uv__queue {
+ struct uv__queue* next;
+ struct uv__queue* prev;
+};
+
#if defined(_WIN32)
# include "uv/win.h"
#else
@@ -145,6 +152,8 @@
XX(EFTYPE, "inappropriate file type or format") \
XX(EILSEQ, "illegal byte sequence") \
XX(ESOCKTNOSUPPORT, "socket type not supported") \
+ XX(ENODATA, "no data available") \
+ XX(EUNATCH, "protocol driver not attached") \
#define UV_HANDLE_TYPE_MAP(XX) \
XX(ASYNC, async) \
@@ -240,9 +249,12 @@
typedef struct uv_interface_address_s uv_interface_address_t;
typedef struct uv_dirent_s uv_dirent_t;
typedef struct uv_passwd_s uv_passwd_t;
+typedef struct uv_group_s uv_group_t;
typedef struct uv_utsname_s uv_utsname_t;
typedef struct uv_statfs_s uv_statfs_t;
+typedef struct uv_metrics_s uv_metrics_t;
+
typedef enum {
UV_LOOP_BLOCK_SIGNAL = 0,
UV_METRICS_IDLE_TIME
@@ -275,13 +287,13 @@
UV_EXTERN int uv_loop_close(uv_loop_t* loop);
/*
* NOTE:
- * This function is DEPRECATED (to be removed after 0.12), users should
+ * This function is DEPRECATED, users should
* allocate the loop manually and use uv_loop_init instead.
*/
UV_EXTERN uv_loop_t* uv_loop_new(void);
/*
* NOTE:
- * This function is DEPRECATED (to be removed after 0.12). Users should use
+ * This function is DEPRECATED. Users should use
* uv_loop_close and free the memory manually instead.
*/
UV_EXTERN void uv_loop_delete(uv_loop_t*);
@@ -337,11 +349,32 @@
void* buf,
size_t buflen);
+typedef enum {
+ UV_CLOCK_MONOTONIC,
+ UV_CLOCK_REALTIME
+} uv_clock_id;
+
+/* XXX(bnoordhuis) not 2038-proof, https://github.com/libuv/libuv/issues/3864 */
typedef struct {
long tv_sec;
long tv_nsec;
} uv_timespec_t;
+typedef struct {
+ int64_t tv_sec;
+ int32_t tv_nsec;
+} uv_timespec64_t;
+
+/* XXX(bnoordhuis) not 2038-proof, https://github.com/libuv/libuv/issues/3864 */
+typedef struct {
+ long tv_sec;
+ long tv_usec;
+} uv_timeval_t;
+
+typedef struct {
+ int64_t tv_sec;
+ int32_t tv_usec;
+} uv_timeval64_t;
typedef struct {
uint64_t st_dev;
@@ -430,7 +463,7 @@
uv_handle_type type; \
/* private */ \
uv_close_cb close_cb; \
- void* handle_queue[2]; \
+ struct uv__queue handle_queue; \
union { \
int fd; \
void* reserved[4]; \
@@ -766,6 +799,10 @@
UV_EXTERN uv_handle_type uv_guess_handle(uv_file file);
+enum {
+ UV_PIPE_NO_TRUNCATE = 1u << 0
+};
+
/*
* uv_pipe_t is a subclass of uv_stream_t.
*
@@ -782,10 +819,20 @@
UV_EXTERN int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc);
UV_EXTERN int uv_pipe_open(uv_pipe_t*, uv_file file);
UV_EXTERN int uv_pipe_bind(uv_pipe_t* handle, const char* name);
+UV_EXTERN int uv_pipe_bind2(uv_pipe_t* handle,
+ const char* name,
+ size_t namelen,
+ unsigned int flags);
UV_EXTERN void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb);
+UV_EXTERN int uv_pipe_connect2(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ size_t namelen,
+ unsigned int flags,
+ uv_connect_cb cb);
UV_EXTERN int uv_pipe_getsockname(const uv_pipe_t* handle,
char* buffer,
size_t* size);
@@ -1126,6 +1173,12 @@
char* homedir;
};
+struct uv_group_s {
+ char* groupname;
+ unsigned long gid;
+ char** members;
+};
+
struct uv_utsname_s {
char sysname[256];
char release[256];
@@ -1172,16 +1225,6 @@
UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd);
typedef struct {
- long tv_sec;
- long tv_usec;
-} uv_timeval_t;
-
-typedef struct {
- int64_t tv_sec;
- int32_t tv_usec;
-} uv_timeval64_t;
-
-typedef struct {
uv_timeval_t ru_utime; /* user CPU time used */
uv_timeval_t ru_stime; /* system CPU time used */
uint64_t ru_maxrss; /* maximum resident set size */
@@ -1206,6 +1249,9 @@
UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size);
UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd);
UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd);
+UV_EXTERN int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid);
+UV_EXTERN int uv_os_get_group(uv_group_t* grp, uv_uid_t gid);
+UV_EXTERN void uv_os_free_group(uv_group_t* grp);
UV_EXTERN uv_pid_t uv_os_getpid(void);
UV_EXTERN uv_pid_t uv_os_getppid(void);
@@ -1232,6 +1278,7 @@
UV_EXTERN unsigned int uv_available_parallelism(void);
UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);
+UV_EXTERN int uv_cpumask_size(void);
UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses,
int* count);
@@ -1264,6 +1311,15 @@
UV_EXTERN int uv_os_uname(uv_utsname_t* buffer);
+struct uv_metrics_s {
+ uint64_t loop_count;
+ uint64_t events;
+ uint64_t events_waiting;
+ /* private */
+ uint64_t* reserved[13];
+};
+
+UV_EXTERN int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics);
UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop);
typedef enum {
@@ -1697,7 +1753,9 @@
UV_EXTERN uint64_t uv_get_free_memory(void);
UV_EXTERN uint64_t uv_get_total_memory(void);
UV_EXTERN uint64_t uv_get_constrained_memory(void);
+UV_EXTERN uint64_t uv_get_available_memory(void);
+UV_EXTERN int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts);
UV_EXTERN uint64_t uv_hrtime(void);
UV_EXTERN void uv_sleep(unsigned int msec);
@@ -1774,6 +1832,14 @@
const uv_thread_options_t* params,
uv_thread_cb entry,
void* arg);
+UV_EXTERN int uv_thread_setaffinity(uv_thread_t* tid,
+ char* cpumask,
+ char* oldmask,
+ size_t mask_size);
+UV_EXTERN int uv_thread_getaffinity(uv_thread_t* tid,
+ char* cpumask,
+ size_t mask_size);
+UV_EXTERN int uv_thread_getcpu(void);
UV_EXTERN uv_thread_t uv_thread_self(void);
UV_EXTERN int uv_thread_join(uv_thread_t *tid);
UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2);
@@ -1795,7 +1861,7 @@
void* data;
/* Loop reference counting. */
unsigned int active_handles;
- void* handle_queue[2];
+ struct uv__queue handle_queue;
union {
void* unused;
unsigned int count;
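A usage sketch of the loop-metrics and clock accessors declared above (uv_metrics_info(), uv_clock_gettime()); uv_default_loop(), uv_run() and uv_loop_close() are the standard public entry points, and error handling is elided:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_metrics_t metrics;
  uv_timespec64_t now;

  uv_run(loop, UV_RUN_NOWAIT);  /* turn the loop once so the counters move */

  if (uv_metrics_info(loop, &metrics) == 0)
    printf("iterations=%llu events=%llu waiting=%llu\n",
           (unsigned long long) metrics.loop_count,
           (unsigned long long) metrics.events,
           (unsigned long long) metrics.events_waiting);

  if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &now) == 0)
    printf("monotonic: %lld.%09d s\n",
           (long long) now.tv_sec, (int) now.tv_nsec);

  return uv_loop_close(loop);
}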
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/darwin.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/darwin.h
index d226415..06962bf 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/darwin.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/darwin.h
@@ -40,7 +40,7 @@
void* cf_state; \
uv_mutex_t cf_mutex; \
uv_sem_t cf_sem; \
- void* cf_signals[2]; \
+ struct uv__queue cf_signals; \
#define UV_PLATFORM_FS_EVENT_FIELDS \
uv__io_t event_watcher; \
@@ -48,8 +48,8 @@
int realpath_len; \
int cf_flags; \
uv_async_t* cf_cb; \
- void* cf_events[2]; \
- void* cf_member[2]; \
+ struct uv__queue cf_events; \
+ struct uv__queue cf_member; \
int cf_error; \
uv_mutex_t cf_mutex; \
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/errno.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/errno.h
index 71906b3..127278e 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/errno.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/errno.h
@@ -413,7 +413,6 @@
#elif defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__)
# define UV__EHOSTDOWN (-64)
@@ -457,4 +456,22 @@
# define UV__ESOCKTNOSUPPORT (-4025)
#endif
+/* FreeBSD defines ENODATA in /usr/include/c++/v1/errno.h which is only visible
+ * if C++ is being used. Define it directly to avoid problems when integrating
+ * libuv in a C++ project.
+ */
+#if defined(ENODATA) && !defined(_WIN32)
+# define UV__ENODATA UV__ERR(ENODATA)
+#elif defined(__FreeBSD__)
+# define UV__ENODATA (-9919)
+#else
+# define UV__ENODATA (-4024)
+#endif
+
+#if defined(EUNATCH) && !defined(_WIN32)
+# define UV__EUNATCH UV__ERR(EUNATCH)
+#else
+# define UV__EUNATCH (-4023)
+#endif
+
#endif /* UV_ERRNO_H_ */
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/linux.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/linux.h
index 9b38405..9f22f8c 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/linux.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/linux.h
@@ -28,7 +28,7 @@
int inotify_fd; \
#define UV_PLATFORM_FS_EVENT_FIELDS \
- void* watchers[2]; \
+ struct uv__queue watchers; \
int wd; \
#endif /* UV_LINUX_H */
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/threadpool.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/threadpool.h
index 9708ebd..24ce916 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/threadpool.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/threadpool.h
@@ -31,7 +31,7 @@
void (*work)(struct uv__work *w);
void (*done)(struct uv__work *w, int status);
struct uv_loop_s* loop;
- void* wq[2];
+ struct uv__queue wq;
};
#endif /* UV_THREADPOOL_H_ */
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/unix.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/unix.h
index 256fef3..e334cab 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/unix.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/unix.h
@@ -51,7 +51,6 @@
# include "uv/darwin.h"
#elif defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# include "uv/bsd.h"
@@ -85,8 +84,8 @@
struct uv__io_s {
uv__io_cb cb;
- void* pending_queue[2];
- void* watcher_queue[2];
+ struct uv__queue pending_queue;
+ struct uv__queue watcher_queue;
unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
unsigned int events; /* Current event mask. */
int fd;
@@ -213,21 +212,21 @@
#define UV_LOOP_PRIVATE_FIELDS \
unsigned long flags; \
int backend_fd; \
- void* pending_queue[2]; \
- void* watcher_queue[2]; \
- void** watchers; \
+ struct uv__queue pending_queue; \
+ struct uv__queue watcher_queue; \
+ uv__io_t** watchers; \
unsigned int nwatchers; \
unsigned int nfds; \
- void* wq[2]; \
+ struct uv__queue wq; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async; \
uv_rwlock_t cloexec_lock; \
uv_handle_t* closing_handles; \
- void* process_handles[2]; \
- void* prepare_handles[2]; \
- void* check_handles[2]; \
- void* idle_handles[2]; \
- void* async_handles[2]; \
+ struct uv__queue process_handles; \
+ struct uv__queue prepare_handles; \
+ struct uv__queue check_handles; \
+ struct uv__queue idle_handles; \
+ struct uv__queue async_handles; \
void (*async_unused)(void); /* TODO(bnoordhuis) Remove in libuv v2. */ \
uv__io_t async_io_watcher; \
int async_wfd; \
@@ -250,7 +249,7 @@
#define UV_PRIVATE_REQ_TYPES /* empty */
#define UV_WRITE_PRIVATE_FIELDS \
- void* queue[2]; \
+ struct uv__queue queue; \
unsigned int write_index; \
uv_buf_t* bufs; \
unsigned int nbufs; \
@@ -258,12 +257,12 @@
uv_buf_t bufsml[4]; \
#define UV_CONNECT_PRIVATE_FIELDS \
- void* queue[2]; \
+ struct uv__queue queue; \
#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */
#define UV_UDP_SEND_PRIVATE_FIELDS \
- void* queue[2]; \
+ struct uv__queue queue; \
struct sockaddr_storage addr; \
unsigned int nbufs; \
uv_buf_t* bufs; \
@@ -279,8 +278,8 @@
uv_connect_t *connect_req; \
uv_shutdown_t *shutdown_req; \
uv__io_t io_watcher; \
- void* write_queue[2]; \
- void* write_completed_queue[2]; \
+ struct uv__queue write_queue; \
+ struct uv__queue write_completed_queue; \
uv_connection_cb connection_cb; \
int delayed_error; \
int accepted_fd; \
@@ -293,30 +292,30 @@
uv_alloc_cb alloc_cb; \
uv_udp_recv_cb recv_cb; \
uv__io_t io_watcher; \
- void* write_queue[2]; \
- void* write_completed_queue[2]; \
+ struct uv__queue write_queue; \
+ struct uv__queue write_completed_queue; \
#define UV_PIPE_PRIVATE_FIELDS \
- const char* pipe_fname; /* strdup'ed */
+ const char* pipe_fname; /* NULL or strdup'ed */
#define UV_POLL_PRIVATE_FIELDS \
uv__io_t io_watcher;
#define UV_PREPARE_PRIVATE_FIELDS \
uv_prepare_cb prepare_cb; \
- void* queue[2]; \
+ struct uv__queue queue; \
#define UV_CHECK_PRIVATE_FIELDS \
uv_check_cb check_cb; \
- void* queue[2]; \
+ struct uv__queue queue; \
#define UV_IDLE_PRIVATE_FIELDS \
uv_idle_cb idle_cb; \
- void* queue[2]; \
+ struct uv__queue queue; \
#define UV_ASYNC_PRIVATE_FIELDS \
uv_async_cb async_cb; \
- void* queue[2]; \
+ struct uv__queue queue; \
int pending; \
#define UV_TIMER_PRIVATE_FIELDS \
@@ -345,7 +344,7 @@
int retcode;
#define UV_PROCESS_PRIVATE_FIELDS \
- void* queue[2]; \
+ struct uv__queue queue; \
int status; \
#define UV_FS_PRIVATE_FIELDS \
@@ -410,6 +409,8 @@
# define UV_FS_O_DIRECT 0x04000
#elif defined(__linux__) && defined(__x86_64__)
# define UV_FS_O_DIRECT 0x04000
+#elif defined(__linux__) && defined(__loongarch__)
+# define UV_FS_O_DIRECT 0x04000
#elif defined(O_DIRECT)
# define UV_FS_O_DIRECT O_DIRECT
#else
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/version.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/version.h
index 9c9d292..24fac8d 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/version.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/version.h
@@ -31,8 +31,8 @@
*/
#define UV_VERSION_MAJOR 1
-#define UV_VERSION_MINOR 44
-#define UV_VERSION_PATCH 2
+#define UV_VERSION_MINOR 46
+#define UV_VERSION_PATCH 0
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""
diff --git a/wpinet/src/main/native/thirdparty/libuv/include/uv/win.h b/wpinet/src/main/native/thirdparty/libuv/include/uv/win.h
index 0a33366..613065d 100644
--- a/wpinet/src/main/native/thirdparty/libuv/include/uv/win.h
+++ b/wpinet/src/main/native/thirdparty/libuv/include/uv/win.h
@@ -20,7 +20,7 @@
*/
#ifndef _WIN32_WINNT
-# define _WIN32_WINNT 0x0600
+# define _WIN32_WINNT 0x0A00
#endif
#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
@@ -59,7 +59,6 @@
#include <signal.h>
#include <fcntl.h>
#include <sys/stat.h>
-
#include <stdint.h>
#include "uv/tree.h"
@@ -71,6 +70,11 @@
# define S_IFLNK 0xA000
#endif
+// Define missing in Windows Kit Include\{VERSION}\ucrt\sys\stat.h
+#if defined(_CRT_INTERNAL_NONSTDC_NAMES) && _CRT_INTERNAL_NONSTDC_NAMES && !defined(S_IFIFO)
+# define S_IFIFO _S_IFIFO
+#endif
+
/* Additional signals supported by uv_signal and or uv_kill. The CRT defines
* the following signals already:
*
@@ -275,11 +279,12 @@
} uv_rwlock_t;
typedef struct {
- unsigned int n;
- unsigned int count;
+ unsigned threshold;
+ unsigned in;
uv_mutex_t mutex;
- uv_sem_t turnstile1;
- uv_sem_t turnstile2;
+ /* TODO: in v2 make this a uv_cond_t, without unused_ */
+ CONDITION_VARIABLE cond;
+ unsigned out;
} uv_barrier_t;
typedef struct {
@@ -349,14 +354,14 @@
uv_idle_t* next_idle_handle; \
/* This handle holds the peer sockets for the fast variant of uv_poll_t */ \
SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \
- /* Counter to keep track of active tcp streams */ \
+ /* No longer used. */ \
unsigned int active_tcp_streams; \
- /* Counter to keep track of active udp streams */ \
+ /* No longer used. */ \
unsigned int active_udp_streams; \
/* Counter to started timer */ \
uint64_t timer_counter; \
/* Threadpool */ \
- void* wq[2]; \
+ struct uv__queue wq; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async;
@@ -383,6 +388,7 @@
ULONG_PTR result; /* overlapped.Internal is reused to hold the result */\
HANDLE pipeHandle; \
DWORD duplex_flags; \
+ WCHAR* name; \
} connect; \
} u; \
struct uv_req_s* next_req;
@@ -484,7 +490,7 @@
uint32_t payload_remaining; \
uint64_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
} ipc_data_frame; \
- void* ipc_xfer_queue[2]; \
+ struct uv__queue ipc_xfer_queue; \
int ipc_xfer_queue_length; \
uv_write_t* non_overlapped_writes_tail; \
CRITICAL_SECTION readfile_thread_lock; \
@@ -498,7 +504,7 @@
struct { uv_pipe_connection_fields } conn; \
} pipe;
-/* TODO: put the parser states in an union - TTY handles are always half-duplex
+/* TODO: put the parser states in a union - TTY handles are always half-duplex
* so read-state can safely overlap write-state. */
#define UV_TTY_PRIVATE_FIELDS \
HANDLE handle; \
@@ -606,7 +612,7 @@
struct uv_process_exit_s { \
UV_REQ_FIELDS \
} exit_req; \
- BYTE* child_stdio_buffer; \
+ void* unused; /* TODO: retained for ABI compat; remove this in v2.x. */ \
int exit_signal; \
HANDLE wait_handle; \
HANDLE process_handle; \
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/inet.cpp b/wpinet/src/main/native/thirdparty/libuv/src/inet.cpp
index 1b19025..71c9e5b 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/inet.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/inet.cpp
@@ -17,12 +17,7 @@
#include <stdio.h>
#include <string.h>
-
-#if defined(_MSC_VER) && _MSC_VER < 1600
-# include "uv/stdint-msvc2008.h"
-#else
-# include <stdint.h>
-#endif
+#include <stdint.h>
#include "uv.h"
#include "uv-common.h"
@@ -139,7 +134,7 @@
tp += strlen(tp);
break;
}
- tp += sprintf(tp, "%x", words[i]);
+ tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x", words[i]);
}
/* Was it a trailing run of 0x00's? */
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/queue.h b/wpinet/src/main/native/thirdparty/libuv/src/queue.h
index ff3540a..5f8489e 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/queue.h
+++ b/wpinet/src/main/native/thirdparty/libuv/src/queue.h
@@ -18,91 +18,73 @@
#include <stddef.h>
-typedef void *QUEUE[2];
+#define uv__queue_data(pointer, type, field) \
+ ((type*) ((char*) (pointer) - offsetof(type, field)))
-/* Private macros. */
-#define QUEUE_NEXT(q) (*(QUEUE **) &((*(q))[0]))
-#define QUEUE_PREV(q) (*(QUEUE **) &((*(q))[1]))
-#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
-#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
+#define uv__queue_foreach(q, h) \
+ for ((q) = (h)->next; (q) != (h); (q) = (q)->next)
-/* Public macros. */
-#define QUEUE_DATA(ptr, type, field) \
- ((type *) ((char *) (ptr) - offsetof(type, field)))
+static inline void uv__queue_init(struct uv__queue* q) {
+ q->next = q;
+ q->prev = q;
+}
-/* Important note: mutating the list while QUEUE_FOREACH is
- * iterating over its elements results in undefined behavior.
- */
-#define QUEUE_FOREACH(q, h) \
- for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
+static inline int uv__queue_empty(const struct uv__queue* q) {
+ return q == q->next;
+}
-#define QUEUE_EMPTY(q) \
- ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
+static inline struct uv__queue* uv__queue_head(const struct uv__queue* q) {
+ return q->next;
+}
-#define QUEUE_HEAD(q) \
- (QUEUE_NEXT(q))
+static inline struct uv__queue* uv__queue_next(const struct uv__queue* q) {
+ return q->next;
+}
-#define QUEUE_INIT(q) \
- do { \
- QUEUE_NEXT(q) = (q); \
- QUEUE_PREV(q) = (q); \
- } \
- while (0)
+static inline void uv__queue_add(struct uv__queue* h, struct uv__queue* n) {
+ h->prev->next = n->next;
+ n->next->prev = h->prev;
+ h->prev = n->prev;
+ h->prev->next = h;
+}
-#define QUEUE_ADD(h, n) \
- do { \
- QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \
- QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \
- QUEUE_PREV(h) = QUEUE_PREV(n); \
- QUEUE_PREV_NEXT(h) = (h); \
- } \
- while (0)
+static inline void uv__queue_split(struct uv__queue* h,
+ struct uv__queue* q,
+ struct uv__queue* n) {
+ n->prev = h->prev;
+ n->prev->next = n;
+ n->next = q;
+ h->prev = q->prev;
+ h->prev->next = h;
+ q->prev = n;
+}
-#define QUEUE_SPLIT(h, q, n) \
- do { \
- QUEUE_PREV(n) = QUEUE_PREV(h); \
- QUEUE_PREV_NEXT(n) = (n); \
- QUEUE_NEXT(n) = (q); \
- QUEUE_PREV(h) = QUEUE_PREV(q); \
- QUEUE_PREV_NEXT(h) = (h); \
- QUEUE_PREV(q) = (n); \
- } \
- while (0)
+static inline void uv__queue_move(struct uv__queue* h, struct uv__queue* n) {
+ if (uv__queue_empty(h))
+ uv__queue_init(n);
+ else
+ uv__queue_split(h, h->next, n);
+}
-#define QUEUE_MOVE(h, n) \
- do { \
- if (QUEUE_EMPTY(h)) \
- QUEUE_INIT(n); \
- else { \
- QUEUE* q = QUEUE_HEAD(h); \
- QUEUE_SPLIT(h, q, n); \
- } \
- } \
- while (0)
+static inline void uv__queue_insert_head(struct uv__queue* h,
+ struct uv__queue* q) {
+ q->next = h->next;
+ q->prev = h;
+ q->next->prev = q;
+ h->next = q;
+}
-#define QUEUE_INSERT_HEAD(h, q) \
- do { \
- QUEUE_NEXT(q) = QUEUE_NEXT(h); \
- QUEUE_PREV(q) = (h); \
- QUEUE_NEXT_PREV(q) = (q); \
- QUEUE_NEXT(h) = (q); \
- } \
- while (0)
+static inline void uv__queue_insert_tail(struct uv__queue* h,
+ struct uv__queue* q) {
+ q->next = h;
+ q->prev = h->prev;
+ q->prev->next = q;
+ h->prev = q;
+}
-#define QUEUE_INSERT_TAIL(h, q) \
- do { \
- QUEUE_NEXT(q) = (h); \
- QUEUE_PREV(q) = QUEUE_PREV(h); \
- QUEUE_PREV_NEXT(q) = (q); \
- QUEUE_PREV(h) = (q); \
- } \
- while (0)
-
-#define QUEUE_REMOVE(q) \
- do { \
- QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \
- QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \
- } \
- while (0)
+static inline void uv__queue_remove(struct uv__queue* q) {
+ q->prev->next = q->next;
+ q->next->prev = q->prev;
+}
#endif /* QUEUE_H_ */
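For illustration only, since uv__queue is an internal type (see the "do not use" note in uv.h above): the macro-based QUEUE API becomes inline functions over an intrusive doubly linked node. A container embeds a node and recovers itself with uv__queue_data(); the names item and example below are made up:

struct item {
  int value;
  struct uv__queue node;               /* intrusive link, like handle_queue */
};

static void example(void) {
  struct uv__queue head;
  struct uv__queue* q;
  struct item a = { 1 }, b = { 2 };

  uv__queue_init(&head);
  uv__queue_insert_tail(&head, &a.node);
  uv__queue_insert_tail(&head, &b.node);

  uv__queue_foreach(q, &head) {
    struct item* it = uv__queue_data(q, struct item, node);
    (void) it->value;                  /* visits 1, then 2 */
  }
}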
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/thread-common.cpp b/wpinet/src/main/native/thirdparty/libuv/src/thread-common.cpp
new file mode 100644
index 0000000..c0e39b5
--- /dev/null
+++ b/wpinet/src/main/native/thirdparty/libuv/src/thread-common.cpp
@@ -0,0 +1,175 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <stdlib.h>
+#ifndef _WIN32
+#include <pthread.h>
+#endif
+
+#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
+STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
+#endif
+
+/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
+#if defined(_AIX) || \
+ defined(__OpenBSD__) || \
+ !defined(PTHREAD_BARRIER_SERIAL_THREAD)
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ int rc;
+#ifdef _WIN32
+ uv_barrier_t* b;
+ b = barrier;
+
+ if (barrier == NULL || count == 0)
+ return UV_EINVAL;
+#else
+ struct _uv_barrier* b;
+
+ if (barrier == NULL || count == 0)
+ return UV_EINVAL;
+
+ b = (struct _uv_barrier *)uv__malloc(sizeof(*b));
+ if (b == NULL)
+ return UV_ENOMEM;
+#endif
+
+ b->in = 0;
+ b->out = 0;
+ b->threshold = count;
+
+ rc = uv_mutex_init(&b->mutex);
+ if (rc != 0)
+ goto error2;
+
+ /* TODO(vjnash): remove these uv_cond_t casts in v2. */
+ rc = uv_cond_init((uv_cond_t*) &b->cond);
+ if (rc != 0)
+ goto error;
+
+#ifndef _WIN32
+ barrier->b = b;
+#endif
+ return 0;
+
+error:
+ uv_mutex_destroy(&b->mutex);
+error2:
+#ifndef _WIN32
+ uv__free(b);
+#endif
+ return rc;
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ int last;
+#ifdef _WIN32
+ uv_barrier_t* b;
+ b = barrier;
+#else
+ struct _uv_barrier* b;
+
+ if (barrier == NULL || barrier->b == NULL)
+ return UV_EINVAL;
+
+ b = barrier->b;
+#endif
+
+ uv_mutex_lock(&b->mutex);
+
+ while (b->out != 0)
+ uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
+
+ if (++b->in == b->threshold) {
+ b->in = 0;
+ b->out = b->threshold;
+ uv_cond_broadcast((uv_cond_t*) &b->cond);
+ } else {
+ do
+ uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
+ while (b->in != 0);
+ }
+
+ last = (--b->out == 0);
+ if (last)
+ uv_cond_broadcast((uv_cond_t*) &b->cond);
+
+ uv_mutex_unlock(&b->mutex);
+ return last;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+#ifdef _WIN32
+ uv_barrier_t* b;
+ b = barrier;
+#else
+ struct _uv_barrier* b;
+ b = barrier->b;
+#endif
+
+ uv_mutex_lock(&b->mutex);
+
+ assert(b->in == 0);
+ while (b->out != 0)
+ uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
+
+ if (b->in != 0)
+ abort();
+
+ uv_mutex_unlock(&b->mutex);
+ uv_mutex_destroy(&b->mutex);
+ uv_cond_destroy((uv_cond_t*) &b->cond);
+
+#ifndef _WIN32
+ uv__free(barrier->b);
+ barrier->b = NULL;
+#endif
+}
+
+#else
+
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ return UV__ERR(pthread_barrier_init(barrier, NULL, count));
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ int rc;
+
+ rc = pthread_barrier_wait(barrier);
+ if (rc != 0)
+ if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
+ abort();
+
+ return rc == PTHREAD_BARRIER_SERIAL_THREAD;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+ if (pthread_barrier_destroy(barrier))
+ abort();
+}
+
+#endif
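A short usage sketch of the barrier implemented above: uv_barrier_wait() returns non-zero in exactly one of the waiting threads, which is the conventional place to destroy the barrier. The thread count and worker body are illustrative:

#include <uv.h>

static uv_barrier_t blocker;

static void worker(void* arg) {
  /* ... per-thread setup ... */
  if (uv_barrier_wait(&blocker) > 0)
    uv_barrier_destroy(&blocker);      /* the lone "serializer" cleans up */
}

static void run_workers(void) {
  uv_thread_t threads[4];
  int i;

  uv_barrier_init(&blocker, 4);
  for (i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
}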
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/threadpool.cpp b/wpinet/src/main/native/thirdparty/libuv/src/threadpool.cpp
index 718972c..aa282af 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/threadpool.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/threadpool.cpp
@@ -41,10 +41,10 @@
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
-static QUEUE exit_message;
-static QUEUE wq;
-static QUEUE run_slow_work_message;
-static QUEUE slow_io_pending_wq;
+static struct uv__queue exit_message;
+static struct uv__queue wq;
+static struct uv__queue run_slow_work_message;
+static struct uv__queue slow_io_pending_wq;
static unsigned int slow_work_thread_threshold(void) {
return (nthreads + 1) / 2;
@@ -60,7 +60,7 @@
*/
static void worker(void* arg) {
struct uv__work* w;
- QUEUE* q;
+ struct uv__queue* q;
int is_slow_work;
uv_sem_post((uv_sem_t*) arg);
@@ -72,49 +72,49 @@
/* Keep waiting while either no work is present or only slow I/O
and we're at the threshold for that. */
- while (QUEUE_EMPTY(&wq) ||
- (QUEUE_HEAD(&wq) == &run_slow_work_message &&
- QUEUE_NEXT(&run_slow_work_message) == &wq &&
+ while (uv__queue_empty(&wq) ||
+ (uv__queue_head(&wq) == &run_slow_work_message &&
+ uv__queue_next(&run_slow_work_message) == &wq &&
slow_io_work_running >= slow_work_thread_threshold())) {
idle_threads += 1;
uv_cond_wait(&cond, &mutex);
idle_threads -= 1;
}
- q = QUEUE_HEAD(&wq);
+ q = uv__queue_head(&wq);
if (q == &exit_message) {
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
break;
}
- QUEUE_REMOVE(q);
- QUEUE_INIT(q); /* Signal uv_cancel() that the work req is executing. */
+ uv__queue_remove(q);
+ uv__queue_init(q); /* Signal uv_cancel() that the work req is executing. */
is_slow_work = 0;
if (q == &run_slow_work_message) {
/* If we're at the slow I/O threshold, re-schedule until after all
other work in the queue is done. */
if (slow_io_work_running >= slow_work_thread_threshold()) {
- QUEUE_INSERT_TAIL(&wq, q);
+ uv__queue_insert_tail(&wq, q);
continue;
}
/* If we encountered a request to run slow I/O work but there is none
to run, that means it's cancelled => Start over. */
- if (QUEUE_EMPTY(&slow_io_pending_wq))
+ if (uv__queue_empty(&slow_io_pending_wq))
continue;
is_slow_work = 1;
slow_io_work_running++;
- q = QUEUE_HEAD(&slow_io_pending_wq);
- QUEUE_REMOVE(q);
- QUEUE_INIT(q);
+ q = uv__queue_head(&slow_io_pending_wq);
+ uv__queue_remove(q);
+ uv__queue_init(q);
/* If there is more slow I/O work, schedule it to be run as well. */
- if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
- QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
+ if (!uv__queue_empty(&slow_io_pending_wq)) {
+ uv__queue_insert_tail(&wq, &run_slow_work_message);
if (idle_threads > 0)
uv_cond_signal(&cond);
}
@@ -122,13 +122,13 @@
uv_mutex_unlock(&mutex);
- w = QUEUE_DATA(q, struct uv__work, wq);
+ w = uv__queue_data(q, struct uv__work, wq);
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
- QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
+ uv__queue_insert_tail(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
@@ -143,12 +143,12 @@
}
-static void post(QUEUE* q, enum uv__work_kind kind) {
+static void post(struct uv__queue* q, enum uv__work_kind kind) {
uv_mutex_lock(&mutex);
if (kind == UV__WORK_SLOW_IO) {
/* Insert into a separate queue. */
- QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
- if (!QUEUE_EMPTY(&run_slow_work_message)) {
+ uv__queue_insert_tail(&slow_io_pending_wq, q);
+ if (!uv__queue_empty(&run_slow_work_message)) {
/* Running slow I/O tasks is already scheduled => Nothing to do here.
The worker that runs said other task will schedule this one as well. */
uv_mutex_unlock(&mutex);
@@ -157,7 +157,7 @@
q = &run_slow_work_message;
}
- QUEUE_INSERT_TAIL(&wq, q);
+ uv__queue_insert_tail(&wq, q);
if (idle_threads > 0)
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
@@ -195,6 +195,7 @@
static void init_threads(void) {
+ uv_thread_options_t config;
unsigned int i;
const char* val;
uv_sem_t sem;
@@ -223,15 +224,18 @@
if (uv_mutex_init(&mutex))
abort();
- QUEUE_INIT(&wq);
- QUEUE_INIT(&slow_io_pending_wq);
- QUEUE_INIT(&run_slow_work_message);
+ uv__queue_init(&wq);
+ uv__queue_init(&slow_io_pending_wq);
+ uv__queue_init(&run_slow_work_message);
if (uv_sem_init(&sem, 0))
abort();
+ config.flags = UV_THREAD_HAS_STACK_SIZE;
+ config.stack_size = 8u << 20; /* 8 MB */
+
for (i = 0; i < nthreads; i++)
- if (uv_thread_create(threads + i, worker, &sem))
+ if (uv_thread_create_ex(threads + i, &config, worker, &sem))
abort();
for (i = 0; i < nthreads; i++)
@@ -275,15 +279,19 @@
}
+/* TODO(bnoordhuis) teach libuv how to cancel file operations
+ * that go through io_uring instead of the thread pool.
+ */
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
int cancelled;
+ uv_once(&once, init_once); /* Ensure |mutex| is initialized. */
uv_mutex_lock(&mutex);
uv_mutex_lock(&w->loop->wq_mutex);
- cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
+ cancelled = !uv__queue_empty(&w->wq) && w->work != NULL;
if (cancelled)
- QUEUE_REMOVE(&w->wq);
+ uv__queue_remove(&w->wq);
uv_mutex_unlock(&w->loop->wq_mutex);
uv_mutex_unlock(&mutex);
@@ -293,7 +301,7 @@
w->work = uv__cancelled;
uv_mutex_lock(&loop->wq_mutex);
- QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
+ uv__queue_insert_tail(&loop->wq, &w->wq);
uv_async_send(&loop->wq_async);
uv_mutex_unlock(&loop->wq_mutex);
@@ -304,22 +312,39 @@
void uv__work_done(uv_async_t* handle) {
struct uv__work* w;
uv_loop_t* loop;
- QUEUE* q;
- QUEUE wq;
+ struct uv__queue* q;
+ struct uv__queue wq;
int err;
+ int nevents;
loop = container_of(handle, uv_loop_t, wq_async);
uv_mutex_lock(&loop->wq_mutex);
- QUEUE_MOVE(&loop->wq, &wq);
+ uv__queue_move(&loop->wq, &wq);
uv_mutex_unlock(&loop->wq_mutex);
- while (!QUEUE_EMPTY(&wq)) {
- q = QUEUE_HEAD(&wq);
- QUEUE_REMOVE(q);
+ nevents = 0;
+
+ while (!uv__queue_empty(&wq)) {
+ q = uv__queue_head(&wq);
+ uv__queue_remove(q);
w = container_of(q, struct uv__work, wq);
err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
w->done(w, err);
+ nevents++;
+ }
+
+ /* This check accomplishes 2 things:
+ * 1. Even if the queue was empty, the call to uv__work_done() should count
+ * as an event. Which will have been added by the event loop when
+ * calling this callback.
+ * 2. Prevents accidental wrap around in case nevents == 0 events == 0.
+ */
+ if (nevents > 1) {
+ /* Subtract 1 to counter the call to uv__work_done(). */
+ uv__metrics_inc_events(loop, nevents - 1);
+ if (uv__get_internal_fields(loop)->current_timeout == 0)
+ uv__metrics_inc_events_waiting(loop, nevents - 1);
}
}
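The public entry point into this pool is uv_queue_work(); a minimal sketch of how work lands on the queues manipulated above, with placeholder callback bodies. uv_cancel() only succeeds while the request is still queued, in which case the after-work callback sees UV_ECANCELED:

#include <uv.h>

static void on_work(uv_work_t* req) {
  /* runs on one of the pool threads started by init_threads() */
}

static void on_after_work(uv_work_t* req, int status) {
  /* status is UV_ECANCELED if uv_cancel() won the race, 0 otherwise */
}

static void submit(uv_loop_t* loop) {
  static uv_work_t req;
  uv_queue_work(loop, &req, on_work, on_after_work);
  /* uv_cancel((uv_req_t*) &req); -- optional */
}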
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/async.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/async.cpp
index e1805c3..fef4ae9 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/async.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/async.cpp
@@ -24,7 +24,6 @@
#include "uv.h"
#include "internal.h"
-#include "atomic-ops.h"
#include <errno.h>
#include <stdio.h> /* snprintf() */
@@ -38,8 +37,11 @@
#include <sys/eventfd.h>
#endif
+#include <atomic>
+
static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
+static void uv__cpu_relax(void);
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
@@ -52,8 +54,9 @@
uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
handle->async_cb = async_cb;
handle->pending = 0;
+ handle->u.fd = 0; /* This will be used as a busy flag. */
- QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
+ uv__queue_insert_tail(&loop->async_handles, &handle->queue);
uv__handle_start(handle);
return 0;
@@ -61,46 +64,54 @@
int uv_async_send(uv_async_t* handle) {
+ std::atomic<int>* pending;
+ std::atomic<int>* busy;
+
+ pending = (std::atomic<int>*) &handle->pending;
+ busy = (std::atomic<int>*) &handle->u.fd;
+
/* Do a cheap read first. */
- if (ACCESS_ONCE(int, handle->pending) != 0)
+ if (atomic_load_explicit(pending, std::memory_order_relaxed) != 0)
return 0;
- /* Tell the other thread we're busy with the handle. */
- if (cmpxchgi(&handle->pending, 0, 1) != 0)
- return 0;
+ /* Set the loop to busy. */
+ atomic_fetch_add(busy, 1);
/* Wake up the other thread's event loop. */
- uv__async_send(handle->loop);
+ if (atomic_exchange(pending, 1) == 0)
+ uv__async_send(handle->loop);
- /* Tell the other thread we're done. */
- if (cmpxchgi(&handle->pending, 1, 2) != 1)
- abort();
+ /* Set the loop to not-busy. */
+ atomic_fetch_add(busy, -1);
return 0;
}
-/* Only call this from the event loop thread. */
-static int uv__async_spin(uv_async_t* handle) {
+/* Wait for the busy flag to clear before closing.
+ * Only call this from the event loop thread. */
+static void uv__async_spin(uv_async_t* handle) {
+ std::atomic<int>* pending;
+ std::atomic<int>* busy;
int i;
- int rc;
+
+ pending = (std::atomic<int>*) &handle->pending;
+ busy = (std::atomic<int>*) &handle->u.fd;
+
+ /* Set the pending flag first, so no new events will be added by other
+ * threads after this function returns. */
+ atomic_store(pending, 1);
for (;;) {
- /* 997 is not completely chosen at random. It's a prime number, acyclical
- * by nature, and should therefore hopefully dampen sympathetic resonance.
+ /* 997 is not completely chosen at random. It's a prime number, acyclic by
+ * nature, and should therefore hopefully dampen sympathetic resonance.
*/
for (i = 0; i < 997; i++) {
- /* rc=0 -- handle is not pending.
- * rc=1 -- handle is pending, other thread is still working with it.
- * rc=2 -- handle is pending, other thread is done.
- */
- rc = cmpxchgi(&handle->pending, 2, 0);
-
- if (rc != 1)
- return rc;
+ if (atomic_load(busy) == 0)
+ return;
/* Other thread is busy with this handle, spin until it's done. */
- cpu_relax();
+ uv__cpu_relax();
}
/* Yield the CPU. We may have preempted the other thread while it's
@@ -114,7 +125,7 @@
void uv__async_close(uv_async_t* handle) {
uv__async_spin(handle);
- QUEUE_REMOVE(&handle->queue);
+ uv__queue_remove(&handle->queue);
uv__handle_stop(handle);
}
@@ -122,9 +133,10 @@
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
char buf[1024];
ssize_t r;
- QUEUE queue;
- QUEUE* q;
+ struct uv__queue queue;
+ struct uv__queue* q;
uv_async_t* h;
+ std::atomic<int> *pending;
assert(w == &loop->async_io_watcher);
@@ -146,16 +158,18 @@
abort();
}
- QUEUE_MOVE(&loop->async_handles, &queue);
- while (!QUEUE_EMPTY(&queue)) {
- q = QUEUE_HEAD(&queue);
- h = QUEUE_DATA(q, uv_async_t, queue);
+ uv__queue_move(&loop->async_handles, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ h = uv__queue_data(q, uv_async_t, queue);
- QUEUE_REMOVE(q);
- QUEUE_INSERT_TAIL(&loop->async_handles, q);
+ uv__queue_remove(q);
+ uv__queue_insert_tail(&loop->async_handles, q);
- if (0 == uv__async_spin(h))
- continue; /* Not pending. */
+ /* Atomically fetch and clear pending flag */
+ pending = (std::atomic<int>*) &h->pending;
+ if (atomic_exchange(pending, 0) == 0)
+ continue;
if (h->async_cb == NULL)
continue;
@@ -227,20 +241,28 @@
}
-int uv__async_fork(uv_loop_t* loop) {
- if (loop->async_io_watcher.fd == -1) /* never started */
- return 0;
-
- uv__async_stop(loop);
-
- return uv__async_start(loop);
-}
-
-
void uv__async_stop(uv_loop_t* loop) {
+ struct uv__queue queue;
+ struct uv__queue* q;
+ uv_async_t* h;
+
if (loop->async_io_watcher.fd == -1)
return;
+ /* Make sure no other thread is accessing the async handle fd after the loop
+ * cleanup.
+ */
+ uv__queue_move(&loop->async_handles, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ h = uv__queue_data(q, uv_async_t, queue);
+
+ uv__queue_remove(q);
+ uv__queue_insert_tail(&loop->async_handles, q);
+
+ uv__async_spin(h);
+ }
+
if (loop->async_wfd != -1) {
if (loop->async_wfd != loop->async_io_watcher.fd)
uv__close(loop->async_wfd);
@@ -251,3 +273,58 @@
uv__close(loop->async_io_watcher.fd);
loop->async_io_watcher.fd = -1;
}
+
+
+int uv__async_fork(uv_loop_t* loop) {
+ struct uv__queue queue;
+ struct uv__queue* q;
+ uv_async_t* h;
+
+ if (loop->async_io_watcher.fd == -1) /* never started */
+ return 0;
+
+ uv__queue_move(&loop->async_handles, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ h = uv__queue_data(q, uv_async_t, queue);
+
+ uv__queue_remove(q);
+ uv__queue_insert_tail(&loop->async_handles, q);
+
+ /* The state of any thread that set pending is now likely corrupt in this
+ * child because the user called fork, so just clear these flags and move
+ * on. Calling most libc functions after `fork` is declared to be undefined
+ * behavior anyways, unless async-signal-safe, for multithreaded programs
+ * like libuv, and nothing interesting in pthreads is async-signal-safe.
+ */
+ h->pending = 0;
+ /* This is the busy flag, and we just abruptly lost all other threads. */
+ h->u.fd = 0;
+ }
+
+ /* Recreate these, since they still exist, but belong to the wrong pid now. */
+ if (loop->async_wfd != -1) {
+ if (loop->async_wfd != loop->async_io_watcher.fd)
+ uv__close(loop->async_wfd);
+ loop->async_wfd = -1;
+ }
+
+ uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
+ uv__close(loop->async_io_watcher.fd);
+ loop->async_io_watcher.fd = -1;
+
+ return uv__async_start(loop);
+}
+
+
+static void uv__cpu_relax(void) {
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
+#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
+ __asm__ __volatile__ ("yield" ::: "memory");
+#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
+ __asm volatile ("" : : : "memory");
+#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
+ __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
+#endif
+}
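For context, the pattern this file implements: uv_async_send() is the one libuv call documented as safe from any thread, and repeated sends may be coalesced into a single callback on the loop thread. Handle and function names below are illustrative:

#include <uv.h>

static uv_async_t wakeup;

static void on_wakeup(uv_async_t* handle) {
  /* runs on the loop thread that owns the handle */
}

static void worker_thread(void* arg) {
  uv_async_send(&wakeup);              /* safe to call from any thread */
}

static void setup(uv_loop_t* loop) {
  uv_thread_t tid;
  uv_async_init(loop, &wakeup, on_wakeup);
  uv_thread_create(&tid, worker_thread, NULL);
}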
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/atomic-ops.h b/wpinet/src/main/native/thirdparty/libuv/src/unix/atomic-ops.h
deleted file mode 100644
index 58043c4..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/atomic-ops.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef UV_ATOMIC_OPS_H_
-#define UV_ATOMIC_OPS_H_
-
-#include "internal.h" /* UV_UNUSED */
-
-#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-#include <atomic.h>
-#endif
-
-UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
-UV_UNUSED(static void cpu_relax(void));
-
-/* Prefer hand-rolled assembly over the gcc builtins because the latter also
- * issue full memory barriers.
- */
-UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
-#if defined(__i386__) || defined(__x86_64__)
- int out;
- __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
- : "=a" (out), "+m" (*(volatile int*) ptr)
- : "r" (newval), "0" (oldval)
- : "memory");
- return out;
-#elif defined(__MVS__)
- /* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
- * a runtime bug.
- */
- __asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
- return oldval;
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
-#else
- return __sync_val_compare_and_swap(ptr, oldval, newval);
-#endif
-}
-
-UV_UNUSED(static void cpu_relax(void)) {
-#if defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
-#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
- __asm__ __volatile__ ("yield" ::: "memory");
-#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
- __asm volatile ("" : : : "memory");
-#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
- __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
-#endif
-}
-
-#endif /* UV_ATOMIC_OPS_H_ */
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/core.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/core.cpp
index 4c23f60..ce7fd2c 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/core.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/core.cpp
@@ -41,12 +41,14 @@
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
+#include <grp.h>
#include <sys/utsname.h>
#include <sys/time.h>
+#include <time.h> /* clock_gettime */
+#include <atomic>
#ifdef __sun
# include <sys/filio.h>
-# include <sys/types.h>
# include <sys/wait.h>
#endif
@@ -66,13 +68,14 @@
#if defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
+# include <sys/param.h>
# if defined(__FreeBSD__)
+# include <sys/cpuset.h>
# define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
@@ -107,6 +110,35 @@
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
+/* https://github.com/libuv/libuv/issues/1674 */
+int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
+ struct timespec t;
+ int r;
+
+ if (ts == NULL)
+ return UV_EFAULT;
+
+ switch (clock_id) {
+ default:
+ return UV_EINVAL;
+ case UV_CLOCK_MONOTONIC:
+ r = clock_gettime(CLOCK_MONOTONIC, &t);
+ break;
+ case UV_CLOCK_REALTIME:
+ r = clock_gettime(CLOCK_REALTIME, &t);
+ break;
+ }
+
+ if (r)
+ return UV__ERR(errno);
+
+ ts->tv_sec = t.tv_sec;
+ ts->tv_nsec = t.tv_nsec;
+
+ return 0;
+}
+
+
uint64_t uv_hrtime(void) {
return uv__hrtime(UV_CLOCK_PRECISE);
}
@@ -232,10 +264,10 @@
#if defined(IOV_MAX)
return IOV_MAX;
#elif defined(_SC_IOV_MAX)
- static int iovmax_cached = -1;
+ static std::atomic<int> iovmax_cached = -1;
int iovmax;
- iovmax = uv__load_relaxed(&iovmax_cached);
+ iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
if (iovmax != -1)
return iovmax;
@@ -247,7 +279,7 @@
if (iovmax == -1)
iovmax = 1;
- uv__store_relaxed(&iovmax_cached, iovmax);
+ atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);
return iovmax;
#else
@@ -313,7 +345,7 @@
}
uv__handle_unref(handle);
- QUEUE_REMOVE(&handle->handle_queue);
+ uv__queue_remove(&handle->handle_queue);
if (handle->close_cb) {
handle->close_cb(handle);
@@ -349,7 +381,7 @@
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
- !QUEUE_EMPTY(&loop->pending_queue) ||
+ !uv__queue_empty(&loop->pending_queue) ||
loop->closing_handles != NULL;
}
@@ -358,8 +390,9 @@
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
- QUEUE_EMPTY(&loop->pending_queue) &&
- QUEUE_EMPTY(&loop->idle_handles) &&
+ uv__queue_empty(&loop->pending_queue) &&
+ uv__queue_empty(&loop->idle_handles) &&
+ (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
loop->closing_handles == NULL)
return uv__next_timeout(loop);
return 0;
@@ -367,7 +400,7 @@
int uv_backend_timeout(const uv_loop_t* loop) {
- if (QUEUE_EMPTY(&loop->watcher_queue))
+ if (uv__queue_empty(&loop->watcher_queue))
return uv__backend_timeout(loop);
/* Need to call uv_run to update the backend fd state. */
return 0;
@@ -388,12 +421,19 @@
if (!r)
uv__update_time(loop);
- while (r != 0 && loop->stop_flag == 0) {
+ /* Maintain backwards compatibility by processing timers before entering the
+ * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
+ * once, which should be done after polling in order to maintain proper
+ * execution order of the conceptual event loop. */
+ if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
uv__update_time(loop);
uv__run_timers(loop);
+ }
+ while (r != 0 && loop->stop_flag == 0) {
can_sleep =
- QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
+ uv__queue_empty(&loop->pending_queue) &&
+ uv__queue_empty(&loop->idle_handles);
uv__run_pending(loop);
uv__run_idle(loop);
@@ -403,11 +443,13 @@
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv__backend_timeout(loop);
+ uv__metrics_inc_loop_count(loop);
+
uv__io_poll(loop, timeout);
/* Process immediate callbacks (e.g. write_cb) a small fixed number of
* times to avoid loop starvation.*/
- for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
+ for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
uv__run_pending(loop);
/* Run one final update on the provider_idle_time in case uv__io_poll
@@ -420,18 +462,8 @@
uv__run_check(loop);
uv__run_closing_handles(loop);
- if (mode == UV_RUN_ONCE) {
- /* UV_RUN_ONCE implies forward progress: at least one callback must have
- * been invoked when it returns. uv__io_poll() can return without doing
- * I/O (meaning: no callbacks) when its timeout expires - which means we
- * have pending timers that satisfy the forward progress constraint.
- *
- * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
- * the check.
- */
- uv__update_time(loop);
- uv__run_timers(loop);
- }
+ uv__update_time(loop);
+ uv__run_timers(loop);
r = uv__loop_alive(loop);
if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
@@ -805,17 +837,17 @@
static void uv__run_pending(uv_loop_t* loop) {
- QUEUE* q;
- QUEUE pq;
+ struct uv__queue* q;
+ struct uv__queue pq;
uv__io_t* w;
- QUEUE_MOVE(&loop->pending_queue, &pq);
+ uv__queue_move(&loop->pending_queue, &pq);
- while (!QUEUE_EMPTY(&pq)) {
- q = QUEUE_HEAD(&pq);
- QUEUE_REMOVE(q);
- QUEUE_INIT(q);
- w = QUEUE_DATA(q, uv__io_t, pending_queue);
+ while (!uv__queue_empty(&pq)) {
+ q = uv__queue_head(&pq);
+ uv__queue_remove(q);
+ uv__queue_init(q);
+ w = uv__queue_data(q, uv__io_t, pending_queue);
w->cb(loop, w, POLLOUT);
}
}
@@ -862,7 +894,7 @@
watchers[nwatchers] = fake_watcher_list;
watchers[nwatchers + 1] = fake_watcher_count;
- loop->watchers = watchers;
+ loop->watchers = (uv__io_t**)watchers;
loop->nwatchers = nwatchers;
}
@@ -870,17 +902,12 @@
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
assert(cb != NULL);
assert(fd >= -1);
- QUEUE_INIT(&w->pending_queue);
- QUEUE_INIT(&w->watcher_queue);
+ uv__queue_init(&w->pending_queue);
+ uv__queue_init(&w->watcher_queue);
w->cb = cb;
w->fd = fd;
w->events = 0;
w->pevents = 0;
-
-#if defined(UV_HAVE_KQUEUE)
- w->rcount = 0;
- w->wcount = 0;
-#endif /* defined(UV_HAVE_KQUEUE) */
}
@@ -902,8 +929,8 @@
return;
#endif
- if (QUEUE_EMPTY(&w->watcher_queue))
- QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ if (uv__queue_empty(&w->watcher_queue))
+ uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
if (loop->watchers[w->fd] == NULL) {
loop->watchers[w->fd] = w;
@@ -928,8 +955,8 @@
w->pevents &= ~events;
if (w->pevents == 0) {
- QUEUE_REMOVE(&w->watcher_queue);
- QUEUE_INIT(&w->watcher_queue);
+ uv__queue_remove(&w->watcher_queue);
+ uv__queue_init(&w->watcher_queue);
w->events = 0;
if (w == loop->watchers[w->fd]) {
@@ -938,14 +965,14 @@
loop->nfds--;
}
}
- else if (QUEUE_EMPTY(&w->watcher_queue))
- QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ else if (uv__queue_empty(&w->watcher_queue))
+ uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
- QUEUE_REMOVE(&w->pending_queue);
+ uv__queue_remove(&w->pending_queue);
/* Remove stale events for this file descriptor */
if (w->fd != -1)
@@ -954,8 +981,8 @@
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
- if (QUEUE_EMPTY(&w->pending_queue))
- QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
+ if (uv__queue_empty(&w->pending_queue))
+ uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}
@@ -1000,6 +1027,15 @@
rusage->ru_nivcsw = usage.ru_nivcsw;
#endif
+ /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
+ * the outliers because of course they are.
+ */
+#if defined(__APPLE__)
+ rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */
+#elif defined(__sun)
+ rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */
+#endif
+
return 0;
}
@@ -1099,8 +1135,8 @@
if (r != UV_ENOENT)
return r;
- /* HOME is not set, so call uv__getpwuid_r() */
- r = uv__getpwuid_r(&pwd);
+ /* HOME is not set, so call uv_os_get_passwd() */
+ r = uv_os_get_passwd(&pwd);
if (r != 0) {
return r;
@@ -1173,11 +1209,10 @@
}
-int uv__getpwuid_r(uv_passwd_t* pwd) {
+static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
struct passwd pw;
struct passwd* result;
char* buf;
- uid_t uid;
size_t bufsize;
size_t name_size;
size_t homedir_size;
@@ -1187,8 +1222,6 @@
if (pwd == NULL)
return UV_EINVAL;
- uid = geteuid();
-
/* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
* is frequently 1024 or 4096, so we can just use that directly. The pwent
* will not usually be large. */
@@ -1247,24 +1280,98 @@
}
-void uv_os_free_passwd(uv_passwd_t* pwd) {
- if (pwd == NULL)
- return;
+int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
+#if defined(__ANDROID__) && __ANDROID_API__ < 24
+ /* This function getgrgid_r() was added in Android N (level 24) */
+ return UV_ENOSYS;
+#else
+ struct group gp;
+ struct group* result;
+ char* buf;
+ char* gr_mem;
+ size_t bufsize;
+ size_t name_size;
+ long members;
+ size_t mem_size;
+ int r;
- /*
- The memory for name, shell, and homedir are allocated in a single
- uv__malloc() call. The base of the pointer is stored in pwd->username, so
- that is the field that needs to be freed.
- */
- uv__free(pwd->username);
- pwd->username = NULL;
- pwd->shell = NULL;
- pwd->homedir = NULL;
+ if (grp == NULL)
+ return UV_EINVAL;
+
+ /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
+ * is frequently 1024 or 4096, so we can just use that directly. The pwent
+ * will not usually be large. */
+ for (bufsize = 2000;; bufsize *= 2) {
+ buf = (char*)uv__malloc(bufsize);
+
+ if (buf == NULL)
+ return UV_ENOMEM;
+
+ do
+ r = getgrgid_r(gid, &gp, buf, bufsize, &result);
+ while (r == EINTR);
+
+ if (r != 0 || result == NULL)
+ uv__free(buf);
+
+ if (r != ERANGE)
+ break;
+ }
+
+ if (r != 0)
+ return UV__ERR(r);
+
+ if (result == NULL)
+ return UV_ENOENT;
+
+ /* Allocate memory for the groupname and members. */
+ name_size = strlen(gp.gr_name) + 1;
+ members = 0;
+ mem_size = sizeof(char*);
+ for (r = 0; gp.gr_mem[r] != NULL; r++) {
+ mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
+ members++;
+ }
+
+ gr_mem = (char*)uv__malloc(name_size + mem_size);
+ if (gr_mem == NULL) {
+ uv__free(buf);
+ return UV_ENOMEM;
+ }
+
+ /* Copy the members */
+ grp->members = (char**) gr_mem;
+ grp->members[members] = NULL;
+ gr_mem = (char*) &grp->members[members + 1];
+ for (r = 0; r < members; r++) {
+ grp->members[r] = gr_mem;
+ strcpy(gr_mem, gp.gr_mem[r]);
+ gr_mem += strlen(gr_mem) + 1;
+ }
+ assert(gr_mem == (char*)grp->members + mem_size);
+
+ /* Copy the groupname */
+ grp->groupname = gr_mem;
+ memcpy(grp->groupname, gp.gr_name, name_size);
+ gr_mem += name_size;
+
+ /* Copy the gid */
+ grp->gid = gp.gr_gid;
+
+ uv__free(buf);
+
+ return 0;
+#endif
}
int uv_os_get_passwd(uv_passwd_t* pwd) {
- return uv__getpwuid_r(pwd);
+ return uv__getpwuid_r(pwd, geteuid());
+}
+
+
+int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
+ return uv__getpwuid_r(pwd, uid);
}
@@ -1425,6 +1532,13 @@
return getppid();
}
+int uv_cpumask_size(void) {
+#if UV__CPU_AFFINITY_SUPPORTED
+ return CPU_SETSIZE;
+#else
+ return UV_ENOTSUP;
+#endif
+}
int uv_os_getpriority(uv_pid_t pid, int* priority) {
int r;
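A brief sketch of the account lookups added above; the uid/gid arguments are whatever the caller wants to query, and each successful call returns heap memory that must be released with the matching free function:

#include <uv.h>

static void show_account(uv_uid_t uid, uv_uid_t gid) {
  uv_passwd_t pwd;
  uv_group_t grp;
  char** m;

  if (uv_os_get_passwd2(&pwd, uid) == 0) {
    /* pwd.username, pwd.homedir, pwd.shell */
    uv_os_free_passwd(&pwd);
  }

  if (uv_os_get_group(&grp, gid) == 0) {
    for (m = grp.members; *m != NULL; m++)
      ;                                /* member names, NULL-terminated */
    uv_os_free_group(&grp);
  }
}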
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/cygwin.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/cygwin.cpp
index 169958d..4e54139 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/cygwin.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/cygwin.cpp
@@ -51,3 +51,7 @@
uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */
}
+
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin-stub.h b/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin-stub.h
index 433e3ef..b93cf67 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin-stub.h
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin-stub.h
@@ -27,7 +27,6 @@
struct CFArrayCallBacks;
struct CFRunLoopSourceContext;
struct FSEventStreamContext;
-struct CFRange;
typedef double CFAbsoluteTime;
typedef double CFTimeInterval;
@@ -43,23 +42,13 @@
typedef void* CFAllocatorRef;
typedef void* CFArrayRef;
typedef void* CFBundleRef;
-typedef void* CFDataRef;
typedef void* CFDictionaryRef;
-typedef void* CFMutableDictionaryRef;
-typedef struct CFRange CFRange;
typedef void* CFRunLoopRef;
typedef void* CFRunLoopSourceRef;
typedef void* CFStringRef;
typedef void* CFTypeRef;
typedef void* FSEventStreamRef;
-typedef uint32_t IOOptionBits;
-typedef unsigned int io_iterator_t;
-typedef unsigned int io_object_t;
-typedef unsigned int io_service_t;
-typedef unsigned int io_registry_entry_t;
-
-
typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
void*,
size_t,
@@ -80,11 +69,6 @@
void* pad[3];
};
-struct CFRange {
- CFIndex location;
- CFIndex length;
-};
-
static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
static const OSStatus noErr = 0;
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin.cpp
index ed51a6a..9ee5cd8 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/darwin.cpp
@@ -33,13 +33,10 @@
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
-#include "darwin-stub.h"
-
static uv_once_t once = UV_ONCE_INIT;
static uint64_t (*time_func)(void);
static mach_timebase_info_data_t timebase;
-typedef unsigned char UInt8;
int uv__platform_loop_init(uv_loop_t* loop) {
loop->cf_state = NULL;
@@ -110,7 +107,7 @@
if (host_statistics(mach_host_self(), HOST_VM_INFO,
(host_info_t)&info, &count) != KERN_SUCCESS) {
- return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */
+ return 0;
}
return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
@@ -123,7 +120,7 @@
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) info;
}
@@ -134,6 +131,11 @@
}
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
+
+
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
@@ -183,164 +185,17 @@
return 0;
}
-static int uv__get_cpu_speed(uint64_t* speed) {
- /* IOKit */
- void (*pIOObjectRelease)(io_object_t);
- kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
- CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
- kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
- CFMutableDictionaryRef,
- io_iterator_t*);
- io_service_t (*pIOIteratorNext)(io_iterator_t);
- CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
- CFStringRef,
- CFAllocatorRef,
- IOOptionBits);
-
- /* CoreFoundation */
- CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
- const char*,
- CFStringEncoding);
- CFStringEncoding (*pCFStringGetSystemEncoding)(void);
- UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
- CFIndex (*pCFDataGetLength)(CFDataRef);
- void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
- void (*pCFRelease)(CFTypeRef);
-
- void* core_foundation_handle;
- void* iokit_handle;
- int err;
-
- kern_return_t kr;
- mach_port_t mach_port;
- io_iterator_t it;
- io_object_t service;
-
- mach_port = 0;
-
- err = UV_ENOENT;
- core_foundation_handle = dlopen("/System/Library/Frameworks/"
- "CoreFoundation.framework/"
- "CoreFoundation",
- RTLD_LAZY | RTLD_LOCAL);
- iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
- "IOKit",
- RTLD_LAZY | RTLD_LOCAL);
-
- if (core_foundation_handle == NULL || iokit_handle == NULL)
- goto out;
-
-#define V(handle, symbol) \
- do { \
- *(void **)(&p ## symbol) = dlsym((handle), #symbol); \
- if (p ## symbol == NULL) \
- goto out; \
- } \
- while (0)
- V(iokit_handle, IOMasterPort);
- V(iokit_handle, IOServiceMatching);
- V(iokit_handle, IOServiceGetMatchingServices);
- V(iokit_handle, IOIteratorNext);
- V(iokit_handle, IOObjectRelease);
- V(iokit_handle, IORegistryEntryCreateCFProperty);
- V(core_foundation_handle, CFStringCreateWithCString);
- V(core_foundation_handle, CFStringGetSystemEncoding);
- V(core_foundation_handle, CFDataGetBytePtr);
- V(core_foundation_handle, CFDataGetLength);
- V(core_foundation_handle, CFDataGetBytes);
- V(core_foundation_handle, CFRelease);
-#undef V
-
-#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
-
- // Braces ensure goto doesn't jump into device_type_str's and
- // clock_frequency_str's lifetimes after their initialization
- {
- kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
- (void) kr;
- assert(kr == KERN_SUCCESS);
- CFMutableDictionaryRef classes_to_match
- = pIOServiceMatching("IOPlatformDevice");
- kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
- assert(kr == KERN_SUCCESS);
- service = pIOIteratorNext(it);
-
- CFStringRef device_type_str = S("device_type");
- CFStringRef clock_frequency_str = S("clock-frequency");
-
- while (service != 0) {
- CFDataRef data;
- data = pIORegistryEntryCreateCFProperty(service,
- device_type_str,
- NULL,
- 0);
- if (data) {
- const UInt8* raw = pCFDataGetBytePtr(data);
- if (strncmp((char*)raw, "cpu", 3) == 0 ||
- strncmp((char*)raw, "processor", 9) == 0) {
- CFDataRef freq_ref;
- freq_ref = pIORegistryEntryCreateCFProperty(service,
- clock_frequency_str,
- NULL,
- 0);
- if (freq_ref) {
- const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref);
- CFIndex len = pCFDataGetLength(freq_ref);
- if (len == 8)
- memcpy(speed, freq_ref_ptr, 8);
- else if (len == 4) {
- uint32_t v;
- memcpy(&v, freq_ref_ptr, 4);
- *speed = v;
- } else {
- *speed = 0;
- }
-
- pCFRelease(freq_ref);
- pCFRelease(data);
- break;
- }
- }
- pCFRelease(data);
- }
-
- service = pIOIteratorNext(it);
- }
-
- pIOObjectRelease(it);
-
- err = 0;
-
- if (device_type_str != NULL)
- pCFRelease(device_type_str);
- if (clock_frequency_str != NULL)
- pCFRelease(clock_frequency_str);
- }
-
-out:
- if (core_foundation_handle != NULL)
- dlclose(core_foundation_handle);
-
- if (iokit_handle != NULL)
- dlclose(iokit_handle);
-
- mach_port_deallocate(mach_task_self(), mach_port);
-
- return err;
-}
-
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks);
char model[512];
+ uint64_t cpuspeed;
size_t size;
unsigned int i;
natural_t numcpus;
mach_msg_type_number_t msg_type;
processor_cpu_load_info_data_t *info;
uv_cpu_info_t* cpu_info;
- uint64_t cpuspeed;
- int err;
size = sizeof(model);
if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
@@ -348,9 +203,13 @@
return UV__ERR(errno);
}
- err = uv__get_cpu_speed(&cpuspeed);
- if (err < 0)
- return err;
+ cpuspeed = 0;
+ size = sizeof(cpuspeed);
+ sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0);
+ if (cpuspeed == 0)
+ /* If sysctl hw.cputype == CPU_TYPE_ARM64, the correct value is unavailable
+ * from Apple, but we can hard-code it here to a plausible value. */
+ cpuspeed = 2400000000;
if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
(processor_info_array_t*)&info,
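
The darwin.cpp hunk above trades the dlopen()/IOKit registry walk for a single sysctl query, falling back to a hard-coded 2.4 GHz where hw.cpufrequency reports nothing (Apple Silicon). A minimal macOS-only sketch of that query, using the same fallback constant; main() and the printf are illustration only, not libuv code:

#include <stdint.h>
#include <stdio.h>
#include <sys/sysctl.h>

int main(void) {
  uint64_t hz = 0;
  size_t size = sizeof(hz);

  /* hw.cpufrequency is absent on Apple Silicon, so hz stays 0 there. */
  if (sysctlbyname("hw.cpufrequency", &hz, &size, NULL, 0) != 0 || hz == 0)
    hz = 2400000000;  /* same plausible default the patch hard-codes */

  printf("cpu speed: %llu Hz\n", (unsigned long long) hz);
  return 0;
}
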
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/epoll.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/epoll.cpp
deleted file mode 100644
index 4c057fb..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/epoll.cpp
+++ /dev/null
@@ -1,422 +0,0 @@
-/* Copyright libuv contributors. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "uv.h"
-#include "internal.h"
-#include <errno.h>
-#include <sys/epoll.h>
-
-int uv__epoll_init(uv_loop_t* loop) {
- int fd;
- fd = epoll_create1(O_CLOEXEC);
-
- /* epoll_create1() can fail either because it's not implemented (old kernel)
- * or because it doesn't understand the O_CLOEXEC flag.
- */
- if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
- fd = epoll_create(256);
-
- if (fd != -1)
- uv__cloexec(fd, 1);
- }
-
- loop->backend_fd = fd;
- if (fd == -1)
- return UV__ERR(errno);
-
- return 0;
-}
-
-
-void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
- struct epoll_event* events;
- struct epoll_event dummy;
- uintptr_t i;
- uintptr_t nfds;
-
- assert(loop->watchers != NULL);
- assert(fd >= 0);
-
- events = (struct epoll_event*) loop->watchers[loop->nwatchers];
- nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
- if (events != NULL)
- /* Invalidate events with same file descriptor */
- for (i = 0; i < nfds; i++)
- if (events[i].data.fd == fd)
- events[i].data.fd = -1;
-
- /* Remove the file descriptor from the epoll.
- * This avoids a problem where the same file description remains open
- * in another process, causing repeated junk epoll events.
- *
- * We pass in a dummy epoll_event, to work around a bug in old kernels.
- */
- if (loop->backend_fd >= 0) {
- /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
- * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
- */
- memset(&dummy, 0, sizeof(dummy));
- epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
- }
-}
-
-
-int uv__io_check_fd(uv_loop_t* loop, int fd) {
- struct epoll_event e;
- int rc;
-
- memset(&e, 0, sizeof(e));
- e.events = POLLIN;
- e.data.fd = -1;
-
- rc = 0;
- if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
- if (errno != EEXIST)
- rc = UV__ERR(errno);
-
- if (rc == 0)
- if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
- abort();
-
- return rc;
-}
-
-
-void uv__io_poll(uv_loop_t* loop, int timeout) {
- /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
- * effectively infinite on 32 bits architectures. To avoid blocking
- * indefinitely, we cap the timeout and poll again if necessary.
- *
- * Note that "30 minutes" is a simplification because it depends on
- * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
- * that being the largest value I have seen in the wild (and only once.)
- */
- static const int max_safe_timeout = 1789569;
- static int no_epoll_pwait_cached;
- static int no_epoll_wait_cached;
- int no_epoll_pwait;
- int no_epoll_wait;
- struct epoll_event events[1024];
- struct epoll_event* pe;
- struct epoll_event e;
- int real_timeout;
- QUEUE* q;
- uv__io_t* w;
- sigset_t sigset;
- uint64_t sigmask;
- uint64_t base;
- int have_signals;
- int nevents;
- int count;
- int nfds;
- int fd;
- int op;
- int i;
- int user_timeout;
- int reset_timeout;
-
- if (loop->nfds == 0) {
- assert(QUEUE_EMPTY(&loop->watcher_queue));
- return;
- }
-
- memset(&e, 0, sizeof(e));
-
- while (!QUEUE_EMPTY(&loop->watcher_queue)) {
- q = QUEUE_HEAD(&loop->watcher_queue);
- QUEUE_REMOVE(q);
- QUEUE_INIT(q);
-
- w = QUEUE_DATA(q, uv__io_t, watcher_queue);
- assert(w->pevents != 0);
- assert(w->fd >= 0);
- assert(w->fd < (int) loop->nwatchers);
-
- e.events = w->pevents;
- e.data.fd = w->fd;
-
- if (w->events == 0)
- op = EPOLL_CTL_ADD;
- else
- op = EPOLL_CTL_MOD;
-
- /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
- * events, skip the syscall and squelch the events after epoll_wait().
- */
- if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
- if (errno != EEXIST)
- abort();
-
- assert(op == EPOLL_CTL_ADD);
-
- /* We've reactivated a file descriptor that's been watched before. */
- if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
- abort();
- }
-
- w->events = w->pevents;
- }
-
- sigmask = 0;
- if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
- sigemptyset(&sigset);
- sigaddset(&sigset, SIGPROF);
- sigmask |= 1 << (SIGPROF - 1);
- }
-
- assert(timeout >= -1);
- base = loop->time;
- count = 48; /* Benchmarks suggest this gives the best throughput. */
- real_timeout = timeout;
-
- if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
- reset_timeout = 1;
- user_timeout = timeout;
- timeout = 0;
- } else {
- reset_timeout = 0;
- user_timeout = 0;
- }
-
- /* You could argue there is a dependency between these two but
- * ultimately we don't care about their ordering with respect
- * to one another. Worst case, we make a few system calls that
- * could have been avoided because another thread already knows
- * they fail with ENOSYS. Hardly the end of the world.
- */
- no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
- no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
-
- for (;;) {
- /* Only need to set the provider_entry_time if timeout != 0. The function
- * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
- */
- if (timeout != 0)
- uv__metrics_set_provider_entry_time(loop);
-
- /* See the comment for max_safe_timeout for an explanation of why
- * this is necessary. Executive summary: kernel bug workaround.
- */
- if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
- timeout = max_safe_timeout;
-
- if (sigmask != 0 && no_epoll_pwait != 0)
- if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
- abort();
-
- if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
- nfds = epoll_pwait(loop->backend_fd,
- events,
- ARRAY_SIZE(events),
- timeout,
- &sigset);
- if (nfds == -1 && errno == ENOSYS) {
- uv__store_relaxed(&no_epoll_pwait_cached, 1);
- no_epoll_pwait = 1;
- }
- } else {
- nfds = epoll_wait(loop->backend_fd,
- events,
- ARRAY_SIZE(events),
- timeout);
- if (nfds == -1 && errno == ENOSYS) {
- uv__store_relaxed(&no_epoll_wait_cached, 1);
- no_epoll_wait = 1;
- }
- }
-
- if (sigmask != 0 && no_epoll_pwait != 0)
- if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
- abort();
-
- /* Update loop->time unconditionally. It's tempting to skip the update when
- * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
- * operating system didn't reschedule our process while in the syscall.
- */
- SAVE_ERRNO(uv__update_time(loop));
-
- if (nfds == 0) {
- assert(timeout != -1);
-
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- }
-
- if (timeout == -1)
- continue;
-
- if (timeout == 0)
- return;
-
- /* We may have been inside the system call for longer than |timeout|
- * milliseconds so we need to update the timestamp to avoid drift.
- */
- goto update_timeout;
- }
-
- if (nfds == -1) {
- if (errno == ENOSYS) {
- /* epoll_wait() or epoll_pwait() failed, try the other system call. */
- assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
- continue;
- }
-
- if (errno != EINTR)
- abort();
-
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- }
-
- if (timeout == -1)
- continue;
-
- if (timeout == 0)
- return;
-
- /* Interrupted by a signal. Update timeout and poll again. */
- goto update_timeout;
- }
-
- have_signals = 0;
- nevents = 0;
-
- {
- /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
- union {
- struct epoll_event* events;
- uv__io_t* watchers;
- } x;
-
- x.events = events;
- assert(loop->watchers != NULL);
- loop->watchers[loop->nwatchers] = x.watchers;
- loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
- }
-
- for (i = 0; i < nfds; i++) {
- pe = events + i;
- fd = pe->data.fd;
-
- /* Skip invalidated events, see uv__platform_invalidate_fd */
- if (fd == -1)
- continue;
-
- assert(fd >= 0);
- assert((unsigned) fd < loop->nwatchers);
-
- w = (uv__io_t*)loop->watchers[fd];
-
- if (w == NULL) {
- /* File descriptor that we've stopped watching, disarm it.
- *
- * Ignore all errors because we may be racing with another thread
- * when the file descriptor is closed.
- */
- epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
- continue;
- }
-
- /* Give users only events they're interested in. Prevents spurious
- * callbacks when previous callback invocation in this loop has stopped
- * the current watcher. Also, filters out events that users has not
- * requested us to watch.
- */
- pe->events &= w->pevents | POLLERR | POLLHUP;
-
- /* Work around an epoll quirk where it sometimes reports just the
- * EPOLLERR or EPOLLHUP event. In order to force the event loop to
- * move forward, we merge in the read/write events that the watcher
- * is interested in; uv__read() and uv__write() will then deal with
- * the error or hangup in the usual fashion.
- *
- * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
- * reads the available data, calls uv_read_stop(), then sometime later
- * calls uv_read_start() again. By then, libuv has forgotten about the
- * hangup and the kernel won't report EPOLLIN again because there's
- * nothing left to read. If anything, libuv is to blame here. The
- * current hack is just a quick bandaid; to properly fix it, libuv
- * needs to remember the error/hangup event. We should get that for
- * free when we switch over to edge-triggered I/O.
- */
- if (pe->events == POLLERR || pe->events == POLLHUP)
- pe->events |=
- w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
-
- if (pe->events != 0) {
- /* Run signal watchers last. This also affects child process watchers
- * because those are implemented in terms of signal watchers.
- */
- if (w == &loop->signal_io_watcher) {
- have_signals = 1;
- } else {
- uv__metrics_update_idle_time(loop);
- w->cb(loop, w, pe->events);
- }
-
- nevents++;
- }
- }
-
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- }
-
- if (have_signals != 0) {
- uv__metrics_update_idle_time(loop);
- loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
- }
-
- loop->watchers[loop->nwatchers] = NULL;
- loop->watchers[loop->nwatchers + 1] = NULL;
-
- if (have_signals != 0)
- return; /* Event loop should cycle now so don't poll again. */
-
- if (nevents != 0) {
- if (nfds == ARRAY_SIZE(events) && --count != 0) {
- /* Poll for more events but don't block this time. */
- timeout = 0;
- continue;
- }
- return;
- }
-
- if (timeout == 0)
- return;
-
- if (timeout == -1)
- continue;
-
-update_timeout:
- assert(timeout > 0);
-
- real_timeout -= (loop->time - base);
- if (real_timeout <= 0)
- return;
-
- timeout = real_timeout;
- }
-}
-
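
The epoll backend deleted above was folded into upstream libuv's consolidated Linux code rather than removed outright, and its building blocks are generic. One of them, uv__io_check_fd()'s trick of testing whether a descriptor is pollable by tentatively adding it to an epoll set and then removing it, can be sketched on its own roughly as follows (Linux-only; check_pollable() is an illustrative name, not libuv API):

#include <errno.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

/* Returns 0 if `fd` is accepted by epoll, -errno otherwise. */
static int check_pollable(int epfd, int fd) {
  struct epoll_event e;

  memset(&e, 0, sizeof(e));
  e.events = EPOLLIN;
  e.data.fd = -1;

  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &e) != 0 && errno != EEXIST)
    return -errno;  /* e.g. -EPERM for a regular file */

  /* Undo the probe; old kernels require a non-NULL event even for DEL. */
  epoll_ctl(epfd, EPOLL_CTL_DEL, fd, &e);
  return 0;
}

int main(void) {
  int epfd = epoll_create1(EPOLL_CLOEXEC);
  int rc = check_pollable(epfd, STDIN_FILENO);
  close(epfd);
  return rc == 0 ? 0 : 1;
}
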
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/freebsd.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/freebsd.cpp
index 6700ff6..1bd6388 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/freebsd.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/freebsd.cpp
@@ -91,7 +91,7 @@
size_t size = sizeof(freecount);
if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
@@ -105,7 +105,7 @@
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) info;
}
@@ -116,6 +116,11 @@
}
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
+
+
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
@@ -264,30 +269,6 @@
}
-int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
- return sendmmsg(fd,
- (struct mmsghdr*) mmsg,
- vlen,
- 0 /* flags */);
-#else
- return errno = ENOSYS, -1;
-#endif
-}
-
-
-int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
- return recvmmsg(fd,
- (struct mmsghdr*) mmsg,
- vlen,
- 0 /* flags */,
- NULL /* timeout */);
-#else
- return errno = ENOSYS, -1;
-#endif
-}
-
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/fs.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/fs.cpp
index 1a61524..aba190a 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/fs.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/fs.cpp
@@ -46,9 +46,10 @@
#include <fcntl.h>
#include <poll.h>
+#include <atomic>
+
#if defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# define HAVE_PREADV 1
@@ -56,11 +57,16 @@
# define HAVE_PREADV 0
#endif
-#if defined(__linux__)
-# include "sys/utsname.h"
+/* preadv() and pwritev() were added in Android N (level 24) */
+#if defined(__linux__) && !(defined(__ANDROID__) && __ANDROID_API__ < 24)
+# define TRY_PREADV 1
#endif
-#if defined(__linux__) || defined(__sun)
+#if defined(__linux__)
+# include <sys/sendfile.h>
+#endif
+
+#if defined(__sun)
# include <sys/sendfile.h>
# include <sys/sysmacros.h>
#endif
@@ -79,7 +85,6 @@
#if defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# include <sys/param.h>
@@ -256,7 +261,6 @@
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
- || defined(__FreeBSD_kernel__) \
|| defined(__NetBSD__) \
|| defined(__OpenBSD__) \
|| defined(__sun)
@@ -311,7 +315,7 @@
static uv_once_t once = UV_ONCE_INIT;
int r;
#ifdef O_CLOEXEC
- static int no_cloexec_support;
+ static std::atomic<int> no_cloexec_support;
#endif
static const char pattern[] = "XXXXXX";
static const size_t pattern_size = sizeof(pattern) - 1;
@@ -336,7 +340,8 @@
uv_once(&once, uv__mkostemp_initonce);
#ifdef O_CLOEXEC
- if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
+ if (atomic_load_explicit(&no_cloexec_support, std::memory_order_relaxed) == 0 &&
+ uv__mkostemp != NULL) {
r = uv__mkostemp(path, O_CLOEXEC);
if (r >= 0)
@@ -349,7 +354,7 @@
/* We set the static variable so that next calls don't even
try to use mkostemp. */
- uv__store_relaxed(&no_cloexec_support, 1);
+ atomic_store_explicit(&no_cloexec_support, 1, std::memory_order_relaxed);
}
#endif /* O_CLOEXEC */
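
The hunks in this file repeatedly swap libuv's uv__load_relaxed/uv__store_relaxed macros for std::atomic with relaxed ordering, always in service of the same idiom: probe a syscall once, cache the ENOSYS result, and use the fallback from then on. A reduced sketch of that idiom under hypothetical names, where fast_op()/slow_op() stand in for pairs like mkostemp()/mkstemp() or preadv()/pread():

#include <atomic>
#include <cerrno>
#include <cstdio>

/* Hypothetical stand-ins for the real pairs in this file. */
static int fast_op() { errno = ENOSYS; return -1; }  /* pretend the kernel lacks it */
static int slow_op() { return 0; }                   /* portable fallback */

static int do_op() {
  static std::atomic<int> no_fast_op{0};

  if (std::atomic_load_explicit(&no_fast_op, std::memory_order_relaxed) == 0) {
    int r = fast_op();
    if (r != -1 || errno != ENOSYS)
      return r;
    /* Cache the ENOSYS result; a racing thread may probe once more, which is
     * harmless and avoids any locking. */
    std::atomic_store_explicit(&no_fast_op, 1, std::memory_order_relaxed);
  }
  return slow_op();
}

int main() {
  std::printf("first call:  %d\n", do_op());   /* probes fast_op(), falls back */
  std::printf("second call: %d\n", do_op());   /* skips the probe entirely */
  return 0;
}
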
@@ -458,8 +463,8 @@
static ssize_t uv__fs_read(uv_fs_t* req) {
-#if defined(__linux__)
- static int no_preadv;
+#if TRY_PREADV
+ static std::atomic<int> no_preadv;
#endif
unsigned int iovmax;
ssize_t result;
@@ -482,20 +487,20 @@
#if HAVE_PREADV
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
-# if defined(__linux__)
- if (uv__load_relaxed(&no_preadv)) retry:
+# if TRY_PREADV
+ if (atomic_load_explicit(&no_preadv, std::memory_order_relaxed)) retry:
# endif
{
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
}
-# if defined(__linux__)
+# if TRY_PREADV
else {
- result = uv__preadv(req->file,
- (struct iovec*)req->bufs,
- req->nbufs,
- req->off);
+ result = preadv(req->file,
+ (struct iovec*) req->bufs,
+ req->nbufs,
+ req->off);
if (result == -1 && errno == ENOSYS) {
- uv__store_relaxed(&no_preadv, 1);
+ atomic_store_explicit(&no_preadv, 1, std::memory_order_relaxed);
goto retry;
}
}
@@ -516,7 +521,7 @@
if (result == -1 && errno == EOPNOTSUPP) {
struct stat buf;
ssize_t rc;
- rc = fstat(req->file, &buf);
+ rc = uv__fstat(req->file, &buf);
if (rc == 0 && S_ISDIR(buf.st_mode)) {
errno = EISDIR;
}
@@ -527,19 +532,12 @@
}
-#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
-#define UV_CONST_DIRENT uv__dirent_t
-#else
-#define UV_CONST_DIRENT const uv__dirent_t
-#endif
-
-
-static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
+static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
}
-static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
+static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
return strcmp((*a)->d_name, (*b)->d_name);
}
@@ -715,7 +713,7 @@
/* We may not have a real PATH_MAX. Read size of link. */
struct stat st;
int ret;
- ret = lstat(req->path, &st);
+ ret = uv__lstat(req->path, &st);
if (ret != 0)
return -1;
if (!S_ISLNK(st.st_mode)) {
@@ -907,31 +905,6 @@
#ifdef __linux__
-static unsigned uv__kernel_version(void) {
- static unsigned cached_version;
- struct utsname u;
- unsigned version;
- unsigned major;
- unsigned minor;
- unsigned patch;
-
- version = uv__load_relaxed(&cached_version);
- if (version != 0)
- return version;
-
- if (-1 == uname(&u))
- return 0;
-
- if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
- return 0;
-
- version = major * 65536 + minor * 256 + patch;
- uv__store_relaxed(&cached_version, version);
-
- return version;
-}
-
-
/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
* in copy_file_range() when it shouldn't. There is no workaround except to
* fall back to a regular copy.
@@ -968,10 +941,10 @@
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
int out_fd, size_t len) {
- static int no_copy_file_range_support;
+ static std::atomic<int> no_copy_file_range_support;
ssize_t r;
- if (uv__load_relaxed(&no_copy_file_range_support)) {
+ if (atomic_load_explicit(&no_copy_file_range_support, std::memory_order_relaxed)) {
errno = ENOSYS;
return -1;
}
@@ -990,7 +963,7 @@
errno = ENOSYS; /* Use fallback. */
break;
case ENOSYS:
- uv__store_relaxed(&no_copy_file_range_support, 1);
+ atomic_store_explicit(&no_copy_file_range_support, 1, std::memory_order_relaxed);
break;
case EPERM:
/* It's been reported that CIFS spuriously fails.
@@ -1061,10 +1034,7 @@
return -1;
}
-#elif defined(__APPLE__) || \
- defined(__DragonFly__) || \
- defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__)
+#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
{
off_t len;
ssize_t r;
@@ -1088,15 +1058,6 @@
#endif
len = 0;
r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
-#elif defined(__FreeBSD_kernel__)
- len = 0;
- r = bsd_sendfile(in_fd,
- out_fd,
- req->off,
- req->bufsml[0].len,
- NULL,
- &len,
- 0);
#else
/* The darwin sendfile takes len as an input for the length to send,
* so make sure to initialize it with the caller's value. */
@@ -1148,7 +1109,6 @@
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
- || defined(__FreeBSD_kernel__) \
|| defined(__NetBSD__) \
|| defined(__OpenBSD__)
struct timeval tv[2];
@@ -1190,7 +1150,6 @@
#elif defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__NetBSD__)
struct timeval tv[2];
tv[0] = uv__fs_to_timeval(req->atime);
@@ -1204,8 +1163,8 @@
static ssize_t uv__fs_write(uv_fs_t* req) {
-#if defined(__linux__)
- static int no_pwritev;
+#if TRY_PREADV
+ static std::atomic<int> no_pwritev;
#endif
ssize_t r;
@@ -1233,20 +1192,20 @@
#if HAVE_PREADV
r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
-# if defined(__linux__)
- if (no_pwritev) retry:
+# if TRY_PREADV
+ if (atomic_load_explicit(&no_pwritev, std::memory_order_relaxed)) retry:
# endif
{
r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
}
-# if defined(__linux__)
+# if TRY_PREADV
else {
- r = uv__pwritev(req->file,
- (struct iovec*) req->bufs,
- req->nbufs,
- req->off);
+ r = pwritev(req->file,
+ (struct iovec*) req->bufs,
+ req->nbufs,
+ req->off);
if (r == -1 && errno == ENOSYS) {
- no_pwritev = 1;
+ atomic_store_explicit(&no_pwritev, 1, std::memory_order_relaxed);
goto retry;
}
}
@@ -1288,7 +1247,7 @@
return srcfd;
/* Get the source file's mode. */
- if (fstat(srcfd, &src_statsbuf)) {
+ if (uv__fstat(srcfd, &src_statsbuf)) {
err = UV__ERR(errno);
goto out;
}
@@ -1316,7 +1275,7 @@
destination are not the same file. If they are the same, bail out early. */
if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
/* Get the destination file's mode. */
- if (fstat(dstfd, &dst_statsbuf)) {
+ if (uv__fstat(dstfd, &dst_statsbuf)) {
err = UV__ERR(errno);
goto out;
}
@@ -1330,7 +1289,19 @@
/* Truncate the file in case the destination already existed. */
if (ftruncate(dstfd, 0) != 0) {
err = UV__ERR(errno);
- goto out;
+
+ /* ftruncate() on ceph-fuse fails with EACCES when the file is created
+ * with read only permissions. Since ftruncate() on a newly created
+ * file is a meaningless operation anyway, detect that condition
+ * and squelch the error.
+ */
+ if (err != UV_EACCES)
+ goto out;
+
+ if (dst_statsbuf.st_size > 0)
+ goto out;
+
+ err = 0;
}
}
@@ -1514,14 +1485,14 @@
uv_stat_t* buf) {
STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
- static int no_statx;
+ static std::atomic<int> no_statx;
struct uv__statx statxbuf;
int dirfd;
int flags;
int mode;
int rc;
- if (uv__load_relaxed(&no_statx))
+ if (atomic_load_explicit(&no_statx, std::memory_order_relaxed))
return UV_ENOSYS;
dirfd = AT_FDCWD;
@@ -1555,30 +1526,11 @@
* implemented, rc might return 1 with 0 set as the error code in which
* case we return ENOSYS.
*/
- uv__store_relaxed(&no_statx, 1);
+ atomic_store_explicit(&no_statx, 1, std::memory_order_relaxed);
return UV_ENOSYS;
}
- buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
- buf->st_mode = statxbuf.stx_mode;
- buf->st_nlink = statxbuf.stx_nlink;
- buf->st_uid = statxbuf.stx_uid;
- buf->st_gid = statxbuf.stx_gid;
- buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
- buf->st_ino = statxbuf.stx_ino;
- buf->st_size = statxbuf.stx_size;
- buf->st_blksize = statxbuf.stx_blksize;
- buf->st_blocks = statxbuf.stx_blocks;
- buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
- buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
- buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
- buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
- buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
- buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
- buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
- buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
- buf->st_flags = 0;
- buf->st_gen = 0;
+ uv__statx_to_stat(&statxbuf, buf);
return 0;
#else
@@ -1595,7 +1547,7 @@
if (ret != UV_ENOSYS)
return ret;
- ret = stat(path, &pbuf);
+ ret = uv__stat(path, &pbuf);
if (ret == 0)
uv__to_stat(&pbuf, buf);
@@ -1611,7 +1563,7 @@
if (ret != UV_ENOSYS)
return ret;
- ret = lstat(path, &pbuf);
+ ret = uv__lstat(path, &pbuf);
if (ret == 0)
uv__to_stat(&pbuf, buf);
@@ -1627,7 +1579,7 @@
if (ret != UV_ENOSYS)
return ret;
- ret = fstat(fd, &pbuf);
+ ret = uv__fstat(fd, &pbuf);
if (ret == 0)
uv__to_stat(&pbuf, buf);
@@ -1822,6 +1774,9 @@
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(CLOSE);
req->file = file;
+ if (cb != NULL)
+ if (uv__iou_fs_close(loop, req))
+ return 0;
POST;
}
@@ -1869,6 +1824,9 @@
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FDATASYNC);
req->file = file;
+ if (cb != NULL)
+ if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
+ return 0;
POST;
}
@@ -1876,6 +1834,9 @@
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSTAT);
req->file = file;
+ if (cb != NULL)
+ if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
+ return 0;
POST;
}
@@ -1883,6 +1844,9 @@
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSYNC);
req->file = file;
+ if (cb != NULL)
+ if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
+ return 0;
POST;
}
@@ -1929,6 +1893,9 @@
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(LSTAT);
PATH;
+ if (cb != NULL)
+ if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
+ return 0;
POST;
}
@@ -1940,6 +1907,9 @@
uv_fs_cb cb) {
INIT(LINK);
PATH2;
+ if (cb != NULL)
+ if (uv__iou_fs_link(loop, req))
+ return 0;
POST;
}
@@ -1952,6 +1922,9 @@
INIT(MKDIR);
PATH;
req->mode = mode;
+ if (cb != NULL)
+ if (uv__iou_fs_mkdir(loop, req))
+ return 0;
POST;
}
@@ -1990,6 +1963,9 @@
PATH;
req->flags = flags;
req->mode = mode;
+ if (cb != NULL)
+ if (uv__iou_fs_open(loop, req))
+ return 0;
POST;
}
@@ -2018,6 +1994,11 @@
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off;
+
+ if (cb != NULL)
+ if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
+ return 0;
+
POST;
}
@@ -2095,6 +2076,9 @@
uv_fs_cb cb) {
INIT(RENAME);
PATH2;
+ if (cb != NULL)
+ if (uv__iou_fs_rename(loop, req))
+ return 0;
POST;
}
@@ -2125,6 +2109,9 @@
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(STAT);
PATH;
+ if (cb != NULL)
+ if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
+ return 0;
POST;
}
@@ -2138,6 +2125,9 @@
INIT(SYMLINK);
PATH2;
req->flags = flags;
+ if (cb != NULL)
+ if (uv__iou_fs_symlink(loop, req))
+ return 0;
POST;
}
@@ -2145,6 +2135,9 @@
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(UNLINK);
PATH;
+ if (cb != NULL)
+ if (uv__iou_fs_unlink(loop, req))
+ return 0;
POST;
}
@@ -2188,6 +2181,11 @@
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off;
+
+ if (cb != NULL)
+ if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
+ return 0;
+
POST;
}
@@ -2196,7 +2194,7 @@
if (req == NULL)
return;
- /* Only necessary for asychronous requests, i.e., requests with a callback.
+ /* Only necessary for asynchronous requests, i.e., requests with a callback.
* Synchronous ones don't copy their arguments and have req->path and
* req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
* UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
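
Each uv_fs_* wrapper above gains the same guard: an asynchronous request (cb != NULL) is first offered to the io_uring helpers and only falls through to the thread-pool POST when io_uring declines; on non-Linux builds the uv__iou_fs_* names expand to 0 (see the internal.h hunk further down), so the branch disappears. A toy dispatcher showing just that shape, with hypothetical try_uring()/queue_threadpool() stand-ins:

#include <cstdio>
#include <functional>

/* Hypothetical stand-ins: try_uring() plays the role of the uv__iou_fs_*()
 * helpers (nonzero means "io_uring took ownership of the request") and
 * queue_threadpool() plays the role of the POST macro. */
struct Request { const char* op; };

static int try_uring(Request& req) {
  std::printf("io_uring accepted %s\n", req.op);
  return 1;  /* returning 0 would mean "not supported here, fall back" */
}

static void queue_threadpool(Request& req) {
  std::printf("thread pool handles %s\n", req.op);
}

/* Same shape as uv_fs_open()/uv_fs_read()/... after this patch: only
 * asynchronous requests (cb != NULL) are offered to io_uring. The callback
 * would be invoked later by the event loop; it is unused in this sketch. */
static int submit(Request req, std::function<void(Request&)> cb) {
  if (cb != nullptr)
    if (try_uring(req))
      return 0;
  queue_threadpool(req);  /* synchronous request, or io_uring declined */
  return 0;
}

int main() {
  submit({"open"}, [](Request& r) { std::printf("done: %s\n", r.op); });
  submit({"stat"}, nullptr);  /* synchronous: never touches io_uring */
  return 0;
}
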
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/fsevents.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/fsevents.cpp
index 648c8a9..c31d08b 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/fsevents.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/fsevents.cpp
@@ -80,13 +80,13 @@
typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;
struct uv__cf_loop_signal_s {
- QUEUE member;
+ struct uv__queue member;
uv_fs_event_t* handle;
uv__cf_loop_signal_type_t type;
};
struct uv__fsevents_event_s {
- QUEUE member;
+ struct uv__queue member;
int events;
char path[1];
};
@@ -98,7 +98,7 @@
FSEventStreamRef fsevent_stream;
uv_sem_t fsevent_sem;
uv_mutex_t fsevent_mutex;
- void* fsevent_handles[2];
+ struct uv__queue fsevent_handles;
unsigned int fsevent_handle_count;
};
@@ -132,7 +132,6 @@
static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
CFAllocatorRef,
const char*);
-static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
static CFStringRef (*pkCFRunLoopDefaultMode);
static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamCallback,
@@ -141,7 +140,6 @@
FSEventStreamEventId,
CFTimeInterval,
FSEventStreamCreateFlags);
-static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
static void (*pFSEventStreamRelease)(FSEventStreamRef);
static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
@@ -152,22 +150,22 @@
#define UV__FSEVENTS_PROCESS(handle, block) \
do { \
- QUEUE events; \
- QUEUE* q; \
+ struct uv__queue events; \
+ struct uv__queue* q; \
uv__fsevents_event_t* event; \
int err; \
uv_mutex_lock(&(handle)->cf_mutex); \
/* Split-off all events and empty original queue */ \
- QUEUE_MOVE(&(handle)->cf_events, &events); \
+ uv__queue_move(&(handle)->cf_events, &events); \
/* Get error (if any) and zero original one */ \
err = (handle)->cf_error; \
(handle)->cf_error = 0; \
uv_mutex_unlock(&(handle)->cf_mutex); \
/* Loop through events, deallocating each after processing */ \
- while (!QUEUE_EMPTY(&events)) { \
- q = QUEUE_HEAD(&events); \
- event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
- QUEUE_REMOVE(q); \
+ while (!uv__queue_empty(&events)) { \
+ q = uv__queue_head(&events); \
+ event = uv__queue_data(q, uv__fsevents_event_t, member); \
+ uv__queue_remove(q); \
/* NOTE: Checking uv__is_active() is required here, because handle \
* callback may close handle and invoking it after it will lead to \
* incorrect behaviour */ \
@@ -195,14 +193,14 @@
/* Runs in CF thread, pushed event into handle's event list */
static void uv__fsevents_push_event(uv_fs_event_t* handle,
- QUEUE* events,
+ struct uv__queue* events,
int err) {
assert(events != NULL || err != 0);
uv_mutex_lock(&handle->cf_mutex);
/* Concatenate two queues */
if (events != NULL)
- QUEUE_ADD(&handle->cf_events, events);
+ uv__queue_add(&handle->cf_events, events);
/* Propagate error */
if (err != 0)
@@ -226,12 +224,12 @@
char* path;
char* pos;
uv_fs_event_t* handle;
- QUEUE* q;
+ struct uv__queue* q;
uv_loop_t* loop;
uv__cf_loop_state_t* state;
uv__fsevents_event_t* event;
FSEventStreamEventFlags flags;
- QUEUE head;
+ struct uv__queue head;
loop = (uv_loop_t*)info;
state = (uv__cf_loop_state_t*)loop->cf_state;
@@ -240,9 +238,9 @@
/* For each handle */
uv_mutex_lock(&state->fsevent_mutex);
- QUEUE_FOREACH(q, &state->fsevent_handles) {
- handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
- QUEUE_INIT(&head);
+ uv__queue_foreach(q, &state->fsevent_handles) {
+ handle = uv__queue_data(q, uv_fs_event_t, cf_member);
+ uv__queue_init(&head);
/* Process and filter out events */
for (i = 0; i < numEvents; i++) {
@@ -320,10 +318,10 @@
event->events = UV_CHANGE;
}
- QUEUE_INSERT_TAIL(&head, &event->member);
+ uv__queue_insert_tail(&head, &event->member);
}
- if (!QUEUE_EMPTY(&head))
+ if (!uv__queue_empty(&head))
uv__fsevents_push_event(handle, &head, 0);
}
uv_mutex_unlock(&state->fsevent_mutex);
@@ -331,8 +329,9 @@
/* Runs in CF thread */
-static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
- uv__cf_loop_state_t* state;
+static int uv__fsevents_create_stream(uv__cf_loop_state_t* state,
+ uv_loop_t* loop,
+ CFArrayRef paths) {
FSEventStreamContext ctx;
FSEventStreamRef ref;
CFAbsoluteTime latency;
@@ -373,10 +372,7 @@
flags);
assert(ref != NULL);
- state = (uv__cf_loop_state_t*)loop->cf_state;
- pFSEventStreamScheduleWithRunLoop(ref,
- state->loop,
- *pkCFRunLoopDefaultMode);
+ pFSEventStreamScheduleWithRunLoop(ref, state->loop, *pkCFRunLoopDefaultMode);
if (!pFSEventStreamStart(ref)) {
pFSEventStreamInvalidate(ref);
pFSEventStreamRelease(ref);
@@ -389,11 +385,7 @@
/* Runs in CF thread */
-static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
- uv__cf_loop_state_t* state;
-
- state = (uv__cf_loop_state_t*)loop->cf_state;
-
+static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
if (state->fsevent_stream == NULL)
return;
@@ -408,10 +400,10 @@
/* Runs in CF thread, when there're new fsevent handles to add to stream */
-static void uv__fsevents_reschedule(uv_fs_event_t* handle,
+static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
+ uv_loop_t* loop,
uv__cf_loop_signal_type_t type) {
- uv__cf_loop_state_t* state;
- QUEUE* q;
+ struct uv__queue* q;
uv_fs_event_t* curr;
CFArrayRef cf_paths;
CFStringRef* paths;
@@ -419,7 +411,6 @@
int err;
unsigned int path_count;
- state = (uv__cf_loop_state_t*)handle->loop->cf_state;
paths = NULL;
cf_paths = NULL;
err = 0;
@@ -438,7 +429,7 @@
uv_mutex_unlock(&state->fsevent_mutex);
/* Destroy previous FSEventStream */
- uv__fsevents_destroy_stream(handle->loop);
+ uv__fsevents_destroy_stream(state);
/* Any failure below will be a memory failure */
err = UV_ENOMEM;
@@ -455,9 +446,9 @@
q = &state->fsevent_handles;
for (; i < path_count; i++) {
- q = QUEUE_NEXT(q);
+ q = uv__queue_next(q);
assert(q != &state->fsevent_handles);
- curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+ curr = uv__queue_data(q, uv_fs_event_t, cf_member);
assert(curr->realpath != NULL);
paths[i] =
@@ -478,7 +469,7 @@
err = UV_ENOMEM;
goto final;
}
- err = uv__fsevents_create_stream(handle->loop, cf_paths);
+ err = uv__fsevents_create_stream(state, loop, cf_paths);
}
final:
@@ -495,8 +486,8 @@
/* Broadcast error to all handles */
uv_mutex_lock(&state->fsevent_mutex);
- QUEUE_FOREACH(q, &state->fsevent_handles) {
- curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+ uv__queue_foreach(q, &state->fsevent_handles) {
+ curr = uv__queue_data(q, uv_fs_event_t, cf_member);
uv__fsevents_push_event(curr, NULL, err);
}
uv_mutex_unlock(&state->fsevent_mutex);
@@ -563,10 +554,8 @@
V(core_foundation_handle, CFRunLoopStop);
V(core_foundation_handle, CFRunLoopWakeUp);
V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
- V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, kCFRunLoopDefaultMode);
V(core_services_handle, FSEventStreamCreate);
- V(core_services_handle, FSEventStreamFlushSync);
V(core_services_handle, FSEventStreamInvalidate);
V(core_services_handle, FSEventStreamRelease);
V(core_services_handle, FSEventStreamScheduleWithRunLoop);
@@ -617,7 +606,7 @@
if (err)
goto fail_sem_init;
- QUEUE_INIT(&loop->cf_signals);
+ uv__queue_init(&loop->cf_signals);
err = uv_sem_init(&state->fsevent_sem, 0);
if (err)
@@ -627,7 +616,7 @@
if (err)
goto fail_fsevent_mutex_init;
- QUEUE_INIT(&state->fsevent_handles);
+ uv__queue_init(&state->fsevent_handles);
state->fsevent_need_reschedule = 0;
state->fsevent_handle_count = 0;
@@ -686,7 +675,7 @@
void uv__fsevents_loop_delete(uv_loop_t* loop) {
uv__cf_loop_signal_t* s;
uv__cf_loop_state_t* state;
- QUEUE* q;
+ struct uv__queue* q;
if (loop->cf_state == NULL)
return;
@@ -699,10 +688,10 @@
uv_mutex_destroy(&loop->cf_mutex);
/* Free any remaining data */
- while (!QUEUE_EMPTY(&loop->cf_signals)) {
- q = QUEUE_HEAD(&loop->cf_signals);
- s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
- QUEUE_REMOVE(q);
+ while (!uv__queue_empty(&loop->cf_signals)) {
+ q = uv__queue_head(&loop->cf_signals);
+ s = uv__queue_data(q, uv__cf_loop_signal_t, member);
+ uv__queue_remove(q);
uv__free(s);
}
@@ -746,28 +735,28 @@
static void uv__cf_loop_cb(void* arg) {
uv_loop_t* loop;
uv__cf_loop_state_t* state;
- QUEUE* item;
- QUEUE split_head;
+ struct uv__queue* item;
+ struct uv__queue split_head;
uv__cf_loop_signal_t* s;
loop = (uv_loop_t*)arg;
state = (uv__cf_loop_state_t*)loop->cf_state;
uv_mutex_lock(&loop->cf_mutex);
- QUEUE_MOVE(&loop->cf_signals, &split_head);
+ uv__queue_move(&loop->cf_signals, &split_head);
uv_mutex_unlock(&loop->cf_mutex);
- while (!QUEUE_EMPTY(&split_head)) {
- item = QUEUE_HEAD(&split_head);
- QUEUE_REMOVE(item);
+ while (!uv__queue_empty(&split_head)) {
+ item = uv__queue_head(&split_head);
+ uv__queue_remove(item);
- s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
+ s = uv__queue_data(item, uv__cf_loop_signal_t, member);
/* This was a termination signal */
if (s->handle == NULL)
pCFRunLoopStop(state->loop);
else
- uv__fsevents_reschedule(s->handle, s->type);
+ uv__fsevents_reschedule(state, loop, s->type);
uv__free(s);
}
@@ -789,7 +778,7 @@
item->type = type;
uv_mutex_lock(&loop->cf_mutex);
- QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
+ uv__queue_insert_tail(&loop->cf_signals, &item->member);
state = (uv__cf_loop_state_t*)loop->cf_state;
assert(state != NULL);
@@ -818,7 +807,7 @@
handle->realpath_len = strlen(handle->realpath);
/* Initialize event queue */
- QUEUE_INIT(&handle->cf_events);
+ uv__queue_init(&handle->cf_events);
handle->cf_error = 0;
/*
@@ -843,7 +832,7 @@
/* Insert handle into the list */
state = (uv__cf_loop_state_t*)handle->loop->cf_state;
uv_mutex_lock(&state->fsevent_mutex);
- QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
+ uv__queue_insert_tail(&state->fsevent_handles, &handle->cf_member);
state->fsevent_handle_count++;
state->fsevent_need_reschedule = 1;
uv_mutex_unlock(&state->fsevent_mutex);
@@ -883,7 +872,7 @@
/* Remove handle from the list */
state = (uv__cf_loop_state_t*)handle->loop->cf_state;
uv_mutex_lock(&state->fsevent_mutex);
- QUEUE_REMOVE(&handle->cf_member);
+ uv__queue_remove(&handle->cf_member);
state->fsevent_handle_count--;
state->fsevent_need_reschedule = 1;
uv_mutex_unlock(&state->fsevent_mutex);
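
Most of the fsevents.cpp churn is mechanical: the QUEUE_* macros become uv__queue_* functions over struct uv__queue, an intrusive circular doubly-linked list embedded in each handle. The sketch below mimics how that API is used in this file (init, insert_tail, foreach, data, remove, empty); it illustrates the pattern and is not libuv's actual implementation:

#include <cstddef>
#include <cstdio>

/* Intrusive circular doubly linked list in the style of struct uv__queue.
 * Names here (node, list_*, LIST_DATA, fs_event) are illustrative only. */
struct node { node* next; node* prev; };

static void list_init(node* q)        { q->next = q; q->prev = q; }
static bool list_empty(const node* q) { return q->next == q; }
static void list_insert_tail(node* h, node* q) {
  q->next = h; q->prev = h->prev; h->prev->next = q; h->prev = q;
}
static void list_remove(node* q) {
  q->prev->next = q->next; q->next->prev = q->prev;
}

/* Equivalent of uv__queue_data(): recover the enclosing struct from the
 * embedded link member. */
#define LIST_DATA(ptr, type, field) \
  ((type*) ((char*) (ptr) - offsetof(type, field)))

struct fs_event { int id; node member; };

int main() {
  node handles;
  list_init(&handles);

  fs_event a{1, {}}, b{2, {}};
  list_insert_tail(&handles, &a.member);
  list_insert_tail(&handles, &b.member);

  /* Equivalent of uv__queue_foreach() over state->fsevent_handles. */
  for (node* q = handles.next; q != &handles; q = q->next)
    std::printf("handle %d\n", LIST_DATA(q, fs_event, member)->id);

  list_remove(&a.member);
  list_remove(&b.member);
  std::printf("empty again: %d\n", (int) list_empty(&handles));
  return 0;
}
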
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/ibmi.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/ibmi.cpp
index 56af31e..5e0fa98 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/ibmi.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/ibmi.cpp
@@ -249,6 +249,11 @@
}
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
+
+
void uv_loadavg(double avg[3]) {
SSTS0200 rcvr;
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/internal.h b/wpinet/src/main/native/thirdparty/libuv/src/unix/internal.h
index 2b65415..854d98a 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/internal.h
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/internal.h
@@ -26,21 +26,34 @@
#include <assert.h>
#include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
+#include <stdint.h>
#include <stdlib.h> /* abort */
#include <string.h> /* strrchr */
#include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */
#include <stdio.h>
#include <errno.h>
#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#define uv__msan_unpoison(p, n) \
+ do { \
+ (void) (p); \
+ (void) (n); \
+ } while (0)
+
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+# include <sanitizer/msan_interface.h>
+# undef uv__msan_unpoison
+# define uv__msan_unpoison __msan_unpoison
+# endif
+#endif
#if defined(__STRICT_ANSI__)
# define inline __inline
#endif
-#if defined(__linux__)
-# include "linux-syscalls.h"
-#endif /* __linux__ */
-
#if defined(__MVS__)
# include "os390-syscalls.h"
#endif /* __MVS__ */
@@ -79,13 +92,11 @@
# define UV__PATH_MAX 8192
#endif
-#if defined(__ANDROID__)
-int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
-# ifdef pthread_sigmask
-# undef pthread_sigmask
-# endif
-# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
-#endif
+union uv__sockaddr {
+ struct sockaddr_in6 in6;
+ struct sockaddr_in in;
+ struct sockaddr addr;
+};
#define ACCESS_ONCE(type, var) \
(*(volatile type*) &(var))
@@ -166,12 +177,42 @@
int fds[1];
};
+#ifdef __linux__
+struct uv__statx_timestamp {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t unused0;
+};
+
+struct uv__statx {
+ uint32_t stx_mask;
+ uint32_t stx_blksize;
+ uint64_t stx_attributes;
+ uint32_t stx_nlink;
+ uint32_t stx_uid;
+ uint32_t stx_gid;
+ uint16_t stx_mode;
+ uint16_t unused0;
+ uint64_t stx_ino;
+ uint64_t stx_size;
+ uint64_t stx_blocks;
+ uint64_t stx_attributes_mask;
+ struct uv__statx_timestamp stx_atime;
+ struct uv__statx_timestamp stx_btime;
+ struct uv__statx_timestamp stx_ctime;
+ struct uv__statx_timestamp stx_mtime;
+ uint32_t stx_rdev_major;
+ uint32_t stx_rdev_minor;
+ uint32_t stx_dev_major;
+ uint32_t stx_dev_minor;
+ uint64_t unused1[14];
+};
+#endif /* __linux__ */
#if defined(_AIX) || \
defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
defined(__linux__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
@@ -260,10 +301,10 @@
/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
-int uv__epoll_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
+int uv__process_init(uv_loop_t* loop);
/* various */
void uv__async_close(uv_async_t* handle);
@@ -280,7 +321,6 @@
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
FILE* uv__open_file(const char* path);
-int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__search_path(const char* prog, char* buf, size_t* buflen);
void uv__wait_children(uv_loop_t* loop);
@@ -291,6 +331,38 @@
int uv__random_readpath(const char* path, void* buf, size_t buflen);
int uv__random_sysctl(void* buf, size_t buflen);
+/* io_uring */
+#ifdef __linux__
+int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
+ uv_fs_t* req,
+ uint32_t fsync_flags);
+int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_read_or_write(uv_loop_t* loop,
+ uv_fs_t* req,
+ int is_read);
+int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_statx(uv_loop_t* loop,
+ uv_fs_t* req,
+ int is_fstat,
+ int is_lstat);
+int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req);
+#else
+#define uv__iou_fs_close(loop, req) 0
+#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
+#define uv__iou_fs_link(loop, req) 0
+#define uv__iou_fs_mkdir(loop, req) 0
+#define uv__iou_fs_open(loop, req) 0
+#define uv__iou_fs_read_or_write(loop, req, is_read) 0
+#define uv__iou_fs_rename(loop, req) 0
+#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
+#define uv__iou_fs_symlink(loop, req) 0
+#define uv__iou_fs_unlink(loop, req) 0
+#endif
+
#if defined(__APPLE__)
int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
@@ -324,8 +396,52 @@
return s + 1;
}
+UV_UNUSED(static int uv__fstat(int fd, struct stat* s)) {
+ int rc;
+
+ rc = fstat(fd, s);
+ if (rc >= 0)
+ uv__msan_unpoison(s, sizeof(*s));
+
+ return rc;
+}
+
+UV_UNUSED(static int uv__lstat(const char* path, struct stat* s)) {
+ int rc;
+
+ rc = lstat(path, s);
+ if (rc >= 0)
+ uv__msan_unpoison(s, sizeof(*s));
+
+ return rc;
+}
+
+UV_UNUSED(static int uv__stat(const char* path, struct stat* s)) {
+ int rc;
+
+ rc = stat(path, s);
+ if (rc >= 0)
+ uv__msan_unpoison(s, sizeof(*s));
+
+ return rc;
+}
+
#if defined(__linux__)
-int uv__inotify_fork(uv_loop_t* loop, void* old_watchers);
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ off_t* off_in,
+ int fd_out,
+ off_t* off_out,
+ size_t len,
+ unsigned int flags);
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf);
+void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
+unsigned uv__kernel_version(void);
#endif
typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
@@ -335,22 +451,6 @@
struct sockaddr* name,
int* namelen);
-#if defined(__linux__) || \
- defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || \
- defined(__DragonFly__)
-#define HAVE_MMSG 1
-struct uv__mmsghdr {
- struct msghdr msg_hdr;
- unsigned int msg_len;
-};
-
-int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
-int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
-#else
-#define HAVE_MMSG 0
-#endif
-
#if defined(__sun)
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen);
@@ -367,5 +467,10 @@
unsigned int flags);
#endif
+#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1301000)
+#define UV__CPU_AFFINITY_SUPPORTED 1
+#else
+#define UV__CPU_AFFINITY_SUPPORTED 0
+#endif
#endif /* UV_UNIX_INTERNAL_H_ */
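
internal.h now defines uv__msan_unpoison as a no-op and substitutes the real MemorySanitizer hook only when __has_feature(memory_sanitizer) holds, which is what lets the new uv__fstat/uv__lstat/uv__stat wrappers mark syscall-filled buffers as initialized. A compile-anywhere sketch of that guard pattern; demo_unpoison is a made-up name:

#include <cstdio>
#include <cstring>

/* Same shape as the uv__msan_unpoison macro added above: a no-op by default,
 * replaced by the real MSan hook when the compiler offers it. */
#define demo_unpoison(p, n) do { (void) (p); (void) (n); } while (0)

#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
#  include <sanitizer/msan_interface.h>
#  undef demo_unpoison
#  define demo_unpoison __msan_unpoison
# endif
#endif

int main() {
  char buf[16];
  /* Stand-in for a buffer filled by a syscall MSan cannot see into,
   * the way fstat()/stat()/lstat() fill a struct stat. */
  std::memset(buf, 'x', sizeof(buf) - 1);
  buf[sizeof(buf) - 1] = '\0';
  demo_unpoison(buf, sizeof(buf));  /* tell MSan the bytes are initialized */
  std::printf("%s\n", buf);
  return 0;
}
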
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/kqueue.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/kqueue.cpp
index 86eb529..ffe0f91 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/kqueue.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/kqueue.cpp
@@ -34,6 +34,8 @@
#include <fcntl.h>
#include <time.h>
+#include <atomic>
+
/*
* Required on
* - Until at least FreeBSD 11.0
@@ -60,7 +62,7 @@
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
-static int uv__has_forked_with_cfrunloop;
+static std::atomic<int> uv__has_forked_with_cfrunloop;
#endif
int uv__io_fork(uv_loop_t* loop) {
@@ -82,7 +84,9 @@
process. So we sidestep the issue by pretending like we never
started it in the first place.
*/
- uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
+ atomic_store_explicit(&uv__has_forked_with_cfrunloop,
+ 1,
+ std::memory_order_relaxed);
uv__free(loop->cf_state);
loop->cf_state = NULL;
}
@@ -109,13 +113,29 @@
}
+static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
+ struct kevent change;
+
+ EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
+
+ if (0 == kevent(kqfd, &change, 1, NULL, 0, NULL))
+ return;
+
+ if (errno == EBADF || errno == ENOENT)
+ return;
+
+ abort();
+}
+
+
void uv__io_poll(uv_loop_t* loop, int timeout) {
+ uv__loop_internal_fields_t* lfields;
struct kevent events[1024];
struct kevent* ev;
struct timespec spec;
unsigned int nevents;
unsigned int revents;
- QUEUE* q;
+ struct uv__queue* q;
uv__io_t* w;
uv_process_t* process;
sigset_t* pset;
@@ -134,18 +154,19 @@
int reset_timeout;
if (loop->nfds == 0) {
- assert(QUEUE_EMPTY(&loop->watcher_queue));
+ assert(uv__queue_empty(&loop->watcher_queue));
return;
}
+ lfields = uv__get_internal_fields(loop);
nevents = 0;
- while (!QUEUE_EMPTY(&loop->watcher_queue)) {
- q = QUEUE_HEAD(&loop->watcher_queue);
- QUEUE_REMOVE(q);
- QUEUE_INIT(q);
+ while (!uv__queue_empty(&loop->watcher_queue)) {
+ q = uv__queue_head(&loop->watcher_queue);
+ uv__queue_remove(q);
+ uv__queue_init(q);
- w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
@@ -205,7 +226,7 @@
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
- if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@@ -228,6 +249,12 @@
if (pset != NULL)
pthread_sigmask(SIG_BLOCK, pset, NULL);
+ /* Store the current timeout in a location that's globally accessible so
+ * other locations like uv__work_done() can determine whether the queue
+ * of events in the callback were waiting when poll was called.
+ */
+ lfields->current_timeout = timeout;
+
nfds = kevent(loop->backend_fd,
events,
nevents,
@@ -235,6 +262,9 @@
ARRAY_SIZE(events),
timeout == -1 ? NULL : &spec);
+ if (nfds == -1)
+ assert(errno == EINTR);
+
if (pset != NULL)
pthread_sigmask(SIG_UNBLOCK, pset, NULL);
@@ -242,36 +272,26 @@
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
- SAVE_ERRNO(uv__update_time(loop));
+ uv__update_time(loop);
- if (nfds == 0) {
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- if (timeout == -1)
- continue;
- if (timeout > 0)
- goto update_timeout;
+ if (nfds == 0 || nfds == -1) {
+ /* If kqueue is empty or interrupted, we might still have children ready
+ * to reap immediately. */
+ if (loop->flags & UV_LOOP_REAP_CHILDREN) {
+ loop->flags &= ~UV_LOOP_REAP_CHILDREN;
+ uv__wait_children(loop);
+ assert((reset_timeout == 0 ? timeout : user_timeout) == 0);
+ return; /* Equivalent to fall-through behavior. */
}
- assert(timeout != -1);
- return;
- }
-
- if (nfds == -1) {
- if (errno != EINTR)
- abort();
-
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
- }
-
- if (timeout == 0)
+ } else if (nfds == 0) {
+ /* Reached the user timeout value. */
+ assert(timeout != -1);
return;
-
- if (timeout == -1)
- continue;
+ }
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
@@ -289,8 +309,8 @@
/* Handle kevent NOTE_EXIT results */
if (ev->filter == EVFILT_PROC) {
- QUEUE_FOREACH(q, &loop->process_handles) {
- process = QUEUE_DATA(q, uv_process_t, queue);
+ uv__queue_foreach(q, &loop->process_handles) {
+ process = uv__queue_data(q, uv_process_t, queue);
if (process->pid == fd) {
process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN;
@@ -307,15 +327,8 @@
w = (uv__io_t*)loop->watchers[fd];
if (w == NULL) {
- /* File descriptor that we've stopped watching, disarm it.
- * TODO: batch up. */
- struct kevent events[1];
-
- EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
- if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
- if (errno != EBADF && errno != ENOENT)
- abort();
-
+ /* File descriptor that we've stopped watching, disarm it. */
+ uv__kqueue_delete(loop->backend_fd, ev);
continue;
}
@@ -331,47 +344,27 @@
revents = 0;
if (ev->filter == EVFILT_READ) {
- if (w->pevents & POLLIN) {
+ if (w->pevents & POLLIN)
revents |= POLLIN;
- w->rcount = ev->data;
- } else {
- /* TODO batch up */
- struct kevent events[1];
- EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
- if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
- if (errno != ENOENT)
- abort();
- }
+ else
+ uv__kqueue_delete(loop->backend_fd, ev);
+
if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
revents |= UV__POLLRDHUP;
}
if (ev->filter == EV_OOBAND) {
- if (w->pevents & UV__POLLPRI) {
+ if (w->pevents & UV__POLLPRI)
revents |= UV__POLLPRI;
- w->rcount = ev->data;
- } else {
- /* TODO batch up */
- struct kevent events[1];
- EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
- if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
- if (errno != ENOENT)
- abort();
- }
+ else
+ uv__kqueue_delete(loop->backend_fd, ev);
}
if (ev->filter == EVFILT_WRITE) {
- if (w->pevents & POLLOUT) {
+ if (w->pevents & POLLOUT)
revents |= POLLOUT;
- w->wcount = ev->data;
- } else {
- /* TODO batch up */
- struct kevent events[1];
- EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
- if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
- if (errno != ENOENT)
- abort();
- }
+ else
+ uv__kqueue_delete(loop->backend_fd, ev);
}
if (ev->flags & EV_ERROR)
@@ -398,9 +391,11 @@
uv__wait_children(loop);
}
+ uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
+ uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {
@@ -423,13 +418,13 @@
return;
}
+update_timeout:
if (timeout == 0)
return;
if (timeout == -1)
continue;
-update_timeout:
assert(timeout > 0);
diff = loop->time - base;
@@ -541,13 +536,14 @@
handle->realpath_len = 0;
handle->cf_flags = flags;
- if (fstat(fd, &statbuf))
+ if (uv__fstat(fd, &statbuf))
goto fallback;
/* FSEvents works only with directories */
if (!(statbuf.st_mode & S_IFDIR))
goto fallback;
- if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
+ if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
+ std::memory_order_relaxed)) {
int r;
/* The fallback fd is no longer needed */
uv__close_nocheckstdio(fd);
@@ -582,7 +578,8 @@
uv__handle_stop(handle);
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
- if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
+ if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
+ std::memory_order_relaxed))
if (handle->cf_cb != NULL)
r = uv__fsevents_close(handle);
#endif
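
The new uv__kqueue_delete() helper above consolidates several copy-pasted EV_SET/EV_DELETE blocks and treats EBADF/ENOENT as "already gone" rather than aborting. A small BSD/macOS-only sketch exercising the same add-then-delete sequence on a pipe:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

int main(void) {
  struct kevent change;
  int fds[2];
  int kq;

  kq = kqueue();
  if (kq == -1 || pipe(fds) != 0)
    return 1;

  /* Arm a read filter on the pipe's read end... */
  EV_SET(&change, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(kq, &change, 1, NULL, 0, NULL) != 0)
    return 1;

  /* ...then disarm it the way uv__kqueue_delete() does: EV_DELETE, with
   * EBADF/ENOENT tolerated as "already gone". */
  EV_SET(&change, fds[0], EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (kevent(kq, &change, 1, NULL, 0, NULL) != 0 &&
      errno != EBADF && errno != ENOENT)
    return 1;

  printf("kevent add/delete round trip ok\n");
  close(fds[0]);
  close(fds[1]);
  close(kq);
  return 0;
}
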
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-core.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-core.cpp
deleted file mode 100644
index 12ed7ff..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-core.cpp
+++ /dev/null
@@ -1,841 +0,0 @@
-/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
- * EPOLL* counterparts. We use the POLL* variants in this file because that
- * is what libuv uses elsewhere.
- */
-
-#include "uv.h"
-#include "internal.h"
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <errno.h>
-
-#include <net/if.h>
-#include <sys/epoll.h>
-#include <sys/param.h>
-#include <sys/prctl.h>
-#include <sys/sysinfo.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <time.h>
-
-#define HAVE_IFADDRS_H 1
-
-# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
-# undef HAVE_IFADDRS_H
-#endif
-
-#ifdef __UCLIBC__
-# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
-# undef HAVE_IFADDRS_H
-# endif
-#endif
-
-#ifdef HAVE_IFADDRS_H
-# include <ifaddrs.h>
-# include <sys/socket.h>
-# include <net/ethernet.h>
-# include <netpacket/packet.h>
-#endif /* HAVE_IFADDRS_H */
-
-/* Available from 2.6.32 onwards. */
-#ifndef CLOCK_MONOTONIC_COARSE
-# define CLOCK_MONOTONIC_COARSE 6
-#endif
-
-#ifdef __FRC_ROBORIO__
-#include "wpi/timestamp.h"
-#endif
-
-/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
- * include that file because it conflicts with <time.h>. We'll just have to
- * define it ourselves.
- */
-#ifndef CLOCK_BOOTTIME
-# define CLOCK_BOOTTIME 7
-#endif
-
-static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
-static int read_times(FILE* statfile_fp,
- unsigned int numcpus,
- uv_cpu_info_t* ci);
-static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
-static uint64_t read_cpufreq(unsigned int cpunum);
-
-int uv__platform_loop_init(uv_loop_t* loop) {
-
- loop->inotify_fd = -1;
- loop->inotify_watchers = NULL;
-
- return uv__epoll_init(loop);
-}
-
-
-int uv__io_fork(uv_loop_t* loop) {
- int err;
- void* old_watchers;
-
- old_watchers = loop->inotify_watchers;
-
- uv__close(loop->backend_fd);
- loop->backend_fd = -1;
- uv__platform_loop_delete(loop);
-
- err = uv__platform_loop_init(loop);
- if (err)
- return err;
-
- return uv__inotify_fork(loop, old_watchers);
-}
-
-
-void uv__platform_loop_delete(uv_loop_t* loop) {
- if (loop->inotify_fd == -1) return;
- uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
- uv__close(loop->inotify_fd);
- loop->inotify_fd = -1;
-}
-
-
-uint64_t uv__hrtime(uv_clocktype_t type) {
-#ifdef __FRC_ROBORIO__
- return wpi::Now() * 1000u;
-#else
- static clock_t fast_clock_id = -1;
- struct timespec t;
- clock_t clock_id;
-
- /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
- * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
- * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
- * decide to make a costly system call.
- */
- /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
- * when it has microsecond granularity or better (unlikely).
- */
- clock_id = CLOCK_MONOTONIC;
- if (type != UV_CLOCK_FAST)
- goto done;
-
- clock_id = uv__load_relaxed(&fast_clock_id);
- if (clock_id != -1)
- goto done;
-
- clock_id = CLOCK_MONOTONIC;
- if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
- if (t.tv_nsec <= 1 * 1000 * 1000)
- clock_id = CLOCK_MONOTONIC_COARSE;
-
- uv__store_relaxed(&fast_clock_id, clock_id);
-
-done:
-
- if (clock_gettime(clock_id, &t))
- return 0; /* Not really possible. */
-
- return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
-#endif
-}
-
-
-int uv_resident_set_memory(size_t* rss) {
- char buf[1024];
- const char* s;
- ssize_t n;
- long val;
- int fd;
- int i;
-
- do
- fd = open("/proc/self/stat", O_RDONLY);
- while (fd == -1 && errno == EINTR);
-
- if (fd == -1)
- return UV__ERR(errno);
-
- do
- n = read(fd, buf, sizeof(buf) - 1);
- while (n == -1 && errno == EINTR);
-
- uv__close(fd);
- if (n == -1)
- return UV__ERR(errno);
- buf[n] = '\0';
-
- s = strchr(buf, ' ');
- if (s == NULL)
- goto err;
-
- s += 1;
- if (*s != '(')
- goto err;
-
- s = strchr(s, ')');
- if (s == NULL)
- goto err;
-
- for (i = 1; i <= 22; i++) {
- s = strchr(s + 1, ' ');
- if (s == NULL)
- goto err;
- }
-
- errno = 0;
- val = strtol(s, NULL, 10);
- if (errno != 0)
- goto err;
- if (val < 0)
- goto err;
-
- *rss = val * getpagesize();
- return 0;
-
-err:
- return UV_EINVAL;
-}
-
-int uv_uptime(double* uptime) {
- static volatile int no_clock_boottime;
- char buf[128];
- struct timespec now;
- int r;
-
- /* Try /proc/uptime first, then fallback to clock_gettime(). */
-
- if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
- if (1 == sscanf(buf, "%lf", uptime))
- return 0;
-
- /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
- * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
- * is suspended.
- */
- if (no_clock_boottime) {
- retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
- }
- else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
- no_clock_boottime = 1;
- goto retry_clock_gettime;
- }
-
- if (r)
- return UV__ERR(errno);
-
- *uptime = now.tv_sec;
- return 0;
-}
-
-
-static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
- unsigned int num;
- char buf[1024];
-
- if (!fgets(buf, sizeof(buf), statfile_fp))
- return UV_EIO;
-
- num = 0;
- while (fgets(buf, sizeof(buf), statfile_fp)) {
- if (strncmp(buf, "cpu", 3))
- break;
- num++;
- }
-
- if (num == 0)
- return UV_EIO;
-
- *numcpus = num;
- return 0;
-}
-
-
-int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
- unsigned int numcpus;
- uv_cpu_info_t* ci;
- int err;
- FILE* statfile_fp;
-
- *cpu_infos = NULL;
- *count = 0;
-
- statfile_fp = uv__open_file("/proc/stat");
- if (statfile_fp == NULL)
- return UV__ERR(errno);
-
- err = uv__cpu_num(statfile_fp, &numcpus);
- if (err < 0)
- goto out;
-
- err = UV_ENOMEM;
- ci = (uv_cpu_info_t*)uv__calloc(numcpus, sizeof(*ci));
- if (ci == NULL)
- goto out;
-
- err = read_models(numcpus, ci);
- if (err == 0)
- err = read_times(statfile_fp, numcpus, ci);
-
- if (err) {
- uv_free_cpu_info(ci, numcpus);
- goto out;
- }
-
- /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
- * We don't check for errors here. Worst case, the field is left zero.
- */
- if (ci[0].speed == 0)
- read_speeds(numcpus, ci);
-
- *cpu_infos = ci;
- *count = numcpus;
- err = 0;
-
-out:
-
- if (fclose(statfile_fp))
- if (errno != EINTR && errno != EINPROGRESS)
- abort();
-
- return err;
-}
-
-
-static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
- unsigned int num;
-
- for (num = 0; num < numcpus; num++)
- ci[num].speed = read_cpufreq(num) / 1000;
-}
-
-
-/* Also reads the CPU frequency on ppc and x86. The other architectures only
- * have a BogoMIPS field, which may not be very accurate.
- *
- * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
- */
-static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
-#if defined(__PPC__)
- static const char model_marker[] = "cpu\t\t: ";
- static const char speed_marker[] = "clock\t\t: ";
-#else
- static const char model_marker[] = "model name\t: ";
- static const char speed_marker[] = "cpu MHz\t\t: ";
-#endif
- const char* inferred_model;
- unsigned int model_idx;
- unsigned int speed_idx;
- unsigned int part_idx;
- char buf[1024];
- char* model;
- FILE* fp;
- int model_id;
-
- /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
- (void) &model_marker;
- (void) &speed_marker;
- (void) &speed_idx;
- (void) &part_idx;
- (void) &model;
- (void) &buf;
- (void) &fp;
- (void) &model_id;
-
- model_idx = 0;
- speed_idx = 0;
- part_idx = 0;
-
-#if defined(__arm__) || \
- defined(__i386__) || \
- defined(__mips__) || \
- defined(__aarch64__) || \
- defined(__PPC__) || \
- defined(__x86_64__)
- fp = uv__open_file("/proc/cpuinfo");
- if (fp == NULL)
- return UV__ERR(errno);
-
- while (fgets(buf, sizeof(buf), fp)) {
- if (model_idx < numcpus) {
- if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
- model = buf + sizeof(model_marker) - 1;
- model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
- if (model == NULL) {
- fclose(fp);
- return UV_ENOMEM;
- }
- ci[model_idx++].model = model;
- continue;
- }
- }
-#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
- if (model_idx < numcpus) {
-#if defined(__arm__)
- /* Fallback for pre-3.8 kernels. */
- static const char model_marker[] = "Processor\t: ";
-#elif defined(__aarch64__)
- static const char part_marker[] = "CPU part\t: ";
-
- /* Adapted from: https://github.com/karelzak/util-linux */
- struct vendor_part {
- const int id;
- const char* name;
- };
-
- static const struct vendor_part arm_chips[] = {
- { 0x811, "ARM810" },
- { 0x920, "ARM920" },
- { 0x922, "ARM922" },
- { 0x926, "ARM926" },
- { 0x940, "ARM940" },
- { 0x946, "ARM946" },
- { 0x966, "ARM966" },
- { 0xa20, "ARM1020" },
- { 0xa22, "ARM1022" },
- { 0xa26, "ARM1026" },
- { 0xb02, "ARM11 MPCore" },
- { 0xb36, "ARM1136" },
- { 0xb56, "ARM1156" },
- { 0xb76, "ARM1176" },
- { 0xc05, "Cortex-A5" },
- { 0xc07, "Cortex-A7" },
- { 0xc08, "Cortex-A8" },
- { 0xc09, "Cortex-A9" },
- { 0xc0d, "Cortex-A17" }, /* Originally A12 */
- { 0xc0f, "Cortex-A15" },
- { 0xc0e, "Cortex-A17" },
- { 0xc14, "Cortex-R4" },
- { 0xc15, "Cortex-R5" },
- { 0xc17, "Cortex-R7" },
- { 0xc18, "Cortex-R8" },
- { 0xc20, "Cortex-M0" },
- { 0xc21, "Cortex-M1" },
- { 0xc23, "Cortex-M3" },
- { 0xc24, "Cortex-M4" },
- { 0xc27, "Cortex-M7" },
- { 0xc60, "Cortex-M0+" },
- { 0xd01, "Cortex-A32" },
- { 0xd03, "Cortex-A53" },
- { 0xd04, "Cortex-A35" },
- { 0xd05, "Cortex-A55" },
- { 0xd06, "Cortex-A65" },
- { 0xd07, "Cortex-A57" },
- { 0xd08, "Cortex-A72" },
- { 0xd09, "Cortex-A73" },
- { 0xd0a, "Cortex-A75" },
- { 0xd0b, "Cortex-A76" },
- { 0xd0c, "Neoverse-N1" },
- { 0xd0d, "Cortex-A77" },
- { 0xd0e, "Cortex-A76AE" },
- { 0xd13, "Cortex-R52" },
- { 0xd20, "Cortex-M23" },
- { 0xd21, "Cortex-M33" },
- { 0xd41, "Cortex-A78" },
- { 0xd42, "Cortex-A78AE" },
- { 0xd4a, "Neoverse-E1" },
- { 0xd4b, "Cortex-A78C" },
- };
-
- if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
- model = buf + sizeof(part_marker) - 1;
-
- errno = 0;
- model_id = strtol(model, NULL, 16);
- if ((errno != 0) || model_id < 0) {
- fclose(fp);
- return UV_EINVAL;
- }
-
- for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
- if (model_id == arm_chips[part_idx].id) {
- model = uv__strdup(arm_chips[part_idx].name);
- if (model == NULL) {
- fclose(fp);
- return UV_ENOMEM;
- }
- ci[model_idx++].model = model;
- break;
- }
- }
- }
-#else /* defined(__mips__) */
- static const char model_marker[] = "cpu model\t\t: ";
-#endif
- if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
- model = buf + sizeof(model_marker) - 1;
- model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
- if (model == NULL) {
- fclose(fp);
- return UV_ENOMEM;
- }
- ci[model_idx++].model = model;
- continue;
- }
- }
-#else /* !__arm__ && !__mips__ && !__aarch64__ */
- if (speed_idx < numcpus) {
- if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
- ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
- continue;
- }
- }
-#endif /* __arm__ || __mips__ || __aarch64__ */
- }
-
- fclose(fp);
-#endif /* __arm__ || __i386__ || __mips__ || __aarch64__ || __PPC__ || __x86_64__ */
-
- /* Now we want to make sure that all the models contain *something* because
- * it's not safe to leave them as null. Copy the last entry unless there
- * isn't one, in that case we simply put "unknown" into everything.
- */
- inferred_model = "unknown";
- if (model_idx > 0)
- inferred_model = ci[model_idx - 1].model;
-
- while (model_idx < numcpus) {
- model = uv__strndup(inferred_model, strlen(inferred_model));
- if (model == NULL)
- return UV_ENOMEM;
- ci[model_idx++].model = model;
- }
-
- return 0;
-}
-
-
-static int read_times(FILE* statfile_fp,
- unsigned int numcpus,
- uv_cpu_info_t* ci) {
- struct uv_cpu_times_s ts;
- unsigned int ticks;
- unsigned int multiplier;
- uint64_t user;
- uint64_t nice;
- uint64_t sys;
- uint64_t idle;
- uint64_t dummy;
- uint64_t irq;
- uint64_t num;
- uint64_t len;
- char buf[1024];
-
- ticks = (unsigned int)sysconf(_SC_CLK_TCK);
- assert(ticks != (unsigned int) -1);
- assert(ticks != 0);
- multiplier = ((uint64_t)1000L / ticks);
-
- rewind(statfile_fp);
-
- if (!fgets(buf, sizeof(buf), statfile_fp))
- abort();
-
- num = 0;
-
- while (fgets(buf, sizeof(buf), statfile_fp)) {
- if (num >= numcpus)
- break;
-
- if (strncmp(buf, "cpu", 3))
- break;
-
- /* skip "cpu<num> " marker */
- {
- unsigned int n;
- int r = sscanf(buf, "cpu%u ", &n);
- assert(r == 1);
- (void) r; /* silence build warning */
- for (len = sizeof("cpu0"); n /= 10; len++);
- }
-
- /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
- * guest, guest_nice but we're only interested in the first four + irq.
- *
- * Don't use %*s to skip fields or %ll to read straight into the uint64_t
- * fields, they're not allowed in C89 mode.
- */
- if (6 != sscanf(buf + len,
- "%" PRIu64 " %" PRIu64 " %" PRIu64
- "%" PRIu64 " %" PRIu64 " %" PRIu64,
- &user,
- &nice,
- &sys,
- &idle,
- &dummy,
- &irq))
- abort();
-
- ts.user = user * multiplier;
- ts.nice = nice * multiplier;
- ts.sys = sys * multiplier;
- ts.idle = idle * multiplier;
- ts.irq = irq * multiplier;
- ci[num++].cpu_times = ts;
- }
- assert(num == numcpus);
-
- return 0;
-}
-
-
-static uint64_t read_cpufreq(unsigned int cpunum) {
- uint64_t val;
- char buf[1024];
- FILE* fp;
-
- snprintf(buf,
- sizeof(buf),
- "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
- cpunum);
-
- fp = uv__open_file(buf);
- if (fp == NULL)
- return 0;
-
- if (fscanf(fp, "%" PRIu64, &val) != 1)
- val = 0;
-
- fclose(fp);
-
- return val;
-}
-
-
-#ifdef HAVE_IFADDRS_H
-static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
- if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
- return 1;
- if (ent->ifa_addr == NULL)
- return 1;
- /*
- * On Linux getifaddrs returns information related to the raw underlying
- * devices. We're not interested in this information yet.
- */
- if (ent->ifa_addr->sa_family == PF_PACKET)
- return exclude_type;
- return !exclude_type;
-}
-#endif
-
-int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
-#ifndef HAVE_IFADDRS_H
- *count = 0;
- *addresses = NULL;
- return UV_ENOSYS;
-#else
- struct ifaddrs *addrs, *ent;
- uv_interface_address_t* address;
- int i;
- struct sockaddr_ll *sll;
-
- *count = 0;
- *addresses = NULL;
-
- if (getifaddrs(&addrs))
- return UV__ERR(errno);
-
- /* Count the number of interfaces */
- for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
- continue;
-
- (*count)++;
- }
-
- if (*count == 0) {
- freeifaddrs(addrs);
- return 0;
- }
-
- /* Make sure the memory is initialized to zero using calloc() */
- *addresses = (uv_interface_address_t*)uv__calloc(*count, sizeof(**addresses));
- if (!(*addresses)) {
- freeifaddrs(addrs);
- return UV_ENOMEM;
- }
-
- address = *addresses;
-
- for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
- continue;
-
- address->name = uv__strdup(ent->ifa_name);
-
- if (ent->ifa_addr->sa_family == AF_INET6) {
- address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
- } else {
- address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
- }
-
- if (ent->ifa_netmask->sa_family == AF_INET6) {
- address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
- } else {
- address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
- }
-
- address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
-
- address++;
- }
-
- /* Fill in physical addresses for each interface */
- for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
- continue;
-
- address = *addresses;
-
- for (i = 0; i < (*count); i++) {
- size_t namelen = strlen(ent->ifa_name);
- /* Alias interfaces share the same physical address */
- if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
- (address->name[namelen] == 0 || address->name[namelen] == ':')) {
- sll = (struct sockaddr_ll*)ent->ifa_addr;
- memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
- }
- address++;
- }
- }
-
- freeifaddrs(addrs);
-
- return 0;
-#endif
-}
-
-
-void uv_free_interface_addresses(uv_interface_address_t* addresses,
- int count) {
- int i;
-
- for (i = 0; i < count; i++) {
- uv__free(addresses[i].name);
- }
-
- uv__free(addresses);
-}
-
-
-void uv__set_process_title(const char* title) {
-#if defined(PR_SET_NAME)
- prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
-#endif
-}
-
-
-static uint64_t uv__read_proc_meminfo(const char* what) {
- uint64_t rc;
- char* p;
- char buf[4096]; /* Large enough to hold all of /proc/meminfo. */
-
- if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
- return 0;
-
- p = strstr(buf, what);
-
- if (p == NULL)
- return 0;
-
- p += strlen(what);
-
- rc = 0;
- sscanf(p, "%" PRIu64 " kB", &rc);
-
- return rc * 1024;
-}
-
-
-uint64_t uv_get_free_memory(void) {
- struct sysinfo info;
- uint64_t rc;
-
- rc = uv__read_proc_meminfo("MemAvailable:");
-
- if (rc != 0)
- return rc;
-
- if (0 == sysinfo(&info))
- return (uint64_t) info.freeram * info.mem_unit;
-
- return 0;
-}
-
-
-uint64_t uv_get_total_memory(void) {
- struct sysinfo info;
- uint64_t rc;
-
- rc = uv__read_proc_meminfo("MemTotal:");
-
- if (rc != 0)
- return rc;
-
- if (0 == sysinfo(&info))
- return (uint64_t) info.totalram * info.mem_unit;
-
- return 0;
-}
-
-
-static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
- char filename[256];
- char buf[32]; /* Large enough to hold an encoded uint64_t. */
- uint64_t rc;
-
- rc = 0;
- snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
- if (0 == uv__slurp(filename, buf, sizeof(buf)))
- sscanf(buf, "%" PRIu64, &rc);
-
- return rc;
-}
-
-
-uint64_t uv_get_constrained_memory(void) {
- /*
- * This might return 0 if there was a problem getting the memory limit from
- * cgroups. This is OK because a return value of 0 signifies that the memory
- * limit is unknown.
- */
- return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
-}
-
-
-void uv_loadavg(double avg[3]) {
- struct sysinfo info;
- char buf[128]; /* Large enough to hold all of /proc/loadavg. */
-
- if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
- if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
- return;
-
- if (sysinfo(&info) < 0)
- return;
-
- avg[0] = (double) info.loads[0] / 65536.0;
- avg[1] = (double) info.loads[1] / 65536.0;
- avg[2] = (double) info.loads[2] / 65536.0;
-}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-inotify.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-inotify.cpp
deleted file mode 100644
index f5366e9..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-inotify.cpp
+++ /dev/null
@@ -1,327 +0,0 @@
-/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "uv.h"
-#include "uv/tree.h"
-#include "internal.h"
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <errno.h>
-
-#include <sys/inotify.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-struct watcher_list {
- RB_ENTRY(watcher_list) entry;
- QUEUE watchers;
- int iterating;
- char* path;
- int wd;
-};
-
-struct watcher_root {
- struct watcher_list* rbh_root;
-};
-#define CAST(p) ((struct watcher_root*)(p))
-
-
-static int compare_watchers(const struct watcher_list* a,
- const struct watcher_list* b) {
- if (a->wd < b->wd) return -1;
- if (a->wd > b->wd) return 1;
- return 0;
-}
-
-
-RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
-
-
-static void uv__inotify_read(uv_loop_t* loop,
- uv__io_t* w,
- unsigned int revents);
-
-static void maybe_free_watcher_list(struct watcher_list* w,
- uv_loop_t* loop);
-
-static int init_inotify(uv_loop_t* loop) {
- int fd;
-
- if (loop->inotify_fd != -1)
- return 0;
-
- fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
- if (fd < 0)
- return UV__ERR(errno);
-
- loop->inotify_fd = fd;
- uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
- uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
-
- return 0;
-}
-
-
-int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
- /* Open the inotify_fd, and re-arm all the inotify watchers. */
- int err;
- struct watcher_list* tmp_watcher_list_iter;
- struct watcher_list* watcher_list;
- struct watcher_list tmp_watcher_list;
- QUEUE queue;
- QUEUE* q;
- uv_fs_event_t* handle;
- char* tmp_path;
-
- if (old_watchers != NULL) {
- /* We must restore the old watcher list to be able to close items
- * out of it.
- */
- loop->inotify_watchers = old_watchers;
-
- QUEUE_INIT(&tmp_watcher_list.watchers);
- /* Note that the queue we use is shared with the start and stop()
- * functions, making QUEUE_FOREACH unsafe to use. So we use the
- * QUEUE_MOVE trick to safely iterate. Also don't free the watcher
- * list until we're done iterating. c.f. uv__inotify_read.
- */
- RB_FOREACH_SAFE(watcher_list, watcher_root,
- CAST(&old_watchers), tmp_watcher_list_iter) {
- watcher_list->iterating = 1;
- QUEUE_MOVE(&watcher_list->watchers, &queue);
- while (!QUEUE_EMPTY(&queue)) {
- q = QUEUE_HEAD(&queue);
- handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
- /* It's critical to keep a copy of path here, because it
- * will be set to NULL by stop() and then deallocated by
- * maybe_free_watcher_list
- */
- tmp_path = uv__strdup(handle->path);
- assert(tmp_path != NULL);
- QUEUE_REMOVE(q);
- QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
- uv_fs_event_stop(handle);
-
- QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
- handle->path = tmp_path;
- }
- watcher_list->iterating = 0;
- maybe_free_watcher_list(watcher_list, loop);
- }
-
- QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
- while (!QUEUE_EMPTY(&queue)) {
- q = QUEUE_HEAD(&queue);
- QUEUE_REMOVE(q);
- handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
- tmp_path = handle->path;
- handle->path = NULL;
- err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
- uv__free(tmp_path);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-
-static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
- struct watcher_list w;
- w.wd = wd;
- return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
-}
-
-static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
- /* if the watcher_list->watchers is being iterated over, we can't free it. */
- if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
- /* No watchers left for this path. Clean up. */
- RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
- inotify_rm_watch(loop->inotify_fd, w->wd);
- uv__free(w);
- }
-}
-
-static void uv__inotify_read(uv_loop_t* loop,
- uv__io_t* dummy,
- unsigned int events) {
- const struct inotify_event* e;
- struct watcher_list* w;
- uv_fs_event_t* h;
- QUEUE queue;
- QUEUE* q;
- const char* path;
- ssize_t size;
- const char *p;
- /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
- char buf[4096];
-
- for (;;) {
- do
- size = read(loop->inotify_fd, buf, sizeof(buf));
- while (size == -1 && errno == EINTR);
-
- if (size == -1) {
- assert(errno == EAGAIN || errno == EWOULDBLOCK);
- break;
- }
-
- assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
-
- /* Now we have one or more inotify_event structs. */
- for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
- e = (const struct inotify_event*) p;
-
- events = 0;
- if (e->mask & (IN_ATTRIB|IN_MODIFY))
- events |= UV_CHANGE;
- if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
- events |= UV_RENAME;
-
- w = find_watcher(loop, e->wd);
- if (w == NULL)
- continue; /* Stale event, no watchers left. */
-
- /* inotify does not return the filename when monitoring a single file
- * for modifications. Repurpose the filename for API compatibility.
- * I'm not convinced this is a good thing, maybe it should go.
- */
- path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
-
- /* We're about to iterate over the queue and call user's callbacks.
- * What can go wrong?
- * A callback could call uv_fs_event_stop()
- * and the queue can change under our feet.
- * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
- * And we don't free the watcher_list until we're done iterating.
- *
- * First,
- * tell uv_fs_event_stop() (that could be called from a user's callback)
- * not to free watcher_list.
- */
- w->iterating = 1;
- QUEUE_MOVE(&w->watchers, &queue);
- while (!QUEUE_EMPTY(&queue)) {
- q = QUEUE_HEAD(&queue);
- h = QUEUE_DATA(q, uv_fs_event_t, watchers);
-
- QUEUE_REMOVE(q);
- QUEUE_INSERT_TAIL(&w->watchers, q);
-
- h->cb(h, path, events, 0);
- }
- /* done iterating, time to (maybe) free empty watcher_list */
- w->iterating = 0;
- maybe_free_watcher_list(w, loop);
- }
- }
-}
-
-
-int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
- uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
- return 0;
-}
-
-
-int uv_fs_event_start(uv_fs_event_t* handle,
- uv_fs_event_cb cb,
- const char* path,
- unsigned int flags) {
- struct watcher_list* w;
- size_t len;
- int events;
- int err;
- int wd;
-
- if (uv__is_active(handle))
- return UV_EINVAL;
-
- err = init_inotify(handle->loop);
- if (err)
- return err;
-
- events = IN_ATTRIB
- | IN_CREATE
- | IN_MODIFY
- | IN_DELETE
- | IN_DELETE_SELF
- | IN_MOVE_SELF
- | IN_MOVED_FROM
- | IN_MOVED_TO;
-
- wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
- if (wd == -1)
- return UV__ERR(errno);
-
- w = find_watcher(handle->loop, wd);
- if (w)
- goto no_insert;
-
- len = strlen(path) + 1;
- w = (watcher_list*)uv__malloc(sizeof(*w) + len);
- if (w == NULL)
- return UV_ENOMEM;
-
- w->wd = wd;
- w->path = (char*)memcpy(w + 1, path, len);
- QUEUE_INIT(&w->watchers);
- w->iterating = 0;
- RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
-
-no_insert:
- uv__handle_start(handle);
- QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
- handle->path = w->path;
- handle->cb = cb;
- handle->wd = wd;
-
- return 0;
-}
-
-
-int uv_fs_event_stop(uv_fs_event_t* handle) {
- struct watcher_list* w;
-
- if (!uv__is_active(handle))
- return 0;
-
- w = find_watcher(handle->loop, handle->wd);
- assert(w != NULL);
-
- handle->wd = -1;
- handle->path = NULL;
- uv__handle_stop(handle);
- QUEUE_REMOVE(&handle->watchers);
-
- maybe_free_watcher_list(w, handle->loop);
-
- return 0;
-}
-
-
-void uv__fs_event_close(uv_fs_event_t* handle) {
- uv_fs_event_stop(handle);
-}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-syscalls.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-syscalls.cpp
deleted file mode 100644
index 5071cd5..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-syscalls.cpp
+++ /dev/null
@@ -1,264 +0,0 @@
-/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "linux-syscalls.h"
-#include <unistd.h>
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <errno.h>
-
-#if defined(__arm__)
-# if defined(__thumb__) || defined(__ARM_EABI__)
-# define UV_SYSCALL_BASE 0
-# else
-# define UV_SYSCALL_BASE 0x900000
-# endif
-#endif /* __arm__ */
-
-#ifndef __NR_recvmmsg
-# if defined(__x86_64__)
-# define __NR_recvmmsg 299
-# elif defined(__arm__)
-# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
-# endif
-#endif /* __NR_recvmmsg */
-
-#ifndef __NR_sendmmsg
-# if defined(__x86_64__)
-# define __NR_sendmmsg 307
-# elif defined(__arm__)
-# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
-# endif
-#endif /* __NR_sendmmsg */
-
-#ifndef __NR_utimensat
-# if defined(__x86_64__)
-# define __NR_utimensat 280
-# elif defined(__i386__)
-# define __NR_utimensat 320
-# elif defined(__arm__)
-# define __NR_utimensat (UV_SYSCALL_BASE + 348)
-# endif
-#endif /* __NR_utimensat */
-
-#ifndef __NR_preadv
-# if defined(__x86_64__)
-# define __NR_preadv 295
-# elif defined(__i386__)
-# define __NR_preadv 333
-# elif defined(__arm__)
-# define __NR_preadv (UV_SYSCALL_BASE + 361)
-# endif
-#endif /* __NR_preadv */
-
-#ifndef __NR_pwritev
-# if defined(__x86_64__)
-# define __NR_pwritev 296
-# elif defined(__i386__)
-# define __NR_pwritev 334
-# elif defined(__arm__)
-# define __NR_pwritev (UV_SYSCALL_BASE + 362)
-# endif
-#endif /* __NR_pwritev */
-
-#ifndef __NR_dup3
-# if defined(__x86_64__)
-# define __NR_dup3 292
-# elif defined(__i386__)
-# define __NR_dup3 330
-# elif defined(__arm__)
-# define __NR_dup3 (UV_SYSCALL_BASE + 358)
-# endif
-#endif /* __NR_dup3 */
-
-#ifndef __NR_copy_file_range
-# if defined(__x86_64__)
-# define __NR_copy_file_range 326
-# elif defined(__i386__)
-# define __NR_copy_file_range 377
-# elif defined(__s390__)
-# define __NR_copy_file_range 375
-# elif defined(__arm__)
-# define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
-# elif defined(__aarch64__)
-# define __NR_copy_file_range 285
-# elif defined(__powerpc__)
-# define __NR_copy_file_range 379
-# elif defined(__arc__)
-# define __NR_copy_file_range 285
-# endif
-#endif /* __NR_copy_file_range */
-
-#ifndef __NR_statx
-# if defined(__x86_64__)
-# define __NR_statx 332
-# elif defined(__i386__)
-# define __NR_statx 383
-# elif defined(__aarch64__)
-# define __NR_statx 397
-# elif defined(__arm__)
-# define __NR_statx (UV_SYSCALL_BASE + 397)
-# elif defined(__ppc__)
-# define __NR_statx 383
-# elif defined(__s390__)
-# define __NR_statx 379
-# endif
-#endif /* __NR_statx */
-
-#ifndef __NR_getrandom
-# if defined(__x86_64__)
-# define __NR_getrandom 318
-# elif defined(__i386__)
-# define __NR_getrandom 355
-# elif defined(__aarch64__)
-# define __NR_getrandom 384
-# elif defined(__arm__)
-# define __NR_getrandom (UV_SYSCALL_BASE + 384)
-# elif defined(__ppc__)
-# define __NR_getrandom 359
-# elif defined(__s390__)
-# define __NR_getrandom 349
-# endif
-#endif /* __NR_getrandom */
-
-struct uv__mmsghdr;
-
-int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if defined(__i386__)
- unsigned long args[4];
- int rc;
-
- args[0] = (unsigned long) fd;
- args[1] = (unsigned long) mmsg;
- args[2] = (unsigned long) vlen;
- args[3] = /* flags */ 0;
-
- /* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
- rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
- if (rc == -1)
- if (errno == EINVAL)
- errno = ENOSYS;
-
- return rc;
-#elif defined(__NR_sendmmsg)
- return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
-#else
- return errno = ENOSYS, -1;
-#endif
-}
-
-
-int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if defined(__i386__)
- unsigned long args[5];
- int rc;
-
- args[0] = (unsigned long) fd;
- args[1] = (unsigned long) mmsg;
- args[2] = (unsigned long) vlen;
- args[3] = /* flags */ 0;
- args[4] = /* timeout */ 0;
-
- /* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
- rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
- if (rc == -1)
- if (errno == EINVAL)
- errno = ENOSYS;
-
- return rc;
-#elif defined(__NR_recvmmsg)
- return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
-#else
- return errno = ENOSYS, -1;
-#endif
-}
-
-
-ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
-#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
- return errno = ENOSYS, -1;
-#else
- return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
-#endif
-}
-
-
-ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
-#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
- return errno = ENOSYS, -1;
-#else
- return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
-#endif
-}
-
-
-int uv__dup3(int oldfd, int newfd, int flags) {
-#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
- return errno = ENOSYS, -1;
-#else
- return syscall(__NR_dup3, oldfd, newfd, flags);
-#endif
-}
-
-
-ssize_t
-uv__fs_copy_file_range(int fd_in,
- off_t* off_in,
- int fd_out,
- off_t* off_out,
- size_t len,
- unsigned int flags)
-{
-#ifdef __NR_copy_file_range
- return syscall(__NR_copy_file_range,
- fd_in,
- off_in,
- fd_out,
- off_out,
- len,
- flags);
-#else
- return errno = ENOSYS, -1;
-#endif
-}
-
-
-int uv__statx(int dirfd,
- const char* path,
- int flags,
- unsigned int mask,
- struct uv__statx* statxbuf) {
-#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
- return errno = ENOSYS, -1;
-#else
- return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
-#endif
-}
-
-
-ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
-#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
- return errno = ENOSYS, -1;
-#else
- return syscall(__NR_getrandom, buf, buflen, flags);
-#endif
-}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-syscalls.h b/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-syscalls.h
deleted file mode 100644
index b4d9082..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux-syscalls.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef UV_LINUX_SYSCALL_H_
-#define UV_LINUX_SYSCALL_H_
-
-#include <stdint.h>
-#include <signal.h>
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/socket.h>
-
-struct uv__statx_timestamp {
- int64_t tv_sec;
- uint32_t tv_nsec;
- int32_t unused0;
-};
-
-struct uv__statx {
- uint32_t stx_mask;
- uint32_t stx_blksize;
- uint64_t stx_attributes;
- uint32_t stx_nlink;
- uint32_t stx_uid;
- uint32_t stx_gid;
- uint16_t stx_mode;
- uint16_t unused0;
- uint64_t stx_ino;
- uint64_t stx_size;
- uint64_t stx_blocks;
- uint64_t stx_attributes_mask;
- struct uv__statx_timestamp stx_atime;
- struct uv__statx_timestamp stx_btime;
- struct uv__statx_timestamp stx_ctime;
- struct uv__statx_timestamp stx_mtime;
- uint32_t stx_rdev_major;
- uint32_t stx_rdev_minor;
- uint32_t stx_dev_major;
- uint32_t stx_dev_minor;
- uint64_t unused1[14];
-};
-
-ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
-ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
-int uv__dup3(int oldfd, int newfd, int flags);
-ssize_t
-uv__fs_copy_file_range(int fd_in,
- off_t* off_in,
- int fd_out,
- off_t* off_out,
- size_t len,
- unsigned int flags);
-int uv__statx(int dirfd,
- const char* path,
- int flags,
- unsigned int mask,
- struct uv__statx* statxbuf);
-ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
-
-#endif /* UV_LINUX_SYSCALL_H_ */
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/linux.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/linux.cpp
new file mode 100644
index 0000000..d365b62
--- /dev/null
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/linux.cpp
@@ -0,0 +1,2526 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
+ * EPOLL* counterparts. We use the POLL* variants in this file because that
+ * is what libuv uses elsewhere.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <inttypes.h>
+#include <stddef.h> /* offsetof */
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <fcntl.h>
+#include <net/if.h>
+#include <sys/epoll.h>
+#include <sys/inotify.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/sysinfo.h>
+#include <sys/sysmacros.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <time.h>
+#include <unistd.h>
+
+#ifndef __NR_io_uring_setup
+# define __NR_io_uring_setup 425
+#endif
+
+#ifndef __NR_io_uring_enter
+# define __NR_io_uring_enter 426
+#endif
+
+#ifndef __NR_io_uring_register
+# define __NR_io_uring_register 427
+#endif
+
+#ifndef __NR_copy_file_range
+# if defined(__x86_64__)
+# define __NR_copy_file_range 326
+# elif defined(__i386__)
+# define __NR_copy_file_range 377
+# elif defined(__s390__)
+# define __NR_copy_file_range 375
+# elif defined(__arm__)
+# define __NR_copy_file_range 391
+# elif defined(__aarch64__)
+# define __NR_copy_file_range 285
+# elif defined(__powerpc__)
+# define __NR_copy_file_range 379
+# elif defined(__arc__)
+# define __NR_copy_file_range 285
+# endif
+#endif /* __NR_copy_file_range */
+
+#ifndef __NR_statx
+# if defined(__x86_64__)
+# define __NR_statx 332
+# elif defined(__i386__)
+# define __NR_statx 383
+# elif defined(__aarch64__)
+# define __NR_statx 397
+# elif defined(__arm__)
+# define __NR_statx 397
+# elif defined(__ppc__)
+# define __NR_statx 383
+# elif defined(__s390__)
+# define __NR_statx 379
+# endif
+#endif /* __NR_statx */
+
+#ifndef __NR_getrandom
+# if defined(__x86_64__)
+# define __NR_getrandom 318
+# elif defined(__i386__)
+# define __NR_getrandom 355
+# elif defined(__aarch64__)
+# define __NR_getrandom 384
+# elif defined(__arm__)
+# define __NR_getrandom 384
+# elif defined(__ppc__)
+# define __NR_getrandom 359
+# elif defined(__s390__)
+# define __NR_getrandom 349
+# endif
+#endif /* __NR_getrandom */
+
+#define HAVE_IFADDRS_H 1
+
+# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
+# undef HAVE_IFADDRS_H
+#endif
+
+#ifdef __UCLIBC__
+# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
+# undef HAVE_IFADDRS_H
+# endif
+#endif
+
+#ifdef HAVE_IFADDRS_H
+# include <ifaddrs.h>
+# include <sys/socket.h>
+# include <net/ethernet.h>
+# include <netpacket/packet.h>
+#endif /* HAVE_IFADDRS_H */
+
+#include <atomic>
+
+enum {
+ UV__IORING_SETUP_SQPOLL = 2u,
+};
+
+enum {
+ UV__IORING_FEAT_SINGLE_MMAP = 1u,
+ UV__IORING_FEAT_NODROP = 2u,
+ UV__IORING_FEAT_RSRC_TAGS = 1024u, /* linux v5.13 */
+};
+
+enum {
+ UV__IORING_OP_READV = 1,
+ UV__IORING_OP_WRITEV = 2,
+ UV__IORING_OP_FSYNC = 3,
+ UV__IORING_OP_OPENAT = 18,
+ UV__IORING_OP_CLOSE = 19,
+ UV__IORING_OP_STATX = 21,
+ UV__IORING_OP_EPOLL_CTL = 29,
+ UV__IORING_OP_RENAMEAT = 35,
+ UV__IORING_OP_UNLINKAT = 36,
+ UV__IORING_OP_MKDIRAT = 37,
+ UV__IORING_OP_SYMLINKAT = 38,
+ UV__IORING_OP_LINKAT = 39,
+};
+
+enum {
+ UV__IORING_ENTER_GETEVENTS = 1u,
+ UV__IORING_ENTER_SQ_WAKEUP = 2u,
+};
+
+enum {
+ UV__IORING_SQ_NEED_WAKEUP = 1u,
+ UV__IORING_SQ_CQ_OVERFLOW = 2u,
+};
+
+enum {
+ UV__MKDIRAT_SYMLINKAT_LINKAT = 1u,
+};
+
+struct uv__io_cqring_offsets {
+ uint32_t head;
+ uint32_t tail;
+ uint32_t ring_mask;
+ uint32_t ring_entries;
+ uint32_t overflow;
+ uint32_t cqes;
+ uint64_t reserved0;
+ uint64_t reserved1;
+};
+
+STATIC_ASSERT(40 == sizeof(struct uv__io_cqring_offsets));
+
+struct uv__io_sqring_offsets {
+ uint32_t head;
+ uint32_t tail;
+ uint32_t ring_mask;
+ uint32_t ring_entries;
+ uint32_t flags;
+ uint32_t dropped;
+ uint32_t array;
+ uint32_t reserved0;
+ uint64_t reserved1;
+};
+
+STATIC_ASSERT(40 == sizeof(struct uv__io_sqring_offsets));
+
+struct uv__io_uring_cqe {
+ uint64_t user_data;
+ int32_t res;
+ uint32_t flags;
+};
+
+STATIC_ASSERT(16 == sizeof(struct uv__io_uring_cqe));
+
+struct uv__io_uring_sqe {
+ uint8_t opcode;
+ uint8_t flags;
+ uint16_t ioprio;
+ int32_t fd;
+ union {
+ uint64_t off;
+ uint64_t addr2;
+ };
+ union {
+ uint64_t addr;
+ };
+ uint32_t len;
+ union {
+ uint32_t rw_flags;
+ uint32_t fsync_flags;
+ uint32_t open_flags;
+ uint32_t statx_flags;
+ };
+ uint64_t user_data;
+ union {
+ uint16_t buf_index;
+ uint64_t pad[3];
+ };
+};
+
+STATIC_ASSERT(64 == sizeof(struct uv__io_uring_sqe));
+STATIC_ASSERT(0 == offsetof(struct uv__io_uring_sqe, opcode));
+STATIC_ASSERT(1 == offsetof(struct uv__io_uring_sqe, flags));
+STATIC_ASSERT(2 == offsetof(struct uv__io_uring_sqe, ioprio));
+STATIC_ASSERT(4 == offsetof(struct uv__io_uring_sqe, fd));
+STATIC_ASSERT(8 == offsetof(struct uv__io_uring_sqe, off));
+STATIC_ASSERT(16 == offsetof(struct uv__io_uring_sqe, addr));
+STATIC_ASSERT(24 == offsetof(struct uv__io_uring_sqe, len));
+STATIC_ASSERT(28 == offsetof(struct uv__io_uring_sqe, rw_flags));
+STATIC_ASSERT(32 == offsetof(struct uv__io_uring_sqe, user_data));
+STATIC_ASSERT(40 == offsetof(struct uv__io_uring_sqe, buf_index));
+
+struct uv__io_uring_params {
+ uint32_t sq_entries;
+ uint32_t cq_entries;
+ uint32_t flags;
+ uint32_t sq_thread_cpu;
+ uint32_t sq_thread_idle;
+ uint32_t features;
+ uint32_t reserved[4];
+ struct uv__io_sqring_offsets sq_off; /* 40 bytes */
+ struct uv__io_cqring_offsets cq_off; /* 40 bytes */
+};
+
+STATIC_ASSERT(40 + 40 + 40 == sizeof(struct uv__io_uring_params));
+STATIC_ASSERT(40 == offsetof(struct uv__io_uring_params, sq_off));
+STATIC_ASSERT(80 == offsetof(struct uv__io_uring_params, cq_off));
+
+STATIC_ASSERT(EPOLL_CTL_ADD < 4);
+STATIC_ASSERT(EPOLL_CTL_DEL < 4);
+STATIC_ASSERT(EPOLL_CTL_MOD < 4);
+
+struct watcher_list {
+ RB_ENTRY(watcher_list) entry;
+ struct uv__queue watchers;
+ int iterating;
+ char* path;
+ int wd;
+};
+
+struct watcher_root {
+ struct watcher_list* rbh_root;
+};
+
+static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root);
+static void uv__inotify_read(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int revents);
+static int compare_watchers(const struct watcher_list* a,
+ const struct watcher_list* b);
+static void maybe_free_watcher_list(struct watcher_list* w,
+ uv_loop_t* loop);
+
+static void uv__epoll_ctl_flush(int epollfd,
+ struct uv__iou* ctl,
+ struct epoll_event (*events)[256]);
+
+static void uv__epoll_ctl_prep(int epollfd,
+ struct uv__iou* ctl,
+ struct epoll_event (*events)[256],
+ int op,
+ int fd,
+ struct epoll_event* e);
+
+RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
+
+
+static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) {
+ /* This cast works because watcher_root is a struct with a pointer as its
+ * sole member. Such type punning is unsafe in the presence of strict
+ * pointer aliasing (and is just plain nasty) but that is why libuv
+ * is compiled with -fno-strict-aliasing.
+ */
+ return (struct watcher_root*) &loop->inotify_watchers;
+}
+
+
+unsigned uv__kernel_version(void) {
+ static std::atomic<unsigned int> cached_version;
+ struct utsname u;
+ unsigned version;
+ unsigned major;
+ unsigned minor;
+ unsigned patch;
+
+ version = std::atomic_load_explicit(&cached_version, std::memory_order_relaxed);
+ if (version != 0)
+ return version;
+
+ if (-1 == uname(&u))
+ return 0;
+
+ if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
+ return 0;
+
+ version = major * 65536 + minor * 256 + patch;
+ std::atomic_store_explicit(&cached_version, version, std::memory_order_relaxed);
+
+ return version;
+}
+
+
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ off_t* off_in,
+ int fd_out,
+ off_t* off_out,
+ size_t len,
+ unsigned int flags)
+{
+#ifdef __NR_copy_file_range
+ return syscall(__NR_copy_file_range,
+ fd_in,
+ off_in,
+ fd_out,
+ off_out,
+ len,
+ flags);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf) {
+#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
+ return errno = ENOSYS, -1;
+#else
+ int rc;
+
+ rc = syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
+ if (rc >= 0)
+ uv__msan_unpoison(statxbuf, sizeof(*statxbuf));
+
+ return rc;
+#endif
+}
+
+
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
+#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
+ return errno = ENOSYS, -1;
+#else
+ ssize_t rc;
+
+ rc = syscall(__NR_getrandom, buf, buflen, flags);
+ if (rc >= 0)
+ uv__msan_unpoison(buf, buflen);
+
+ return rc;
+#endif
+}
+
+
+int uv__io_uring_setup(int entries, struct uv__io_uring_params* params) {
+ return syscall(__NR_io_uring_setup, entries, params);
+}
+
+
+int uv__io_uring_enter(int fd,
+ unsigned to_submit,
+ unsigned min_complete,
+ unsigned flags) {
+ /* io_uring_enter used to take a sigset_t but it's unused
+ * in newer kernels unless IORING_ENTER_EXT_ARG is set,
+ * in which case it takes a struct io_uring_getevents_arg.
+ */
+ return syscall(__NR_io_uring_enter,
+ fd,
+ to_submit,
+ min_complete,
+ flags,
+ NULL,
+ 0L);
+}
+
+
+int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) {
+ return syscall(__NR_io_uring_register, fd, opcode, arg, nargs);
+}
+
+
+static int uv__use_io_uring(void) {
+#if defined(__ANDROID_API__)
+ return 0; /* Possibly available but blocked by seccomp. */
+#else
+ /* Ternary: unknown=0, yes=1, no=-1 */
+ static std::atomic<int> use_io_uring;
+ char* val;
+ int use;
+
+ use = std::atomic_load_explicit(&use_io_uring, std::memory_order_relaxed);
+
+ if (use == 0) {
+ val = getenv("UV_USE_IO_URING");
+ use = val == NULL || atoi(val) ? 1 : -1;
+ std::atomic_store_explicit(&use_io_uring, use, std::memory_order_relaxed);
+ }
+
+ return use > 0;
+#endif
+}
+
+
+static void uv__iou_init(int epollfd,
+ struct uv__iou* iou,
+ uint32_t entries,
+ uint32_t flags) {
+ struct uv__io_uring_params params;
+ struct epoll_event e;
+ size_t cqlen;
+ size_t sqlen;
+ size_t maxlen;
+ size_t sqelen;
+ uint32_t i;
+ char* sq;
+ char* sqe;
+ int ringfd;
+
+ sq = (char*)MAP_FAILED;
+ sqe = (char*)MAP_FAILED;
+
+ if (!uv__use_io_uring())
+ return;
+
+ /* SQPOLL required CAP_SYS_NICE until linux v5.12 relaxed that requirement.
+ * Mostly academic because we check for a v5.13 kernel afterwards anyway.
+ */
+ memset(&params, 0, sizeof(params));
+ params.flags = flags;
+
+ if (flags & UV__IORING_SETUP_SQPOLL)
+ params.sq_thread_idle = 10; /* milliseconds */
+
+ /* Kernel returns a file descriptor with O_CLOEXEC flag set. */
+ ringfd = uv__io_uring_setup(entries, &params);
+ if (ringfd == -1)
+ return;
+
+ /* IORING_FEAT_RSRC_TAGS is used to detect linux v5.13 but what we're
+ * actually detecting is whether IORING_OP_STATX works with SQPOLL.
+ */
+ if (!(params.features & UV__IORING_FEAT_RSRC_TAGS))
+ goto fail;
+
+ /* Implied by IORING_FEAT_RSRC_TAGS but checked explicitly anyway. */
+ if (!(params.features & UV__IORING_FEAT_SINGLE_MMAP))
+ goto fail;
+
+ /* Implied by IORING_FEAT_RSRC_TAGS but checked explicitly anyway. */
+ if (!(params.features & UV__IORING_FEAT_NODROP))
+ goto fail;
+
+ sqlen = params.sq_off.array + params.sq_entries * sizeof(uint32_t);
+ cqlen =
+ params.cq_off.cqes + params.cq_entries * sizeof(struct uv__io_uring_cqe);
+ maxlen = sqlen < cqlen ? cqlen : sqlen;
+ sqelen = params.sq_entries * sizeof(struct uv__io_uring_sqe);
+
+ sq = (char*)mmap(0,
+ maxlen,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ ringfd,
+ 0); /* IORING_OFF_SQ_RING */
+
+ sqe = (char*)mmap(0,
+ sqelen,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ ringfd,
+ 0x10000000ull); /* IORING_OFF_SQES */
+
+ if (sq == MAP_FAILED || sqe == MAP_FAILED)
+ goto fail;
+
+ if (flags & UV__IORING_SETUP_SQPOLL) {
+ /* Only interested in completion events. To get notified when
+ * the kernel pulls items from the submission ring, add POLLOUT.
+ */
+ memset(&e, 0, sizeof(e));
+ e.events = POLLIN;
+ e.data.fd = ringfd;
+
+ if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ringfd, &e))
+ goto fail;
+ }
+
+ iou->sqhead = (uint32_t*) (sq + params.sq_off.head);
+ iou->sqtail = (uint32_t*) (sq + params.sq_off.tail);
+ iou->sqmask = *(uint32_t*) (sq + params.sq_off.ring_mask);
+ iou->sqarray = (uint32_t*) (sq + params.sq_off.array);
+ iou->sqflags = (uint32_t*) (sq + params.sq_off.flags);
+ iou->cqhead = (uint32_t*) (sq + params.cq_off.head);
+ iou->cqtail = (uint32_t*) (sq + params.cq_off.tail);
+ iou->cqmask = *(uint32_t*) (sq + params.cq_off.ring_mask);
+ iou->sq = sq;
+ iou->cqe = sq + params.cq_off.cqes;
+ iou->sqe = sqe;
+ iou->sqlen = sqlen;
+ iou->cqlen = cqlen;
+ iou->maxlen = maxlen;
+ iou->sqelen = sqelen;
+ iou->ringfd = ringfd;
+ iou->in_flight = 0;
+ iou->flags = 0;
+
+ if (uv__kernel_version() >= /* 5.15.0 */ 0x050F00)
+ iou->flags |= UV__MKDIRAT_SYMLINKAT_LINKAT;
+
+ for (i = 0; i <= iou->sqmask; i++)
+ iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */
+
+ return;
+
+fail:
+ if (sq != MAP_FAILED)
+ munmap(sq, maxlen);
+
+ if (sqe != MAP_FAILED)
+ munmap(sqe, sqelen);
+
+ uv__close(ringfd);
+}
+
+
+static void uv__iou_delete(struct uv__iou* iou) {
+ if (iou->ringfd != -1) {
+ munmap(iou->sq, iou->maxlen);
+ munmap(iou->sqe, iou->sqelen);
+ uv__close(iou->ringfd);
+ iou->ringfd = -1;
+ }
+}
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+
+ lfields = uv__get_internal_fields(loop);
+ lfields->ctl.ringfd = -1;
+ lfields->iou.ringfd = -1;
+
+ loop->inotify_watchers = NULL;
+ loop->inotify_fd = -1;
+ loop->backend_fd = epoll_create1(O_CLOEXEC);
+
+ if (loop->backend_fd == -1)
+ return UV__ERR(errno);
+
+ uv__iou_init(loop->backend_fd, &lfields->iou, 64, UV__IORING_SETUP_SQPOLL);
+ uv__iou_init(loop->backend_fd, &lfields->ctl, 256, 0);
+
+ return 0;
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+ int err;
+ struct watcher_list* root;
+
+ root = uv__inotify_watchers(loop)->rbh_root;
+
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+
+ /* TODO(bnoordhuis) Loses items from the submission and completion rings. */
+ uv__platform_loop_delete(loop);
+
+ err = uv__platform_loop_init(loop);
+ if (err)
+ return err;
+
+ return uv__inotify_fork(loop, root);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+
+ lfields = uv__get_internal_fields(loop);
+ uv__iou_delete(&lfields->ctl);
+ uv__iou_delete(&lfields->iou);
+
+ if (loop->inotify_fd != -1) {
+ uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
+ uv__close(loop->inotify_fd);
+ loop->inotify_fd = -1;
+ }
+}
+
+
+struct uv__invalidate {
+ struct epoll_event (*prep)[256];
+ struct epoll_event* events;
+ int nfds;
+};
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ uv__loop_internal_fields_t* lfields;
+ struct uv__invalidate* inv;
+ struct epoll_event dummy;
+ int i;
+
+ lfields = uv__get_internal_fields(loop);
+ inv = (uv__invalidate*)lfields->inv;
+
+ /* Invalidate events with same file descriptor */
+ if (inv != NULL)
+ for (i = 0; i < inv->nfds; i++)
+ if (inv->events[i].data.fd == fd)
+ inv->events[i].data.fd = -1;
+
+ /* Remove the file descriptor from the epoll.
+ * This avoids a problem where the same file description remains open
+ * in another process, causing repeated junk epoll events.
+ *
+ * We pass in a dummy epoll_event, to work around a bug in old kernels.
+ *
+ * Work around a bug in kernels 3.10 to 3.19 where passing a struct that
+ * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+
+ if (inv == NULL) {
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
+ } else {
+ uv__epoll_ctl_prep(loop->backend_fd,
+ &lfields->ctl,
+ inv->prep,
+ EPOLL_CTL_DEL,
+ fd,
+ &dummy);
+ }
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event e;
+ int rc;
+
+ memset(&e, 0, sizeof(e));
+ e.events = POLLIN;
+ e.data.fd = -1;
+
+ rc = 0;
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
+ if (errno != EEXIST)
+ rc = UV__ERR(errno);
+
+ if (rc == 0)
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
+ abort();
+
+ return rc;
+}
+
+
+/* Caller must initialize SQE and call uv__iou_submit(). */
+static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou,
+ uv_loop_t* loop,
+ uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ uint32_t head;
+ uint32_t tail;
+ uint32_t mask;
+ uint32_t slot;
+
+ if (iou->ringfd == -1)
+ return NULL;
+
+ head = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqhead,
+ std::memory_order_acquire);
+ tail = *iou->sqtail;
+ mask = iou->sqmask;
+
+ if ((head & mask) == ((tail + 1) & mask))
+ return NULL; /* No room in ring buffer. TODO(bnoordhuis) maybe flush it? */
+
+ slot = tail & mask;
+ sqe = (uv__io_uring_sqe*)iou->sqe;
+ sqe = &sqe[slot];
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->user_data = (uintptr_t) req;
+
+ /* Pacify uv_cancel(). */
+ req->work_req.loop = loop;
+ req->work_req.work = NULL;
+ req->work_req.done = NULL;
+ uv__queue_init(&req->work_req.wq);
+
+ uv__req_register(loop, req);
+ iou->in_flight++;
+
+ return sqe;
+}
+
+
+static void uv__iou_submit(struct uv__iou* iou) {
+ uint32_t flags;
+
+ std::atomic_store_explicit((std::atomic<uint32_t>*) iou->sqtail,
+ *iou->sqtail + 1,
+ std::memory_order_release);
+
+ flags = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqflags,
+ std::memory_order_acquire);
+
+ if (flags & UV__IORING_SQ_NEED_WAKEUP)
+ if (uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_SQ_WAKEUP))
+ if (errno != EOWNERDEAD) /* Kernel bug. Harmless, ignore. */
+ perror("libuv: io_uring_enter(wakeup)"); /* Can't happen. */
+}
+
+
+int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ /* Work around a poorly understood bug in older kernels where closing a file
+ * descriptor pointing to /foo/bar results in ETXTBSY errors when trying to
+ * execve("/foo/bar") later on. The bug seems to have been fixed somewhere
+ * between 5.15.85 and 5.15.90. I couldn't pinpoint the responsible commit
+ * but good candidates are the several data race fixes. Interestingly, it
+ * seems to manifest only when running under Docker so the possibility of
+ * a Docker bug can't be completely ruled out either. Yay, computers.
+ */
+ if (uv__kernel_version() < /* 5.15.90 */ 0x050F5A)
+ return 0;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->fd = req->file;
+ sqe->opcode = UV__IORING_OP_CLOSE;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
+ uv_fs_t* req,
+ uint32_t fsync_flags) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ /* Little known fact: setting sqe->off and sqe->len turns
+ * it into an asynchronous sync_file_range() operation.
+ */
+ sqe->fd = req->file;
+ sqe->fsync_flags = fsync_flags;
+ sqe->opcode = UV__IORING_OP_FSYNC;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
+ return 0;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->fd = AT_FDCWD;
+ sqe->addr2 = (uintptr_t) req->new_path;
+ sqe->len = AT_FDCWD;
+ sqe->opcode = UV__IORING_OP_LINKAT;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
+ return 0;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->fd = AT_FDCWD;
+ sqe->len = req->mode;
+ sqe->opcode = UV__IORING_OP_MKDIRAT;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->fd = AT_FDCWD;
+ sqe->len = req->mode;
+ sqe->opcode = UV__IORING_OP_OPENAT;
+ sqe->open_flags = req->flags | O_CLOEXEC;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->fd = AT_FDCWD;
+ sqe->addr2 = (uintptr_t) req->new_path;
+ sqe->len = AT_FDCWD;
+ sqe->opcode = UV__IORING_OP_RENAMEAT;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
+ return 0;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->fd = AT_FDCWD;
+ sqe->addr2 = (uintptr_t) req->new_path;
+ sqe->opcode = UV__IORING_OP_SYMLINKAT;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->fd = AT_FDCWD;
+ sqe->opcode = UV__IORING_OP_UNLINKAT;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_read_or_write(uv_loop_t* loop,
+ uv_fs_t* req,
+ int is_read) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__iou* iou;
+
+ /* If iovcnt is greater than IOV_MAX, cap it to IOV_MAX on reads and fall
+ * back to the threadpool on writes. */
+ if (req->nbufs > IOV_MAX) {
+ if (is_read)
+ req->nbufs = IOV_MAX;
+ else
+ return 0;
+ }
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL)
+ return 0;
+
+ sqe->addr = (uintptr_t) req->bufs;
+ sqe->fd = req->file;
+ sqe->len = req->nbufs;
+ sqe->off = req->off < 0 ? -1 : req->off;
+ sqe->opcode = is_read ? UV__IORING_OP_READV : UV__IORING_OP_WRITEV;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+int uv__iou_fs_statx(uv_loop_t* loop,
+ uv_fs_t* req,
+ int is_fstat,
+ int is_lstat) {
+ struct uv__io_uring_sqe* sqe;
+ struct uv__statx* statxbuf;
+ struct uv__iou* iou;
+
+ statxbuf = (struct uv__statx*)uv__malloc(sizeof(*statxbuf));
+ if (statxbuf == NULL)
+ return 0;
+
+ iou = &uv__get_internal_fields(loop)->iou;
+
+ sqe = uv__iou_get_sqe(iou, loop, req);
+ if (sqe == NULL) {
+ uv__free(statxbuf);
+ return 0;
+ }
+
+ req->ptr = statxbuf;
+
+ sqe->addr = (uintptr_t) req->path;
+ sqe->addr2 = (uintptr_t) statxbuf;
+ sqe->fd = AT_FDCWD;
+ sqe->len = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
+ sqe->opcode = UV__IORING_OP_STATX;
+
+ if (is_fstat) {
+ sqe->addr = (uintptr_t) "";
+ sqe->fd = req->file;
+ sqe->statx_flags |= 0x1000; /* AT_EMPTY_PATH */
+ }
+
+ if (is_lstat)
+ sqe->statx_flags |= AT_SYMLINK_NOFOLLOW;
+
+ uv__iou_submit(iou);
+
+ return 1;
+}
+
+
+void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf) {
+ buf->st_dev = makedev(statxbuf->stx_dev_major, statxbuf->stx_dev_minor);
+ buf->st_mode = statxbuf->stx_mode;
+ buf->st_nlink = statxbuf->stx_nlink;
+ buf->st_uid = statxbuf->stx_uid;
+ buf->st_gid = statxbuf->stx_gid;
+ buf->st_rdev = makedev(statxbuf->stx_rdev_major, statxbuf->stx_rdev_minor);
+ buf->st_ino = statxbuf->stx_ino;
+ buf->st_size = statxbuf->stx_size;
+ buf->st_blksize = statxbuf->stx_blksize;
+ buf->st_blocks = statxbuf->stx_blocks;
+ buf->st_atim.tv_sec = statxbuf->stx_atime.tv_sec;
+ buf->st_atim.tv_nsec = statxbuf->stx_atime.tv_nsec;
+ buf->st_mtim.tv_sec = statxbuf->stx_mtime.tv_sec;
+ buf->st_mtim.tv_nsec = statxbuf->stx_mtime.tv_nsec;
+ buf->st_ctim.tv_sec = statxbuf->stx_ctime.tv_sec;
+ buf->st_ctim.tv_nsec = statxbuf->stx_ctime.tv_nsec;
+ buf->st_birthtim.tv_sec = statxbuf->stx_btime.tv_sec;
+ buf->st_birthtim.tv_nsec = statxbuf->stx_btime.tv_nsec;
+ buf->st_flags = 0;
+ buf->st_gen = 0;
+}
+
+
+static void uv__iou_fs_statx_post(uv_fs_t* req) {
+ struct uv__statx* statxbuf;
+ uv_stat_t* buf;
+
+ buf = &req->statbuf;
+ statxbuf = (struct uv__statx*)req->ptr;
+ req->ptr = NULL;
+
+ if (req->result == 0) {
+ uv__msan_unpoison(statxbuf, sizeof(*statxbuf));
+ uv__statx_to_stat(statxbuf, buf);
+ req->ptr = buf;
+ }
+
+ uv__free(statxbuf);
+}
+
+
+static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
+ struct uv__io_uring_cqe* cqe;
+ struct uv__io_uring_cqe* e;
+ uv_fs_t* req;
+ uint32_t head;
+ uint32_t tail;
+ uint32_t mask;
+ uint32_t i;
+ uint32_t flags;
+ int nevents;
+ int rc;
+
+ head = *iou->cqhead;
+ tail = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->cqtail,
+ std::memory_order_acquire);
+ mask = iou->cqmask;
+ cqe = (uv__io_uring_cqe*)iou->cqe;
+ nevents = 0;
+
+ for (i = head; i != tail; i++) {
+ e = &cqe[i & mask];
+
+ req = (uv_fs_t*) (uintptr_t) e->user_data;
+ assert(req->type == UV_FS);
+
+ uv__req_unregister(loop, req);
+ iou->in_flight--;
+
+ /* io_uring stores error codes as negative numbers, same as libuv. */
+ req->result = e->res;
+
+ switch (req->fs_type) {
+ case UV_FS_FSTAT:
+ case UV_FS_LSTAT:
+ case UV_FS_STAT:
+ uv__iou_fs_statx_post(req);
+ break;
+ default: /* Squelch -Wswitch warnings. */
+ break;
+ }
+
+ uv__metrics_update_idle_time(loop);
+ req->cb(req);
+ nevents++;
+ }
+
+ std::atomic_store_explicit((std::atomic<uint32_t>*) iou->cqhead,
+ tail,
+ std::memory_order_release);
+
+ /* Check whether CQEs overflowed; if so, enter the kernel to make them
+ * available. Don't grab them immediately but in the next loop iteration to
+ * avoid loop starvation. */
+ flags = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqflags,
+ std::memory_order_acquire);
+
+ if (flags & UV__IORING_SQ_CQ_OVERFLOW) {
+ do
+ rc = uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_GETEVENTS);
+ while (rc == -1 && errno == EINTR);
+
+ if (rc < 0)
+ perror("libuv: io_uring_enter(getevents)"); /* Can't happen. */
+ }
+
+ uv__metrics_inc_events(loop, nevents);
+ if (uv__get_internal_fields(loop)->current_timeout == 0)
+ uv__metrics_inc_events_waiting(loop, nevents);
+}
+
+
+static void uv__epoll_ctl_prep(int epollfd,
+ struct uv__iou* ctl,
+ struct epoll_event (*events)[256],
+ int op,
+ int fd,
+ struct epoll_event* e) {
+ struct uv__io_uring_sqe* sqe;
+ struct epoll_event* pe;
+ uint32_t mask;
+ uint32_t slot;
+
+ if (ctl->ringfd == -1) {
+ if (!epoll_ctl(epollfd, op, fd, e))
+ return;
+
+ if (op == EPOLL_CTL_DEL)
+ return; /* Ignore errors, may be racing with another thread. */
+
+ if (op != EPOLL_CTL_ADD)
+ abort();
+
+ if (errno != EEXIST)
+ abort();
+
+ /* File descriptor that's been watched before, update event mask. */
+ if (!epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, e))
+ return;
+
+ abort();
+ } else {
+ mask = ctl->sqmask;
+ slot = (*ctl->sqtail)++ & mask;
+
+ pe = &(*events)[slot];
+ *pe = *e;
+
+ sqe = (uv__io_uring_sqe*)ctl->sqe;
+ sqe = &sqe[slot];
+
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->addr = (uintptr_t) pe;
+ sqe->fd = epollfd;
+ sqe->len = op;
+ sqe->off = fd;
+ sqe->opcode = UV__IORING_OP_EPOLL_CTL;
+ sqe->user_data = op | slot << 2 | (int64_t) fd << 32;
+
+ if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask))
+ uv__epoll_ctl_flush(epollfd, ctl, events);
+ }
+}
+
+
+static void uv__epoll_ctl_flush(int epollfd,
+ struct uv__iou* ctl,
+ struct epoll_event (*events)[256]) {
+ struct epoll_event oldevents[256];
+ struct uv__io_uring_cqe* cqe;
+ uint32_t oldslot;
+ uint32_t slot;
+ uint32_t n;
+ int fd;
+ int op;
+ int rc;
+
+ STATIC_ASSERT(sizeof(oldevents) == sizeof(*events));
+ assert(ctl->ringfd != -1);
+ assert(*ctl->sqhead != *ctl->sqtail);
+
+ n = *ctl->sqtail - *ctl->sqhead;
+ do
+ rc = uv__io_uring_enter(ctl->ringfd, n, n, UV__IORING_ENTER_GETEVENTS);
+ while (rc == -1 && errno == EINTR);
+
+ if (rc < 0)
+ perror("libuv: io_uring_enter(getevents)"); /* Can't happen. */
+
+ if (rc != (int) n)
+ abort();
+
+ assert(*ctl->sqhead == *ctl->sqtail);
+
+ memcpy(oldevents, *events, sizeof(*events));
+
+ /* Failed submissions are either EPOLL_CTL_DEL commands for file descriptors
+ * that have been closed, or EPOLL_CTL_ADD commands for file descriptors
+ * that we are already watching. Ignore the former and retry the latter
+ * with EPOLL_CTL_MOD.
+ */
+ while (*ctl->cqhead != *ctl->cqtail) {
+ slot = (*ctl->cqhead)++ & ctl->cqmask;
+
+ cqe = (uv__io_uring_cqe*)ctl->cqe;
+ cqe = &cqe[slot];
+
+ if (cqe->res == 0)
+ continue;
+
+ fd = cqe->user_data >> 32;
+ op = 3 & cqe->user_data;
+ oldslot = 255 & (cqe->user_data >> 2);
+
+ if (op == EPOLL_CTL_DEL)
+ continue;
+
+ if (op != EPOLL_CTL_ADD)
+ abort();
+
+ if (cqe->res != -EEXIST)
+ abort();
+
+ uv__epoll_ctl_prep(epollfd,
+ ctl,
+ events,
+ EPOLL_CTL_MOD,
+ fd,
+ &oldevents[oldslot]);
+ }
+}
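Failed epoll_ctl submissions are matched back to their inputs through the 64-bit user_data field, which uv__epoll_ctl_prep() packs as op | slot << 2 | fd << 32 and uv__epoll_ctl_flush() unpacks again. A minimal sketch of that round trip with illustrative values:

#include <assert.h>
#include <stdint.h>
#include <sys/epoll.h>

int main(void) {
  int op = EPOLL_CTL_ADD;  /* low two bits */
  uint32_t slot = 42;      /* ring slot, bits 2..9 (0..255) */
  int fd = 1000;           /* bits 32..63 */

  uint64_t user_data = op | slot << 2 | (int64_t) fd << 32;

  /* Decoded exactly as in uv__epoll_ctl_flush(). */
  assert((int) (user_data >> 32) == fd);
  assert((int) (3 & user_data) == op);
  assert((255 & (user_data >> 2)) == slot);
  return 0;
}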
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ uv__loop_internal_fields_t* lfields;
+ struct epoll_event events[1024];
+ struct epoll_event prep[256];
+ struct uv__invalidate inv;
+ struct epoll_event* pe;
+ struct epoll_event e;
+ struct uv__iou* ctl;
+ struct uv__iou* iou;
+ int real_timeout;
+ struct uv__queue* q;
+ uv__io_t* w;
+ sigset_t* sigmask;
+ sigset_t sigset;
+ uint64_t base;
+ int have_iou_events;
+ int have_signals;
+ int nevents;
+ int epollfd;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ lfields = uv__get_internal_fields(loop);
+ ctl = &lfields->ctl;
+ iou = &lfields->iou;
+
+ sigmask = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ sigemptyset(&sigset);
+ sigaddset(&sigset, SIGPROF);
+ sigmask = &sigset;
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+ real_timeout = timeout;
+
+ if (lfields->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ user_timeout = 0;
+ }
+
+ epollfd = loop->backend_fd;
+
+ memset(&e, 0, sizeof(e));
+
+ while (!uv__queue_empty(&loop->watcher_queue)) {
+ q = uv__queue_head(&loop->watcher_queue);
+ w = uv__queue_data(q, uv__io_t, watcher_queue);
+ uv__queue_remove(q);
+ uv__queue_init(q);
+
+ op = EPOLL_CTL_MOD;
+ if (w->events == 0)
+ op = EPOLL_CTL_ADD;
+
+ w->events = w->pevents;
+ e.events = w->pevents;
+ e.data.fd = w->fd;
+
+ uv__epoll_ctl_prep(epollfd, ctl, &prep, op, w->fd, &e);
+ }
+
+ inv.events = events;
+ inv.prep = &prep;
+ inv.nfds = -1;
+
+ for (;;) {
+ if (loop->nfds == 0)
+ if (iou->in_flight == 0)
+ break;
+
+ /* All event mask mutations should be visible to the kernel before
+ * we enter epoll_pwait().
+ */
+ if (ctl->ringfd != -1)
+ while (*ctl->sqhead != *ctl->sqtail)
+ uv__epoll_ctl_flush(epollfd, ctl, &prep);
+
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ /* Store the current timeout in a location that's globally accessible so
+ * other locations like uv__work_done() can determine whether the queue
+ * of events in the callback were waiting when poll was called.
+ */
+ lfields->current_timeout = timeout;
+
+ nfds = epoll_pwait(epollfd, events, ARRAY_SIZE(events), timeout, sigmask);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ assert(timeout != -1);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ break;
+
+ /* We may have been inside the system call for longer than |timeout|
+ * milliseconds so we need to update the timestamp to avoid drift.
+ */
+ goto update_timeout;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ break;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_iou_events = 0;
+ have_signals = 0;
+ nevents = 0;
+
+ inv.nfds = nfds;
+ lfields->inv = &inv;
+
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->data.fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ if (fd == iou->ringfd) {
+ uv__poll_io_uring(loop, iou);
+ have_iou_events = 1;
+ continue;
+ }
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ uv__epoll_ctl_prep(epollfd, ctl, &prep, EPOLL_CTL_DEL, fd, pe);
+ continue;
+ }
+
+ /* Give users only events they're interested in. Prevents spurious
+ * callbacks when previous callback invocation in this loop has stopped
+ * the current watcher. Also, filters out events that users has not
+ * requested us to watch.
+ */
+ pe->events &= w->pevents | POLLERR | POLLHUP;
+
+ /* Work around an epoll quirk where it sometimes reports just the
+ * EPOLLERR or EPOLLHUP event. In order to force the event loop to
+ * move forward, we merge in the read/write events that the watcher
+ * is interested in; uv__read() and uv__write() will then deal with
+ * the error or hangup in the usual fashion.
+ *
+ * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
+ * reads the available data, calls uv_read_stop(), then sometime later
+ * calls uv_read_start() again. By then, libuv has forgotten about the
+ * hangup and the kernel won't report EPOLLIN again because there's
+ * nothing left to read. If anything, libuv is to blame here. The
+ * current hack is just a quick bandaid; to properly fix it, libuv
+ * needs to remember the error/hangup event. We should get that for
+ * free when we switch over to edge-triggered I/O.
+ */
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |=
+ w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+
+ if (pe->events != 0) {
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->events);
+ }
+
+ nevents++;
+ }
+ }
+
+ uv__metrics_inc_events(loop, nevents);
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ uv__metrics_inc_events_waiting(loop, nevents);
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ lfields->inv = NULL;
+
+ if (have_iou_events != 0)
+ break; /* Event loop should cycle now so don't poll again. */
+
+ if (have_signals != 0)
+ break; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ break;
+ }
+
+ if (timeout == 0)
+ break;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ real_timeout -= (loop->time - base);
+ if (real_timeout <= 0)
+ break;
+
+ timeout = real_timeout;
+ }
+
+ if (ctl->ringfd != -1)
+ while (*ctl->sqhead != *ctl->sqtail)
+ uv__epoll_ctl_flush(epollfd, ctl, &prep);
+}
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ static std::atomic<clock_t> fast_clock_id = -1;
+ struct timespec t;
+ clock_t clock_id;
+
+ /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
+ * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
+ * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
+ * decide to make a costly system call.
+ */
+ /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
+ * when it has microsecond granularity or better (unlikely).
+ */
+ clock_id = CLOCK_MONOTONIC;
+ if (type != UV_CLOCK_FAST)
+ goto done;
+
+ clock_id = std::atomic_load_explicit(&fast_clock_id, std::memory_order_relaxed);
+ if (clock_id != -1)
+ goto done;
+
+ clock_id = CLOCK_MONOTONIC;
+ if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
+ if (t.tv_nsec <= 1 * 1000 * 1000)
+ clock_id = CLOCK_MONOTONIC_COARSE;
+
+ std::atomic_store_explicit(&fast_clock_id, clock_id, std::memory_order_relaxed);
+
+done:
+
+ if (clock_gettime(clock_id, &t))
+ return 0; /* Not really possible. */
+
+ return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char buf[1024];
+ const char* s;
+ ssize_t n;
+ long val;
+ int fd;
+ int i;
+
+ do
+ fd = open("/proc/self/stat", O_RDONLY);
+ while (fd == -1 && errno == EINTR);
+
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ do
+ n = read(fd, buf, sizeof(buf) - 1);
+ while (n == -1 && errno == EINTR);
+
+ uv__close(fd);
+ if (n == -1)
+ return UV__ERR(errno);
+ buf[n] = '\0';
+
+ s = strchr(buf, ' ');
+ if (s == NULL)
+ goto err;
+
+ s += 1;
+ if (*s != '(')
+ goto err;
+
+ s = strchr(s, ')');
+ if (s == NULL)
+ goto err;
+
+ for (i = 1; i <= 22; i++) {
+ s = strchr(s + 1, ' ');
+ if (s == NULL)
+ goto err;
+ }
+
+ errno = 0;
+ val = strtol(s, NULL, 10);
+ if (errno != 0)
+ goto err;
+ if (val < 0)
+ goto err;
+
+ *rss = val * getpagesize();
+ return 0;
+
+err:
+ return UV_EINVAL;
+}
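uv_resident_set_memory() skips the pid, the parenthesized command name (which may itself contain spaces, hence the search for the closing ')'), and then 22 more space-separated fields, landing on field 24 of /proc/self/stat: the resident set size in pages. A condensed sketch of the same walk over a made-up stat line, jumping straight to the closing parenthesis; all field values below are illustrative:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  /* Abridged, made-up /proc/self/stat contents; field 24 (rss) is 1234. */
  const char* buf =
      "4321 (a b) S 1 1 1 0 -1 4194560 100 0 0 0 "
      "5 5 0 0 20 0 1 0 300 10000000 1234 0";
  const char* s;
  int i;

  s = strchr(buf, ')');        /* end of the comm field (field 2) */
  for (i = 1; i <= 22; i++)    /* advance over 22 separators */
    s = strchr(s + 1, ' ');

  /* s now points at the separator before field 24, the rss page count. */
  assert(strtol(s, NULL, 10) == 1234);
  return 0;
}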
+
+int uv_uptime(double* uptime) {
+ struct timespec now;
+ char buf[128];
+
+ /* Consult /proc/uptime when present (common case), or fall back to
+ * clock_gettime. Why not always clock_gettime? It doesn't always return the
+ * right result under OpenVZ and possibly other containerized environments.
+ */
+ if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
+ if (1 == sscanf(buf, "%lf", uptime))
+ return 0;
+
+ if (clock_gettime(CLOCK_BOOTTIME, &now))
+ return UV__ERR(errno);
+
+ *uptime = now.tv_sec;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** ci, int* count) {
+#if defined(__PPC__)
+ static const char model_marker[] = "cpu\t\t: ";
+#elif defined(__arm__)
+ static const char model_marker[] = "Processor\t: ";
+#elif defined(__aarch64__)
+ static const char model_marker[] = "CPU part\t: ";
+#elif defined(__mips__)
+ static const char model_marker[] = "cpu model\t\t: ";
+#elif defined(__loongarch__)
+ static const char model_marker[] = "cpu family\t\t: ";
+#else
+ static const char model_marker[] = "model name\t: ";
+#endif
+ static const char parts[] =
+#ifdef __aarch64__
+ "0x811\nARM810\n" "0x920\nARM920\n" "0x922\nARM922\n"
+ "0x926\nARM926\n" "0x940\nARM940\n" "0x946\nARM946\n"
+ "0x966\nARM966\n" "0xa20\nARM1020\n" "0xa22\nARM1022\n"
+ "0xa26\nARM1026\n" "0xb02\nARM11 MPCore\n" "0xb36\nARM1136\n"
+ "0xb56\nARM1156\n" "0xb76\nARM1176\n" "0xc05\nCortex-A5\n"
+ "0xc07\nCortex-A7\n" "0xc08\nCortex-A8\n" "0xc09\nCortex-A9\n"
+ "0xc0d\nCortex-A17\n" /* Originally A12 */
+ "0xc0f\nCortex-A15\n" "0xc0e\nCortex-A17\n" "0xc14\nCortex-R4\n"
+ "0xc15\nCortex-R5\n" "0xc17\nCortex-R7\n" "0xc18\nCortex-R8\n"
+ "0xc20\nCortex-M0\n" "0xc21\nCortex-M1\n" "0xc23\nCortex-M3\n"
+ "0xc24\nCortex-M4\n" "0xc27\nCortex-M7\n" "0xc60\nCortex-M0+\n"
+ "0xd01\nCortex-A32\n" "0xd03\nCortex-A53\n" "0xd04\nCortex-A35\n"
+ "0xd05\nCortex-A55\n" "0xd06\nCortex-A65\n" "0xd07\nCortex-A57\n"
+ "0xd08\nCortex-A72\n" "0xd09\nCortex-A73\n" "0xd0a\nCortex-A75\n"
+ "0xd0b\nCortex-A76\n" "0xd0c\nNeoverse-N1\n" "0xd0d\nCortex-A77\n"
+ "0xd0e\nCortex-A76AE\n" "0xd13\nCortex-R52\n" "0xd20\nCortex-M23\n"
+ "0xd21\nCortex-M33\n" "0xd41\nCortex-A78\n" "0xd42\nCortex-A78AE\n"
+ "0xd4a\nNeoverse-E1\n" "0xd4b\nCortex-A78C\n"
+#endif
+ "";
+ struct cpu {
+ unsigned long long freq, user, nice, sys, idle, irq;
+ unsigned model;
+ };
+ FILE* fp;
+ char* p;
+ int found;
+ int n;
+ unsigned i;
+ unsigned cpu;
+ unsigned maxcpu;
+ unsigned size;
+ unsigned long long skip;
+ struct cpu (*cpus)[8192]; /* Kernel maximum. */
+ struct cpu* c;
+ struct cpu t;
+ char (*model)[64];
+ unsigned char bitmap[ARRAY_SIZE(*cpus) / 8];
+ /* Assumption: even big.LITTLE systems will have only a handful
+ * of different CPU models. Most systems will just have one.
+ */
+ char models[8][64];
+ char buf[1024];
+
+ memset(bitmap, 0, sizeof(bitmap));
+ memset(models, 0, sizeof(models));
+ snprintf(*models, sizeof(*models), "unknown");
+ maxcpu = 0;
+
+ cpus = (decltype(cpus))uv__calloc(ARRAY_SIZE(*cpus), sizeof(**cpus));
+ if (cpus == NULL)
+ return UV_ENOMEM;
+
+ fp = uv__open_file("/proc/stat");
+ if (fp == NULL) {
+ uv__free(cpus);
+ return UV__ERR(errno);
+ }
+
+ /* Skip first line. */
+ if (!fgets(buf, sizeof(buf), fp)) {
+ uv__free(cpus);
+ return UV__ERR(errno);
+ }
+
+ for (;;) {
+ memset(&t, 0, sizeof(t));
+
+ n = fscanf(fp, "cpu%u %llu %llu %llu %llu %llu %llu",
+ &cpu, &t.user, &t.nice, &t.sys, &t.idle, &skip, &t.irq);
+
+ if (n != 7)
+ break;
+
+ /* Skip rest of line. */
+ if (!fgets(buf, sizeof(buf), fp)) {
+ break;
+ }
+
+ if (cpu >= ARRAY_SIZE(*cpus))
+ continue;
+
+ (*cpus)[cpu] = t;
+
+ bitmap[cpu >> 3] |= 1 << (cpu & 7);
+
+ if (cpu >= maxcpu)
+ maxcpu = cpu + 1;
+ }
+
+ fclose(fp);
+
+ fp = uv__open_file("/proc/cpuinfo");
+ if (fp == NULL)
+ goto nocpuinfo;
+
+ for (;;) {
+ if (1 != fscanf(fp, "processor\t: %u\n", &cpu))
+ break; /* Parse error. */
+
+ found = 0;
+ while (!found && fgets(buf, sizeof(buf), fp))
+ found = !strncmp(buf, model_marker, sizeof(model_marker) - 1);
+
+ if (!found)
+ goto next;
+
+ p = buf + sizeof(model_marker) - 1;
+ n = (int) strcspn(p, "\n");
+
+ /* arm64: translate CPU part code to model name. */
+ if (*parts) {
+ p = (char*)memmem(parts, sizeof(parts) - 1, p, n + 1);
+ if (p == NULL)
+ p = const_cast<char*>("unknown");
+ else
+ p += n + 1;
+ n = (int) strcspn(p, "\n");
+ }
+
+ found = 0;
+ for (model = models; !found && model < ARRAY_END(models); model++)
+ found = !strncmp(p, *model, strlen(*model));
+
+ if (!found)
+ goto next;
+
+ if (**model == '\0')
+ snprintf(*model, sizeof(*model), "%.*s", n, p);
+
+ if (cpu < maxcpu)
+ (*cpus)[cpu].model = model - models;
+
+next:
+ while (fgets(buf, sizeof(buf), fp))
+ if (*buf == '\n')
+ break;
+ }
+
+ fclose(fp);
+ fp = NULL;
+
+nocpuinfo:
+
+ n = 0;
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ if (!(bitmap[cpu >> 3] & (1 << (cpu & 7))))
+ continue;
+
+ n++;
+ snprintf(buf, sizeof(buf),
+ "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq", cpu);
+
+ fp = uv__open_file(buf);
+ if (fp == NULL)
+ continue;
+
+ if (0 > fscanf(fp, "%llu", &(*cpus)[cpu].freq)) {
+ (*cpus)[cpu].freq = 0llu;
+ }
+ fclose(fp);
+ fp = NULL;
+ }
+
+ size = n * sizeof(**ci) + sizeof(models);
+ *ci = (uv_cpu_info_t*)uv__malloc(size);
+ *count = 0;
+
+ if (*ci == NULL) {
+ uv__free(cpus);
+ return UV_ENOMEM;
+ }
+
+ *count = n;
+ p = (char*)memcpy(*ci + n, models, sizeof(models));
+
+ i = 0;
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ if (!(bitmap[cpu >> 3] & (1 << (cpu & 7))))
+ continue;
+
+ c = *cpus + cpu;
+
+ (*ci)[i++] = uv_cpu_info_t{
+ .model = p + c->model * sizeof(*model),
+ .speed = (int)(c->freq / 1000),
+ /* Note: sysconf(_SC_CLK_TCK) is fixed at 100 Hz,
+ * therefore the multiplier is always 1000/100 = 10.
+ */
+ .cpu_times = {
+ .user = 10 * c->user,
+ .nice = 10 * c->nice,
+ .sys = 10 * c->sys,
+ .idle = 10 * c->idle,
+ .irq = 10 * c->irq,
+ }
+ };
+ }
+
+ uv__free(cpus);
+
+ return 0;
+}
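On aarch64 the matched /proc/cpuinfo line carries a hex "CPU part" code rather than a model name, so the loop above resolves it through the packed "code\nname\n" table with memmem(), searching for the code including its trailing newline. A reduced sketch of that lookup using two entries copied from the table; glibc's memmem() is assumed to be available, as in the code above:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1  /* memmem() is a GNU extension */
#endif
#include <assert.h>
#include <string.h>

int main(void) {
  /* Two entries lifted from the parts[] table above. */
  static const char parts[] = "0xd07\nCortex-A57\n" "0xd08\nCortex-A72\n";
  /* What the "CPU part" line yields after the marker; the trailing
   * newline is still present because the line came from fgets(). */
  const char* cpu_part = "0xd08\n";
  size_t n = strcspn(cpu_part, "\n");  /* 5 */
  const char* p;

  /* Search for "0xd08\n", then step past it to the model name. */
  p = (const char*) memmem(parts, sizeof(parts) - 1, cpu_part, n + 1);
  assert(p != NULL);
  p += n + 1;
  n = strcspn(p, "\n");
  assert(n == strlen("Cortex-A72"));
  assert(0 == strncmp(p, "Cortex-A72", n));
  return 0;
}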
+
+
+#ifdef HAVE_IFADDRS_H
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+ /*
+ * On Linux getifaddrs returns information related to the raw underlying
+ * devices. We're not interested in this information yet.
+ */
+ if (ent->ifa_addr->sa_family == PF_PACKET)
+ return exclude_type;
+ return !exclude_type;
+}
+#endif
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+#ifndef HAVE_IFADDRS_H
+ *count = 0;
+ *addresses = NULL;
+ return UV_ENOSYS;
+#else
+ struct ifaddrs *addrs, *ent;
+ uv_interface_address_t* address;
+ int i;
+ struct sockaddr_ll *sll;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs))
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+ /* Make sure the memory is initialized to zero using calloc() */
+ *addresses = (uv_interface_address_t*)uv__calloc(*count, sizeof(**addresses));
+ if (!(*addresses)) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+ address++;
+ }
+
+ /* Fill in physical addresses for each interface */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
+ continue;
+
+ address = *addresses;
+
+ for (i = 0; i < (*count); i++) {
+ size_t namelen = strlen(ent->ifa_name);
+ /* Alias interfaces share the same physical address */
+ if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
+ (address->name[namelen] == 0 || address->name[namelen] == ':')) {
+ sll = (struct sockaddr_ll*)ent->ifa_addr;
+ memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+
+ freeifaddrs(addrs);
+
+ return 0;
+#endif
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+
+void uv__set_process_title(const char* title) {
+#if defined(PR_SET_NAME)
+ prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
+#endif
+}
+
+
+static uint64_t uv__read_proc_meminfo(const char* what) {
+ uint64_t rc;
+ char* p;
+ char buf[4096]; /* Large enough to hold all of /proc/meminfo. */
+
+ if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
+ return 0;
+
+ p = strstr(buf, what);
+
+ if (p == NULL)
+ return 0;
+
+ p += strlen(what);
+
+ rc = 0;
+ sscanf(p, "%" PRIu64 " kB", &rc);
+
+ return rc * 1024;
+}
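uv__read_proc_meminfo() is a plain substring scan: locate the label, sscanf the kB count that follows, and scale it to bytes. A self-contained sketch of the same parse over a canned buffer; the numbers are made up:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t read_meminfo_field(const char* buf, const char* what) {
  uint64_t rc = 0;
  const char* p = strstr(buf, what);

  if (p == NULL)
    return 0;

  p += strlen(what);
  sscanf(p, "%" SCNu64 " kB", &rc);
  return rc * 1024;  /* /proc/meminfo reports kibibytes */
}

int main(void) {
  const char* meminfo =
      "MemTotal:       16384000 kB\n"
      "MemFree:          123456 kB\n"
      "MemAvailable:    7890123 kB\n";

  assert(read_meminfo_field(meminfo, "MemAvailable:") ==
         UINT64_C(7890123) * 1024);
  assert(read_meminfo_field(meminfo, "SwapTotal:") == 0);  /* absent */
  return 0;
}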
+
+
+uint64_t uv_get_free_memory(void) {
+ struct sysinfo info;
+ uint64_t rc;
+
+ rc = uv__read_proc_meminfo("MemAvailable:");
+
+ if (rc != 0)
+ return rc;
+
+ if (0 == sysinfo(&info))
+ return (uint64_t) info.freeram * info.mem_unit;
+
+ return 0;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ struct sysinfo info;
+ uint64_t rc;
+
+ rc = uv__read_proc_meminfo("MemTotal:");
+
+ if (rc != 0)
+ return rc;
+
+ if (0 == sysinfo(&info))
+ return (uint64_t) info.totalram * info.mem_unit;
+
+ return 0;
+}
+
+
+static uint64_t uv__read_uint64(const char* filename) {
+ char buf[32]; /* Large enough to hold an encoded uint64_t. */
+ uint64_t rc;
+
+ rc = 0;
+ if (0 == uv__slurp(filename, buf, sizeof(buf)))
+ if (1 != sscanf(buf, "%" PRIu64, &rc))
+ if (0 == strcmp(buf, "max\n"))
+ rc = UINT64_MAX;
+
+ return rc;
+}
+
+
+/* Given a buffer with the contents of a cgroup1 /proc/self/cgroup,
+ * finds the location and length of the memory controller mount path.
+ * This disregards the leading / for easy concatenation of paths.
+ * Returns NULL if the memory controller wasn't found. */
+static char* uv__cgroup1_find_memory_controller(char buf[1024],
+ int* n) {
+ char* p;
+
+ /* Seek to the memory controller line. */
+ p = strchr(buf, ':');
+ while (p != NULL && strncmp(p, ":memory:", 8)) {
+ p = strchr(p, '\n');
+ if (p != NULL)
+ p = strchr(p, ':');
+ }
+
+ if (p != NULL) {
+ /* Determine the length of the mount path. */
+ p = p + strlen(":memory:/");
+ *n = (int) strcspn(p, "\n");
+ }
+
+ return p;
+}
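The helper above scans the colon-delimited lines of /proc/self/cgroup for the entry whose controller list is exactly "memory" and returns the mount path with its leading '/' stripped, plus its length. A quick sketch of the expected input and output, repeating the same walk on an illustrative cgroup v1 buffer:

#include <assert.h>
#include <string.h>

int main(void) {
  /* Illustrative cgroup v1 style /proc/self/cgroup contents. */
  char buf[1024] =
      "11:cpuset:/\n"
      "9:memory:/user.slice/user-1000.slice\n"
      "2:cpu,cpuacct:/\n";
  char* p;
  int n;

  /* Same walk as uv__cgroup1_find_memory_controller(). */
  p = strchr(buf, ':');
  while (p != NULL && strncmp(p, ":memory:", 8)) {
    p = strchr(p, '\n');
    if (p != NULL)
      p = strchr(p, ':');
  }

  assert(p != NULL);
  p = p + strlen(":memory:/");
  n = (int) strcspn(p, "\n");

  /* Result: the hierarchy path without its leading '/'. */
  assert(n == (int) strlen("user.slice/user-1000.slice"));
  assert(0 == strncmp(p, "user.slice/user-1000.slice", n));
  return 0;
}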
+
+static void uv__get_cgroup1_memory_limits(char buf[1024], uint64_t* high,
+ uint64_t* max) {
+ char filename[4097];
+ char* p;
+ int n;
+ uint64_t cgroup1_max;
+
+ /* Find out where the controller is mounted. */
+ p = uv__cgroup1_find_memory_controller(buf, &n);
+ if (p != NULL) {
+ snprintf(filename, sizeof(filename),
+ "/sys/fs/cgroup/memory/%.*s/memory.soft_limit_in_bytes", n, p);
+ *high = uv__read_uint64(filename);
+
+ snprintf(filename, sizeof(filename),
+ "/sys/fs/cgroup/memory/%.*s/memory.limit_in_bytes", n, p);
+ *max = uv__read_uint64(filename);
+
+ /* If the controller wasn't mounted, the reads above will have failed,
+ * as indicated by uv__read_uint64 returning 0.
+ */
+ if (*high != 0 && *max != 0)
+ goto update_limits;
+ }
+
+ /* Fall back to the limits of the global memory controller. */
+ *high = uv__read_uint64("/sys/fs/cgroup/memory/memory.soft_limit_in_bytes");
+ *max = uv__read_uint64("/sys/fs/cgroup/memory/memory.limit_in_bytes");
+
+ /* uv__read_uint64 detects cgroup2's "max", so we need to separately detect
+ * cgroup1's maximum value (which is derived from LONG_MAX and PAGE_SIZE).
+ */
+update_limits:
+ cgroup1_max = LONG_MAX & ~(sysconf(_SC_PAGESIZE) - 1);
+ if (*high == cgroup1_max)
+ *high = UINT64_MAX;
+ if (*max == cgroup1_max)
+ *max = UINT64_MAX;
+}
+
+static void uv__get_cgroup2_memory_limits(char buf[1024], uint64_t* high,
+ uint64_t* max) {
+ char filename[4097];
+ char* p;
+ int n;
+
+ /* Find out where the controller is mounted. */
+ p = buf + strlen("0::/");
+ n = (int) strcspn(p, "\n");
+
+ /* Read the memory limits of the controller. */
+ snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%.*s/memory.max", n, p);
+ *max = uv__read_uint64(filename);
+ snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%.*s/memory.high", n, p);
+ *high = uv__read_uint64(filename);
+}
+
+static uint64_t uv__get_cgroup_constrained_memory(char buf[1024]) {
+ uint64_t high;
+ uint64_t max;
+
+ /* In the case of cgroupv2, we'll only have a single entry. */
+ if (strncmp(buf, "0::/", 4))
+ uv__get_cgroup1_memory_limits(buf, &high, &max);
+ else
+ uv__get_cgroup2_memory_limits(buf, &high, &max);
+
+ if (high == 0 || max == 0)
+ return 0;
+
+ return high < max ? high : max;
+}
+
+uint64_t uv_get_constrained_memory(void) {
+ char buf[1024];
+
+ if (uv__slurp("/proc/self/cgroup", buf, sizeof(buf)))
+ return 0;
+
+ return uv__get_cgroup_constrained_memory(buf);
+}
+
+
+static uint64_t uv__get_cgroup1_current_memory(char buf[1024]) {
+ char filename[4097];
+ uint64_t current;
+ char* p;
+ int n;
+
+ /* Find out where the controller is mounted. */
+ p = uv__cgroup1_find_memory_controller(buf, &n);
+ if (p != NULL) {
+ snprintf(filename, sizeof(filename),
+ "/sys/fs/cgroup/memory/%.*s/memory.usage_in_bytes", n, p);
+ current = uv__read_uint64(filename);
+
+ /* If the controller wasn't mounted, the reads above will have failed,
+ * as indicated by uv__read_uint64 returning 0.
+ */
+ if (current != 0)
+ return current;
+ }
+
+ /* Fall back to the usage of the global memory controller. */
+ return uv__read_uint64("/sys/fs/cgroup/memory/memory.usage_in_bytes");
+}
+
+static uint64_t uv__get_cgroup2_current_memory(char buf[1024]) {
+ char filename[4097];
+ char* p;
+ int n;
+
+ /* Find out where the controller is mounted. */
+ p = buf + strlen("0::/");
+ n = (int) strcspn(p, "\n");
+
+ snprintf(filename, sizeof(filename),
+ "/sys/fs/cgroup/%.*s/memory.current", n, p);
+ return uv__read_uint64(filename);
+}
+
+uint64_t uv_get_available_memory(void) {
+ char buf[1024];
+ uint64_t constrained;
+ uint64_t current;
+ uint64_t total;
+
+ if (uv__slurp("/proc/self/cgroup", buf, sizeof(buf)))
+ return 0;
+
+ constrained = uv__get_cgroup_constrained_memory(buf);
+ if (constrained == 0)
+ return uv_get_free_memory();
+
+ total = uv_get_total_memory();
+ if (constrained > total)
+ return uv_get_free_memory();
+
+ /* In the case of cgroupv2, we'll only have a single entry. */
+ if (strncmp(buf, "0::/", 4))
+ current = uv__get_cgroup1_current_memory(buf);
+ else
+ current = uv__get_cgroup2_current_memory(buf);
+
+ /* memory usage can be higher than the limit (for short bursts of time) */
+ if (constrained < current)
+ return 0;
+
+ return constrained - current;
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct sysinfo info;
+ char buf[128]; /* Large enough to hold all of /proc/loadavg. */
+
+ if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
+ if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
+ return;
+
+ if (sysinfo(&info) < 0)
+ return;
+
+ avg[0] = (double) info.loads[0] / 65536.0;
+ avg[1] = (double) info.loads[1] / 65536.0;
+ avg[2] = (double) info.loads[2] / 65536.0;
+}
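uv_loadavg() prefers the already-formatted /proc/loadavg line and only falls back to sysinfo(), whose loads[] values are fixed point with a 2^16 scale, which is where the division by 65536.0 comes from. A tiny sketch of both decodings on made-up values:

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  double avg[3];

  /* Path 1: a /proc/loadavg style line; only the first three numbers
   * (1, 5 and 15 minute averages) are consumed. */
  const char* buf = "0.52 0.58 0.59 1/873 12345\n";
  assert(3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]));
  assert(fabs(avg[1] - 0.58) < 1e-9);

  /* Path 2: a sysinfo() style fixed-point load, scale 1/65536. */
  unsigned long raw = 34078;               /* made-up info.loads[0] */
  double load = (double) raw / 65536.0;
  assert(fabs(load - 0.52) < 1e-3);        /* 34078 / 65536 ~= 0.52 */
  return 0;
}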
+
+
+static int compare_watchers(const struct watcher_list* a,
+ const struct watcher_list* b) {
+ if (a->wd < b->wd) return -1;
+ if (a->wd > b->wd) return 1;
+ return 0;
+}
+
+
+static int init_inotify(uv_loop_t* loop) {
+ int fd;
+
+ if (loop->inotify_fd != -1)
+ return 0;
+
+ fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
+ if (fd < 0)
+ return UV__ERR(errno);
+
+ loop->inotify_fd = fd;
+ uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
+ uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
+
+ return 0;
+}
+
+
+static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) {
+ /* Open the inotify_fd, and re-arm all the inotify watchers. */
+ int err;
+ struct watcher_list* tmp_watcher_list_iter;
+ struct watcher_list* watcher_list;
+ struct watcher_list tmp_watcher_list;
+ struct uv__queue queue;
+ struct uv__queue* q;
+ uv_fs_event_t* handle;
+ char* tmp_path;
+
+ if (root == NULL)
+ return 0;
+
+ /* We must restore the old watcher list to be able to close items
+ * out of it.
+ */
+ loop->inotify_watchers = root;
+
+ uv__queue_init(&tmp_watcher_list.watchers);
+ /* Note that the queue we use is shared with the start() and stop()
+ * functions, making uv__queue_foreach unsafe to use. So we use the
+ * uv__queue_move trick to safely iterate. Also don't free the watcher
+ * list until we're done iterating. c.f. uv__inotify_read.
+ */
+ RB_FOREACH_SAFE(watcher_list, watcher_root,
+ uv__inotify_watchers(loop), tmp_watcher_list_iter) {
+ watcher_list->iterating = 1;
+ uv__queue_move(&watcher_list->watchers, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ handle = uv__queue_data(q, uv_fs_event_t, watchers);
+ /* It's critical to keep a copy of path here, because it
+ * will be set to NULL by stop() and then deallocated by
+ * maybe_free_watcher_list
+ */
+ tmp_path = uv__strdup(handle->path);
+ assert(tmp_path != NULL);
+ uv__queue_remove(q);
+ uv__queue_insert_tail(&watcher_list->watchers, q);
+ uv_fs_event_stop(handle);
+
+ uv__queue_insert_tail(&tmp_watcher_list.watchers, &handle->watchers);
+ handle->path = tmp_path;
+ }
+ watcher_list->iterating = 0;
+ maybe_free_watcher_list(watcher_list, loop);
+ }
+
+ uv__queue_move(&tmp_watcher_list.watchers, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ uv__queue_remove(q);
+ handle = uv__queue_data(q, uv_fs_event_t, watchers);
+ tmp_path = handle->path;
+ handle->path = NULL;
+ err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
+ uv__free(tmp_path);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+
+static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
+ struct watcher_list w;
+ w.wd = wd;
+ return RB_FIND(watcher_root, uv__inotify_watchers(loop), &w);
+}
+
+
+static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
+ /* if the watcher_list->watchers is being iterated over, we can't free it. */
+ if ((!w->iterating) && uv__queue_empty(&w->watchers)) {
+ /* No watchers left for this path. Clean up. */
+ RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w);
+ inotify_rm_watch(loop->inotify_fd, w->wd);
+ uv__free(w);
+ }
+}
+
+
+static void uv__inotify_read(uv_loop_t* loop,
+ uv__io_t* dummy,
+ unsigned int events) {
+ const struct inotify_event* e;
+ struct watcher_list* w;
+ uv_fs_event_t* h;
+ struct uv__queue queue;
+ struct uv__queue* q;
+ const char* path;
+ ssize_t size;
+ const char *p;
+ /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
+ char buf[4096];
+
+ for (;;) {
+ do
+ size = read(loop->inotify_fd, buf, sizeof(buf));
+ while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ assert(errno == EAGAIN || errno == EWOULDBLOCK);
+ break;
+ }
+
+ assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
+
+ /* Now we have one or more inotify_event structs. */
+ for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
+ e = (const struct inotify_event*) p;
+
+ events = 0;
+ if (e->mask & (IN_ATTRIB|IN_MODIFY))
+ events |= UV_CHANGE;
+ if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
+ events |= UV_RENAME;
+
+ w = find_watcher(loop, e->wd);
+ if (w == NULL)
+ continue; /* Stale event, no watchers left. */
+
+ /* inotify does not return the filename when monitoring a single file
+ * for modifications. Repurpose the filename for API compatibility.
+ * I'm not convinced this is a good thing, maybe it should go.
+ */
+ path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
+
+ /* We're about to iterate over the queue and call user's callbacks.
+ * What can go wrong?
+ * A callback could call uv_fs_event_stop()
+ * and the queue can change under our feet.
+ * So, we use the uv__queue_move() trick to safely iterate over the queue.
+ * And we don't free the watcher_list until we're done iterating.
+ *
+ * First,
+ * tell uv_fs_event_stop() (that could be called from a user's callback)
+ * not to free watcher_list.
+ */
+ w->iterating = 1;
+ uv__queue_move(&w->watchers, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ h = uv__queue_data(q, uv_fs_event_t, watchers);
+
+ uv__queue_remove(q);
+ uv__queue_insert_tail(&w->watchers, q);
+
+ h->cb(h, path, events, 0);
+ }
+ /* done iterating, time to (maybe) free empty watcher_list */
+ w->iterating = 0;
+ maybe_free_watcher_list(w, loop);
+ }
+ }
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ struct watcher_list* w;
+ uv_loop_t* loop;
+ size_t len;
+ int events;
+ int err;
+ int wd;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ loop = handle->loop;
+
+ err = init_inotify(loop);
+ if (err)
+ return err;
+
+ events = IN_ATTRIB
+ | IN_CREATE
+ | IN_MODIFY
+ | IN_DELETE
+ | IN_DELETE_SELF
+ | IN_MOVE_SELF
+ | IN_MOVED_FROM
+ | IN_MOVED_TO;
+
+ wd = inotify_add_watch(loop->inotify_fd, path, events);
+ if (wd == -1)
+ return UV__ERR(errno);
+
+ w = find_watcher(loop, wd);
+ if (w)
+ goto no_insert;
+
+ len = strlen(path) + 1;
+ w = (watcher_list*)uv__malloc(sizeof(*w) + len);
+ if (w == NULL)
+ return UV_ENOMEM;
+
+ w->wd = wd;
+ w->path = (char*)memcpy(w + 1, path, len);
+ uv__queue_init(&w->watchers);
+ w->iterating = 0;
+ RB_INSERT(watcher_root, uv__inotify_watchers(loop), w);
+
+no_insert:
+ uv__handle_start(handle);
+ uv__queue_insert_tail(&w->watchers, &handle->watchers);
+ handle->path = w->path;
+ handle->cb = cb;
+ handle->wd = wd;
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ struct watcher_list* w;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ w = find_watcher(handle->loop, handle->wd);
+ assert(w != NULL);
+
+ handle->wd = -1;
+ handle->path = NULL;
+ uv__handle_stop(handle);
+ uv__queue_remove(&handle->watchers);
+
+ maybe_free_watcher_list(w, handle->loop);
+
+ return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/loop-watcher.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/loop-watcher.cpp
index b8c1c2a..2db8b51 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/loop-watcher.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/loop-watcher.cpp
@@ -32,7 +32,7 @@
int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
if (uv__is_active(handle)) return 0; \
if (cb == NULL) return UV_EINVAL; \
- QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \
+ uv__queue_insert_head(&handle->loop->name##_handles, &handle->queue); \
handle->name##_cb = cb; \
uv__handle_start(handle); \
return 0; \
@@ -40,21 +40,21 @@
\
int uv_##name##_stop(uv_##name##_t* handle) { \
if (!uv__is_active(handle)) return 0; \
- QUEUE_REMOVE(&handle->queue); \
+ uv__queue_remove(&handle->queue); \
uv__handle_stop(handle); \
return 0; \
} \
\
void uv__run_##name(uv_loop_t* loop) { \
uv_##name##_t* h; \
- QUEUE queue; \
- QUEUE* q; \
- QUEUE_MOVE(&loop->name##_handles, &queue); \
- while (!QUEUE_EMPTY(&queue)) { \
- q = QUEUE_HEAD(&queue); \
- h = QUEUE_DATA(q, uv_##name##_t, queue); \
- QUEUE_REMOVE(q); \
- QUEUE_INSERT_TAIL(&loop->name##_handles, q); \
+ struct uv__queue queue; \
+ struct uv__queue* q; \
+ uv__queue_move(&loop->name##_handles, &queue); \
+ while (!uv__queue_empty(&queue)) { \
+ q = uv__queue_head(&queue); \
+ h = uv__queue_data(q, uv_##name##_t, queue); \
+ uv__queue_remove(q); \
+ uv__queue_insert_tail(&loop->name##_handles, q); \
h->name##_cb(h); \
} \
} \
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/loop.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/loop.cpp
index 2e819cd..3babe4d 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/loop.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/loop.cpp
@@ -45,22 +45,25 @@
err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;
+ memset(&lfields->loop_metrics.metrics,
+ 0,
+ sizeof(lfields->loop_metrics.metrics));
heap_init((struct heap*) &loop->timer_heap);
- QUEUE_INIT(&loop->wq);
- QUEUE_INIT(&loop->idle_handles);
- QUEUE_INIT(&loop->async_handles);
- QUEUE_INIT(&loop->check_handles);
- QUEUE_INIT(&loop->prepare_handles);
- QUEUE_INIT(&loop->handle_queue);
+ uv__queue_init(&loop->wq);
+ uv__queue_init(&loop->idle_handles);
+ uv__queue_init(&loop->async_handles);
+ uv__queue_init(&loop->check_handles);
+ uv__queue_init(&loop->prepare_handles);
+ uv__queue_init(&loop->handle_queue);
loop->active_handles = 0;
loop->active_reqs.count = 0;
loop->nfds = 0;
loop->watchers = NULL;
loop->nwatchers = 0;
- QUEUE_INIT(&loop->pending_queue);
- QUEUE_INIT(&loop->watcher_queue);
+ uv__queue_init(&loop->pending_queue);
+ uv__queue_init(&loop->watcher_queue);
loop->closing_handles = NULL;
uv__update_time(loop);
@@ -79,13 +82,10 @@
goto fail_platform_init;
uv__signal_global_once_init();
- err = uv_signal_init(loop, &loop->child_watcher);
+ err = uv__process_init(loop);
if (err)
goto fail_signal_init;
-
- uv__handle_unref(&loop->child_watcher);
- loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
- QUEUE_INIT(&loop->process_handles);
+ uv__queue_init(&loop->process_handles);
err = uv_rwlock_init(&loop->cloexec_lock);
if (err)
@@ -152,9 +152,9 @@
if (w == NULL)
continue;
- if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
+ if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) {
w->events = 0; /* Force re-registration in uv__io_poll. */
- QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
}
@@ -180,7 +180,7 @@
}
uv_mutex_lock(&loop->wq_mutex);
- assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+ assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
assert(!uv__has_active_reqs(loop));
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
@@ -192,8 +192,8 @@
uv_rwlock_destroy(&loop->cloexec_lock);
#if 0
- assert(QUEUE_EMPTY(&loop->pending_queue));
- assert(QUEUE_EMPTY(&loop->watcher_queue));
+ assert(uv__queue_empty(&loop->pending_queue));
+ assert(uv__queue_empty(&loop->watcher_queue));
assert(loop->nfds == 0);
#endif
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/netbsd.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/netbsd.cpp
index b6886a1..4c6d5a2 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/netbsd.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/netbsd.cpp
@@ -103,7 +103,7 @@
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
@@ -120,7 +120,7 @@
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) info;
}
@@ -131,6 +131,11 @@
}
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
+
+
int uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc2 *kinfo = NULL;
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/openbsd.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/openbsd.cpp
index 62740f7..2aa61e2 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/openbsd.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/openbsd.cpp
@@ -116,7 +116,7 @@
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
@@ -128,7 +128,7 @@
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
- return UV__ERR(errno);
+ return 0;
return (uint64_t) info;
}
@@ -139,6 +139,11 @@
}
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
+
+
int uv_resident_set_memory(size_t* rss) {
struct kinfo_proc kinfo;
size_t page_size = getpagesize();
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/pipe.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/pipe.cpp
index c8ba31d..a60b1a0 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/pipe.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/pipe.cpp
@@ -41,26 +41,60 @@
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+ return uv_pipe_bind2(handle, name, strlen(name), 0);
+}
+
+
+int uv_pipe_bind2(uv_pipe_t* handle,
+ const char* name,
+ size_t namelen,
+ unsigned int flags) {
struct sockaddr_un saddr;
- const char* pipe_fname;
+ char* pipe_fname;
int sockfd;
int err;
pipe_fname = NULL;
+ if (flags & ~UV_PIPE_NO_TRUNCATE)
+ return UV_EINVAL;
+
+ if (name == NULL)
+ return UV_EINVAL;
+
+ if (namelen == 0)
+ return UV_EINVAL;
+
+#ifndef __linux__
+ /* Abstract socket namespace only works on Linux. */
+ if (*name == '\0')
+ return UV_EINVAL;
+#endif
+
+ if (flags & UV_PIPE_NO_TRUNCATE)
+ if (namelen > sizeof(saddr.sun_path))
+ return UV_EINVAL;
+
+ /* Truncate long paths. Documented behavior. */
+ if (namelen > sizeof(saddr.sun_path))
+ namelen = sizeof(saddr.sun_path);
+
/* Already bound? */
if (uv__stream_fd(handle) >= 0)
return UV_EINVAL;
- if (uv__is_closing(handle)) {
- return UV_EINVAL;
- }
- /* Make a copy of the file name, it outlives this function's scope. */
- pipe_fname = uv__strdup(name);
- if (pipe_fname == NULL)
- return UV_ENOMEM;
- /* We've got a copy, don't touch the original any more. */
- name = NULL;
+ if (uv__is_closing(handle))
+ return UV_EINVAL;
+
+ /* Make a copy of the file path unless it is an abstract socket.
+ * We unlink the file later but abstract sockets disappear
+ * automatically since they're not real file system entities.
+ */
+ if (*name != '\0') {
+ pipe_fname = uv__strdup(name);
+ if (pipe_fname == NULL)
+ return UV_ENOMEM;
+ }
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
if (err < 0)
@@ -68,7 +102,7 @@
sockfd = err;
memset(&saddr, 0, sizeof saddr);
- uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
+ memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;
if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
@@ -83,12 +117,12 @@
/* Success. */
handle->flags |= UV_HANDLE_BOUND;
- handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
+ handle->pipe_fname = pipe_fname; /* NULL or a strdup'ed copy. */
handle->io_watcher.fd = sockfd;
return 0;
err_socket:
- uv__free((void*)pipe_fname);
+ uv__free(pipe_fname);
return err;
}
@@ -176,11 +210,44 @@
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
+ uv_pipe_connect2(req, handle, name, strlen(name), 0, cb);
+}
+
+
+int uv_pipe_connect2(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ size_t namelen,
+ unsigned int flags,
+ uv_connect_cb cb) {
struct sockaddr_un saddr;
int new_sock;
int err;
int r;
+ if (flags & ~UV_PIPE_NO_TRUNCATE)
+ return UV_EINVAL;
+
+ if (name == NULL)
+ return UV_EINVAL;
+
+ if (namelen == 0)
+ return UV_EINVAL;
+
+#ifndef __linux__
+ /* Abstract socket namespace only works on Linux. */
+ if (*name == '\0')
+ return UV_EINVAL;
+#endif
+
+ if (flags & UV_PIPE_NO_TRUNCATE)
+ if (namelen > sizeof(saddr.sun_path))
+ return UV_EINVAL;
+
+ /* Truncate long paths. Documented behavior. */
+ if (namelen > sizeof(saddr.sun_path))
+ namelen = sizeof(saddr.sun_path);
+
new_sock = (uv__stream_fd(handle) == -1);
if (new_sock) {
@@ -191,7 +258,7 @@
}
memset(&saddr, 0, sizeof saddr);
- uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path));
+ memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;
do {
@@ -230,12 +297,13 @@
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*)handle;
req->cb = cb;
- QUEUE_INIT(&req->queue);
+ uv__queue_init(&req->queue);
/* Force callback to run on next tick in case of error. */
if (err)
uv__io_feed(handle->loop, &handle->io_watcher);
+ return 0;
}
@@ -357,7 +425,7 @@
}
/* stat must be used as fstat has a bug on Darwin */
- if (stat(name_buffer, &pipe_stat) == -1) {
+ if (uv__stat(name_buffer, &pipe_stat) == -1) {
uv__free(name_buffer);
return -errno;
}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/poll.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/poll.cpp
index 7364731..c21722b 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/poll.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/poll.cpp
@@ -125,7 +125,7 @@
UV_PRIORITIZED)) == 0);
assert(!uv__is_closing(handle));
- watchers = handle->loop->watchers;
+ watchers = (void**)handle->loop->watchers;
w = &handle->io_watcher;
if (uv__fd_exists(handle->loop, w->fd))
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-hrtime.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-hrtime.cpp
index 323dfc2..7b45c01 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-hrtime.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-hrtime.cpp
@@ -23,13 +23,14 @@
#include "internal.h"
#include <stdint.h>
+#include <stdlib.h>
#include <time.h>
-#undef NANOSEC
-#define NANOSEC ((uint64_t) 1e9)
-
uint64_t uv__hrtime(uv_clocktype_t type) {
- struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
+ struct timespec t;
+
+ if (clock_gettime(CLOCK_MONOTONIC, &t))
+ abort();
+
+ return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-poll.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-poll.cpp
index 8da038d..b71eee3 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-poll.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/posix-poll.cpp
@@ -132,11 +132,12 @@
void uv__io_poll(uv_loop_t* loop, int timeout) {
+ uv__loop_internal_fields_t* lfields;
sigset_t* pset;
sigset_t set;
uint64_t time_base;
uint64_t time_diff;
- QUEUE* q;
+ struct uv__queue* q;
uv__io_t* w;
size_t i;
unsigned int nevents;
@@ -148,17 +149,19 @@
int reset_timeout;
if (loop->nfds == 0) {
- assert(QUEUE_EMPTY(&loop->watcher_queue));
+ assert(uv__queue_empty(&loop->watcher_queue));
return;
}
- /* Take queued watchers and add their fds to our poll fds array. */
- while (!QUEUE_EMPTY(&loop->watcher_queue)) {
- q = QUEUE_HEAD(&loop->watcher_queue);
- QUEUE_REMOVE(q);
- QUEUE_INIT(q);
+ lfields = uv__get_internal_fields(loop);
- w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ /* Take queued watchers and add their fds to our poll fds array. */
+ while (!uv__queue_empty(&loop->watcher_queue)) {
+ q = uv__queue_head(&loop->watcher_queue);
+ uv__queue_remove(q);
+ uv__queue_init(q);
+
+ w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
@@ -179,7 +182,7 @@
assert(timeout >= -1);
time_base = loop->time;
- if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@@ -198,6 +201,12 @@
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
+ /* Store the current timeout in a location that's globally accessible so
+ * other locations like uv__work_done() can determine whether the queue
+ * of events in the callback were waiting when poll was called.
+ */
+ lfields->current_timeout = timeout;
+
if (pset != NULL)
if (pthread_sigmask(SIG_BLOCK, pset, NULL))
abort();
@@ -292,9 +301,11 @@
}
}
+ uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
+ uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/process.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/process.cpp
index 0916aa4..2d622c9 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/process.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/process.cpp
@@ -55,7 +55,7 @@
extern char **environ;
#endif
-#if defined(__linux__) || defined(__GLIBC__)
+#if defined(__linux__)
# include <grp.h>
#endif
@@ -79,8 +79,28 @@
assert(signum == SIGCHLD);
uv__wait_children(handle->loop);
}
+
+
+int uv__process_init(uv_loop_t* loop) {
+ int err;
+
+ err = uv_signal_init(loop, &loop->child_watcher);
+ if (err)
+ return err;
+ uv__handle_unref(&loop->child_watcher);
+ loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
+ return 0;
+}
+
+
+#else
+int uv__process_init(uv_loop_t* loop) {
+ memset(&loop->child_watcher, 0, sizeof(loop->child_watcher));
+ return 0;
+}
#endif
+
void uv__wait_children(uv_loop_t* loop) {
uv_process_t* process;
int exit_status;
@@ -88,23 +108,24 @@
int status;
int options;
pid_t pid;
- QUEUE pending;
- QUEUE* q;
- QUEUE* h;
+ struct uv__queue pending;
+ struct uv__queue* q;
+ struct uv__queue* h;
- QUEUE_INIT(&pending);
+ uv__queue_init(&pending);
h = &loop->process_handles;
- q = QUEUE_HEAD(h);
+ q = uv__queue_head(h);
while (q != h) {
- process = QUEUE_DATA(q, uv_process_t, queue);
- q = QUEUE_NEXT(q);
+ process = uv__queue_data(q, uv_process_t, queue);
+ q = uv__queue_next(q);
#ifndef UV_USE_SIGCHLD
if ((process->flags & UV_HANDLE_REAP) == 0)
continue;
options = 0;
process->flags &= ~UV_HANDLE_REAP;
+ loop->nfds--;
#else
options = WNOHANG;
#endif
@@ -128,18 +149,18 @@
assert(pid == process->pid);
process->status = status;
- QUEUE_REMOVE(&process->queue);
- QUEUE_INSERT_TAIL(&pending, &process->queue);
+ uv__queue_remove(&process->queue);
+ uv__queue_insert_tail(&pending, &process->queue);
}
h = &pending;
- q = QUEUE_HEAD(h);
+ q = uv__queue_head(h);
while (q != h) {
- process = QUEUE_DATA(q, uv_process_t, queue);
- q = QUEUE_NEXT(q);
+ process = uv__queue_data(q, uv_process_t, queue);
+ q = uv__queue_next(q);
- QUEUE_REMOVE(&process->queue);
- QUEUE_INIT(&process->queue);
+ uv__queue_remove(&process->queue);
+ uv__queue_init(&process->queue);
uv__handle_stop(process);
if (process->exit_cb == NULL)
@@ -155,13 +176,18 @@
process->exit_cb(process, exit_status, term_signal);
}
- assert(QUEUE_EMPTY(&pending));
+ assert(uv__queue_empty(&pending));
}
/*
* Used for initializing stdio streams like options.stdin_stream. Returns
* zero on success. See also the cleanup section in uv_spawn().
*/
+#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
+/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
+ * avoided. Since this isn't called on those targets, the function
+ * doesn't even need to be defined for them.
+ */
static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
int mask;
int fd;
@@ -248,11 +274,6 @@
}
-#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
-/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
- * avoided. Since this isn't called on those targets, the function
- * doesn't even need to be defined for them.
- */
static void uv__process_child_init(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
@@ -384,7 +405,6 @@
uv__write_errno(error_fd);
}
-#endif
#if defined(__APPLE__)
@@ -665,7 +685,7 @@
if (options->file == NULL)
return ENOENT;
- /* The environment for the child process is that of the parent unless overriden
+ /* The environment for the child process is that of the parent unless overridden
* by options->env */
char** env = environ;
if (options->env != NULL)
@@ -931,6 +951,7 @@
return err;
}
+#endif /* !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)) */
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
@@ -957,7 +978,7 @@
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
- QUEUE_INIT(&process->queue);
+ uv__queue_init(&process->queue);
process->status = 0;
stdio_count = options->stdio_count;
@@ -1012,11 +1033,15 @@
process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN;
}
+ /* This prevents uv__io_poll() from bailing out prematurely, being unaware
+ * that we added an event here for it to react to. We will decrement this
+ * again after the waitpid call succeeds. */
+ loop->nfds++;
#endif
process->pid = pid;
process->exit_cb = options->exit_cb;
- QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
+ uv__queue_insert_tail(&loop->process_handles, &process->queue);
uv__handle_start(process);
}
@@ -1078,8 +1103,10 @@
void uv__process_close(uv_process_t* handle) {
- QUEUE_REMOVE(&handle->queue);
+ uv__queue_remove(&handle->queue);
uv__handle_stop(handle);
- if (QUEUE_EMPTY(&handle->loop->process_handles))
+#ifdef UV_USE_SIGCHLD
+ if (uv__queue_empty(&handle->loop->process_handles))
uv_signal_stop(&handle->loop->child_watcher);
+#endif
}
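
Most of the churn in this file, and in the files that follow, is the mechanical migration from the old QUEUE_* macros to struct uv__queue and its helper functions. A self-contained sketch of the same intrusive circular doubly-linked list pattern, with illustrative names rather than the real uv__queue API:

    #include <stddef.h>
    #include <stdio.h>

    struct queue { struct queue* next; struct queue* prev; };

    static void queue_init(struct queue* q) { q->next = q; q->prev = q; }
    static int queue_empty(const struct queue* q) { return q == q->next; }
    static struct queue* queue_head(const struct queue* q) { return q->next; }

    static void queue_insert_tail(struct queue* h, struct queue* q) {
      q->next = h;
      q->prev = h->prev;
      q->prev->next = q;
      h->prev = q;
    }

    static void queue_remove(struct queue* q) {
      q->prev->next = q->next;
      q->next->prev = q->prev;
    }

    /* Recover the enclosing object from its embedded link, like uv__queue_data(). */
    #define queue_data(ptr, type, field) \
      ((type*) ((char*) (ptr) - offsetof(type, field)))

    struct item { int value; struct queue node; };

    int main(void) {
      struct queue pending;
      struct item a = { 1, { NULL, NULL } };
      struct item b = { 2, { NULL, NULL } };

      queue_init(&pending);
      queue_insert_tail(&pending, &a.node);
      queue_insert_tail(&pending, &b.node);

      while (!queue_empty(&pending)) {
        struct queue* q = queue_head(&pending);
        queue_remove(q);
        printf("item %d\n", queue_data(q, struct item, node)->value);
      }
      return 0;
    }
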
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/pthread-fixes.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/pthread-fixes.cpp
deleted file mode 100644
index 022d79c..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/pthread-fixes.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2013, Sony Mobile Communications AB
- * Copyright (c) 2012, Google Inc.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* Android versions < 4.1 have a broken pthread_sigmask. */
-#include "uv-common.h"
-
-#include <errno.h>
-#include <pthread.h>
-#include <signal.h>
-
-int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
- static int workaround;
- int err;
-
- if (uv__load_relaxed(&workaround)) {
- return sigprocmask(how, set, oset);
- } else {
- err = pthread_sigmask(how, set, oset);
- if (err) {
- if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
- uv__store_relaxed(&workaround, 1);
- return 0;
- } else {
- return -1;
- }
- }
- }
-
- return 0;
-}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/random-devurandom.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/random-devurandom.cpp
index 05e52a5..d6336f2 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/random-devurandom.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/random-devurandom.cpp
@@ -40,7 +40,7 @@
if (fd < 0)
return fd;
- if (fstat(fd, &s)) {
+ if (uv__fstat(fd, &s)) {
uv__close(fd);
return UV__ERR(errno);
}
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/random-getrandom.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/random-getrandom.cpp
index bcc9408..054eccf 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/random-getrandom.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/random-getrandom.cpp
@@ -24,8 +24,6 @@
#ifdef __linux__
-#include "linux-syscalls.h"
-
#define uv__random_getrandom_init() 0
#else /* !__linux__ */
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/signal.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/signal.cpp
index 1133c73..63aba5a 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/signal.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/signal.cpp
@@ -279,6 +279,8 @@
int uv__signal_loop_fork(uv_loop_t* loop) {
+ if (loop->signal_pipefd[0] == -1)
+ return 0;
uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
uv__close(loop->signal_pipefd[0]);
uv__close(loop->signal_pipefd[1]);
@@ -289,16 +291,16 @@
void uv__signal_loop_cleanup(uv_loop_t* loop) {
- QUEUE* q;
+ struct uv__queue* q;
/* Stop all the signal watchers that are still attached to this loop. This
* ensures that the (shared) signal tree doesn't contain any invalid
* entries, and that signal handlers are removed when appropriate.
- * It's safe to use QUEUE_FOREACH here because the handles and the handle
+ * It's safe to use uv__queue_foreach here because the handles and the handle
* queue are not modified by uv__signal_stop().
*/
- QUEUE_FOREACH(q, &loop->handle_queue) {
- uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
+ uv__queue_foreach(q, &loop->handle_queue) {
+ uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue);
if (handle->type == UV_SIGNAL)
uv__signal_stop((uv_signal_t*) handle);
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/spinlock.h b/wpinet/src/main/native/thirdparty/libuv/src/unix/spinlock.h
deleted file mode 100644
index a20c83c..0000000
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/spinlock.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef UV_SPINLOCK_H_
-#define UV_SPINLOCK_H_
-
-#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */
-#include "atomic-ops.h"
-
-#define UV_SPINLOCK_INITIALIZER { 0 }
-
-typedef struct {
- int lock;
-} uv_spinlock_t;
-
-UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
-UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
-UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
-UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
-
-UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
- ACCESS_ONCE(int, spinlock->lock) = 0;
-}
-
-UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
- while (!uv_spinlock_trylock(spinlock)) cpu_relax();
-}
-
-UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
- ACCESS_ONCE(int, spinlock->lock) = 0;
-}
-
-UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
- /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
- * Not really critical until we have locks that are (frequently) contended
- * for by several threads.
- */
- return 0 == cmpxchgi(&spinlock->lock, 0, 1);
-}
-
-#endif /* UV_SPINLOCK_H_ */
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/stream.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/stream.cpp
index fa25812..265ddad 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/stream.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/stream.cpp
@@ -60,6 +60,16 @@
};
#endif /* defined(__APPLE__) */
+union uv__cmsg {
+ struct cmsghdr hdr;
+ /* This cannot be larger because of the IBMi PASE limitation that
+ * the total size of control messages cannot exceed 256 bytes.
+ */
+ char pad[256];
+};
+
+STATIC_ASSERT(256 == sizeof(union uv__cmsg));
+
static void uv__stream_connect(uv_stream_t*);
static void uv__write(uv_stream_t* stream);
static void uv__read(uv_stream_t* stream);
@@ -84,8 +94,8 @@
stream->accepted_fd = -1;
stream->queued_fds = NULL;
stream->delayed_error = 0;
- QUEUE_INIT(&stream->write_queue);
- QUEUE_INIT(&stream->write_completed_queue);
+ uv__queue_init(&stream->write_queue);
+ uv__queue_init(&stream->write_completed_queue);
stream->write_queue_size = 0;
if (loop->emfile_fd == -1) {
@@ -429,15 +439,15 @@
void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
uv_write_t* req;
- QUEUE* q;
- while (!QUEUE_EMPTY(&stream->write_queue)) {
- q = QUEUE_HEAD(&stream->write_queue);
- QUEUE_REMOVE(q);
+ struct uv__queue* q;
+ while (!uv__queue_empty(&stream->write_queue)) {
+ q = uv__queue_head(&stream->write_queue);
+ uv__queue_remove(q);
- req = QUEUE_DATA(q, uv_write_t, queue);
+ req = uv__queue_data(q, uv_write_t, queue);
req->error = error;
- QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+ uv__queue_insert_tail(&stream->write_completed_queue, &req->queue);
}
}
@@ -495,76 +505,34 @@
}
-#if defined(UV_HAVE_KQUEUE)
-# define UV_DEC_BACKLOG(w) w->rcount--;
-#else
-# define UV_DEC_BACKLOG(w) /* no-op */
-#endif /* defined(UV_HAVE_KQUEUE) */
-
-
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_stream_t* stream;
int err;
+ int fd;
stream = container_of(w, uv_stream_t, io_watcher);
assert(events & POLLIN);
assert(stream->accepted_fd == -1);
assert(!(stream->flags & UV_HANDLE_CLOSING));
- uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+ fd = uv__stream_fd(stream);
+ err = uv__accept(fd);
- /* connection_cb can close the server socket while we're
- * in the loop so check it on each iteration.
- */
- while (uv__stream_fd(stream) != -1) {
- assert(stream->accepted_fd == -1);
+ if (err == UV_EMFILE || err == UV_ENFILE)
+ err = uv__emfile_trick(loop, fd); /* Shed load. */
-#if defined(UV_HAVE_KQUEUE)
- if (w->rcount <= 0)
- return;
-#endif /* defined(UV_HAVE_KQUEUE) */
+ if (err < 0)
+ return;
- err = uv__accept(uv__stream_fd(stream));
- if (err < 0) {
- if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
- return; /* Not an error. */
+ stream->accepted_fd = err;
+ stream->connection_cb(stream, 0);
- if (err == UV_ECONNABORTED)
- continue; /* Ignore. Nothing we can do about that. */
-
- if (err == UV_EMFILE || err == UV_ENFILE) {
- err = uv__emfile_trick(loop, uv__stream_fd(stream));
- if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
- break;
- }
-
- stream->connection_cb(stream, err);
- continue;
- }
-
- UV_DEC_BACKLOG(w)
- stream->accepted_fd = err;
- stream->connection_cb(stream, 0);
-
- if (stream->accepted_fd != -1) {
- /* The user hasn't yet accepted called uv_accept() */
- uv__io_stop(loop, &stream->io_watcher, POLLIN);
- return;
- }
-
- if (stream->type == UV_TCP &&
- (stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
- /* Give other processes a chance to accept connections. */
- struct timespec timeout = { 0, 1 };
- nanosleep(&timeout, NULL);
- }
- }
+ if (stream->accepted_fd != -1)
+ /* The user hasn't yet called uv_accept(). */
+ uv__io_stop(loop, &stream->io_watcher, POLLIN);
}
-#undef UV_DEC_BACKLOG
-
-
int uv_accept(uv_stream_t* server, uv_stream_t* client) {
int err;
@@ -659,13 +627,13 @@
uv_shutdown_t* req;
int err;
- assert(QUEUE_EMPTY(&stream->write_queue));
+ assert(uv__queue_empty(&stream->write_queue));
if (!(stream->flags & UV_HANDLE_CLOSING)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
}
- if (!(stream->flags & UV_HANDLE_SHUTTING))
+ if (!uv__is_stream_shutting(stream))
return;
req = stream->shutdown_req;
@@ -674,7 +642,6 @@
if ((stream->flags & UV_HANDLE_CLOSING) ||
!(stream->flags & UV_HANDLE_SHUT)) {
stream->shutdown_req = NULL;
- stream->flags &= ~UV_HANDLE_SHUTTING;
uv__req_unregister(stream->loop, req);
err = 0;
@@ -747,7 +714,7 @@
uv_stream_t* stream = req->handle;
/* Pop the req off tcp->write_queue. */
- QUEUE_REMOVE(&req->queue);
+ uv__queue_remove(&req->queue);
/* Only free when there was no error. On error, we touch up write_queue_size
* right before making the callback. The reason we don't do that right away
@@ -764,7 +731,7 @@
/* Add it to the write_completed_queue where it will have its
* callback called in the near future.
*/
- QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+ uv__queue_insert_tail(&stream->write_completed_queue, &req->queue);
uv__io_feed(stream->loop, &stream->io_watcher);
}
@@ -812,18 +779,14 @@
if (send_handle != NULL) {
int fd_to_send;
struct msghdr msg;
- struct cmsghdr *cmsg;
- union {
- char data[64];
- struct cmsghdr alias;
- } scratch;
+ union uv__cmsg cmsg;
if (uv__is_closing(send_handle))
return UV_EBADF;
fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
- memset(&scratch, 0, sizeof(scratch));
+ memset(&cmsg, 0, sizeof(cmsg));
assert(fd_to_send >= 0);
@@ -833,20 +796,13 @@
msg.msg_iovlen = iovcnt;
msg.msg_flags = 0;
- msg.msg_control = &scratch.alias;
+ msg.msg_control = &cmsg.hdr;
msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
- cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_RIGHTS;
- cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));
-
- /* silence aliasing warning */
- {
- void* pv = CMSG_DATA(cmsg);
- int* pi = (int*)pv;
- *pi = fd_to_send;
- }
+ cmsg.hdr.cmsg_level = SOL_SOCKET;
+ cmsg.hdr.cmsg_type = SCM_RIGHTS;
+ cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd_to_send));
+ memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(fd_to_send));
do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
@@ -881,18 +837,25 @@
}
static void uv__write(uv_stream_t* stream) {
- QUEUE* q;
+ struct uv__queue* q;
uv_write_t* req;
ssize_t n;
+ int count;
assert(uv__stream_fd(stream) >= 0);
+ /* Prevent loop starvation when the consumer of this stream reads as fast as
+ * (or faster than) we can write it. This `count` mechanism does not need to
+ * change even if we switch to edge-triggered I/O.
+ */
+ count = 32;
+
for (;;) {
- if (QUEUE_EMPTY(&stream->write_queue))
+ if (uv__queue_empty(&stream->write_queue))
return;
- q = QUEUE_HEAD(&stream->write_queue);
- req = QUEUE_DATA(q, uv_write_t, queue);
+ q = uv__queue_head(&stream->write_queue);
+ req = uv__queue_data(q, uv_write_t, queue);
assert(req->handle == stream);
n = uv__try_write(stream,
@@ -905,10 +868,13 @@
req->send_handle = NULL;
if (uv__write_req_update(stream, req, n)) {
uv__write_req_finish(req);
- return; /* TODO(bnoordhuis) Start trying to write the next request. */
+ if (count-- > 0)
+ continue; /* Start trying to write the next request. */
+
+ return;
}
} else if (n != UV_EAGAIN)
- break;
+ goto error;
/* If this is a blocking stream, try again. */
if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
@@ -923,6 +889,7 @@
return;
}
+error:
req->error = n;
uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
@@ -932,28 +899,19 @@
static void uv__write_callbacks(uv_stream_t* stream) {
uv_write_t* req;
- QUEUE* q;
- QUEUE pq;
+ struct uv__queue* q;
+ struct uv__queue pq;
- if (QUEUE_EMPTY(&stream->write_completed_queue))
+ if (uv__queue_empty(&stream->write_completed_queue))
return;
-// FIXME: GCC 12.1 gives a possibly real warning, but we don't know how to fix
-// it
-#if __GNUC__ >= 12
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdangling-pointer="
-#endif // __GNUC__ >= 12
- QUEUE_MOVE(&stream->write_completed_queue, &pq);
-#if __GNUC__ >= 12
-#pragma GCC diagnostic pop
-#endif // __GNUC__ >= 12
+ uv__queue_move(&stream->write_completed_queue, &pq);
- while (!QUEUE_EMPTY(&pq)) {
+ while (!uv__queue_empty(&pq)) {
/* Pop a req off write_completed_queue. */
- q = QUEUE_HEAD(&pq);
- req = QUEUE_DATA(q, uv_write_t, queue);
- QUEUE_REMOVE(q);
+ q = uv__queue_head(&pq);
+ req = uv__queue_data(q, uv_write_t, queue);
+ uv__queue_remove(q);
uv__req_unregister(stream->loop, req);
if (req->bufs != NULL) {
@@ -1020,57 +978,38 @@
}
-#if defined(__PASE__)
-/* on IBMi PASE the control message length can not exceed 256. */
-# define UV__CMSG_FD_COUNT 60
-#else
-# define UV__CMSG_FD_COUNT 64
-#endif
-#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
-
-
static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
struct cmsghdr* cmsg;
+ int fd;
+ int err;
+ size_t i;
+ size_t count;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
- char* start;
- char* end;
- int err;
- void* pv;
- int* pi;
- unsigned int i;
- unsigned int count;
-
if (cmsg->cmsg_type != SCM_RIGHTS) {
fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
cmsg->cmsg_type);
continue;
}
- /* silence aliasing warning */
- pv = CMSG_DATA(cmsg);
- pi = (int*)pv;
-
- /* Count available fds */
- start = (char*) cmsg;
- end = (char*) cmsg + cmsg->cmsg_len;
- count = 0;
- while (start + CMSG_LEN(count * sizeof(*pi)) < end)
- count++;
- assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
+ assert(cmsg->cmsg_len >= CMSG_LEN(0));
+ count = cmsg->cmsg_len - CMSG_LEN(0);
+ assert(count % sizeof(fd) == 0);
+ count /= sizeof(fd);
for (i = 0; i < count; i++) {
+ memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd));
/* Already has accepted fd, queue now */
if (stream->accepted_fd != -1) {
- err = uv__stream_queue_fd(stream, pi[i]);
+ err = uv__stream_queue_fd(stream, fd);
if (err != 0) {
/* Close rest */
for (; i < count; i++)
- uv__close(pi[i]);
+ uv__close(fd);
return err;
}
} else {
- stream->accepted_fd = pi[i];
+ stream->accepted_fd = fd;
}
}
}
@@ -1079,17 +1018,11 @@
}
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wgnu-folding-constant"
-# pragma clang diagnostic ignored "-Wvla-extension"
-#endif
-
static void uv__read(uv_stream_t* stream) {
uv_buf_t buf;
ssize_t nread;
struct msghdr msg;
- char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
+ union uv__cmsg cmsg;
int count;
int err;
int is_ipc;
@@ -1135,8 +1068,8 @@
msg.msg_name = NULL;
msg.msg_namelen = 0;
/* Set up to receive a descriptor even if one isn't in the message */
- msg.msg_controllen = sizeof(cmsg_space);
- msg.msg_control = cmsg_space;
+ msg.msg_controllen = sizeof(cmsg);
+ msg.msg_control = &cmsg.hdr;
do {
nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
@@ -1220,14 +1153,6 @@
}
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
-
-#undef UV__CMSG_FD_COUNT
-#undef UV__CMSG_FD_SIZE
-
-
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
assert(stream->type == UV_TCP ||
stream->type == UV_TTY ||
@@ -1235,7 +1160,7 @@
if (!(stream->flags & UV_HANDLE_WRITABLE) ||
stream->flags & UV_HANDLE_SHUT ||
- stream->flags & UV_HANDLE_SHUTTING ||
+ uv__is_stream_shutting(stream) ||
uv__is_closing(stream)) {
return UV_ENOTCONN;
}
@@ -1248,10 +1173,9 @@
req->handle = stream;
req->cb = cb;
stream->shutdown_req = req;
- stream->flags |= UV_HANDLE_SHUTTING;
stream->flags &= ~UV_HANDLE_WRITABLE;
- if (QUEUE_EMPTY(&stream->write_queue))
+ if (uv__queue_empty(&stream->write_queue))
uv__io_feed(stream->loop, &stream->io_watcher);
return 0;
@@ -1304,7 +1228,7 @@
uv__write_callbacks(stream);
/* Write queue drained. */
- if (QUEUE_EMPTY(&stream->write_queue))
+ if (uv__queue_empty(&stream->write_queue))
uv__drain(stream);
}
}
@@ -1347,7 +1271,7 @@
stream->connect_req = NULL;
uv__req_unregister(stream->loop, req);
- if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
+ if (error < 0 || uv__queue_empty(&stream->write_queue)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
}
@@ -1429,7 +1353,7 @@
req->handle = stream;
req->error = 0;
req->send_handle = send_handle;
- QUEUE_INIT(&req->queue);
+ uv__queue_init(&req->queue);
req->bufs = req->bufsml;
if (nbufs > ARRAY_SIZE(req->bufsml))
@@ -1444,7 +1368,7 @@
stream->write_queue_size += uv__count_bufs(bufs, nbufs);
/* Append the request to write_queue. */
- QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
+ uv__queue_insert_tail(&stream->write_queue, &req->queue);
/* If the queue was empty when this function began, we should attempt to
* do the write immediately. Otherwise start the write_watcher and wait
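
The stream changes replace the per-call cmsg scratch buffers with one 256-byte union uv__cmsg (sized for the IBMi PASE limit) and read the SCM_RIGHTS payload with memcpy() instead of aliasing casts. A standalone sketch of that send/receive pattern over a socketpair, with hypothetical helper names and error handling trimmed:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Fixed-size control-message buffer, mirroring union uv__cmsg above. */
    union cmsg_buf {
      struct cmsghdr hdr;
      char pad[256];
    };

    static int send_fd(int sock, int fd) {
      union cmsg_buf cmsg;
      struct msghdr msg;
      struct iovec iov;
      char byte = 0;

      memset(&cmsg, 0, sizeof(cmsg));
      memset(&msg, 0, sizeof(msg));
      iov.iov_base = &byte;
      iov.iov_len = 1;
      msg.msg_iov = &iov;
      msg.msg_iovlen = 1;
      msg.msg_control = &cmsg.hdr;
      msg.msg_controllen = CMSG_SPACE(sizeof(fd));

      cmsg.hdr.cmsg_level = SOL_SOCKET;
      cmsg.hdr.cmsg_type = SCM_RIGHTS;
      cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd));
      memcpy(CMSG_DATA(&cmsg.hdr), &fd, sizeof(fd));  /* copy, no aliasing cast */

      return (int) sendmsg(sock, &msg, 0);
    }

    static int recv_fd(int sock) {
      union cmsg_buf cmsg;
      struct msghdr msg;
      struct iovec iov;
      struct cmsghdr* c;
      char byte;
      int fd = -1;

      memset(&cmsg, 0, sizeof(cmsg));
      memset(&msg, 0, sizeof(msg));
      iov.iov_base = &byte;
      iov.iov_len = 1;
      msg.msg_iov = &iov;
      msg.msg_iovlen = 1;
      msg.msg_control = &cmsg.hdr;
      msg.msg_controllen = sizeof(cmsg);

      if (recvmsg(sock, &msg, 0) < 0)
        return -1;

      for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c))
        if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_RIGHTS)
          memcpy(&fd, CMSG_DATA(c), sizeof(fd));      /* copy the descriptor out */

      return fd;
    }

    int main(void) {
      const char greeting[] = "hello over a passed descriptor\n";
      int sv[2];
      int fd;

      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
        return 1;
      if (send_fd(sv[0], STDOUT_FILENO) < 0)
        return 1;
      fd = recv_fd(sv[1]);
      if (fd < 0)
        return 1;

      /* The received descriptor is a second handle to stdout. */
      (void) write(fd, greeting, sizeof(greeting) - 1);

      close(fd);
      close(sv[0]);
      close(sv[1]);
      return 0;
    }
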
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/tcp.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/tcp.cpp
index 73fc657..d6c848f 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/tcp.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/tcp.cpp
@@ -28,16 +28,39 @@
#include <errno.h>
-static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
- struct sockaddr_storage saddr;
+static int maybe_bind_socket(int fd) {
+ union uv__sockaddr s;
socklen_t slen;
+
+ slen = sizeof(s);
+ memset(&s, 0, sizeof(s));
+
+ if (getsockname(fd, &s.addr, &slen))
+ return UV__ERR(errno);
+
+ if (s.addr.sa_family == AF_INET)
+ if (s.in.sin_port != 0)
+ return 0; /* Already bound to a port. */
+
+ if (s.addr.sa_family == AF_INET6)
+ if (s.in6.sin6_port != 0)
+ return 0; /* Already bound to a port. */
+
+ /* Bind to an arbitrary port. */
+ if (bind(fd, &s.addr, slen))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+static int new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
int sockfd;
int err;
- err = uv__socket(domain, SOCK_STREAM, 0);
- if (err < 0)
- return err;
- sockfd = err;
+ sockfd = uv__socket(domain, SOCK_STREAM, 0);
+ if (sockfd < 0)
+ return sockfd;
err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
if (err) {
@@ -45,74 +68,44 @@
return err;
}
- if (flags & UV_HANDLE_BOUND) {
- /* Bind this new socket to an arbitrary port */
- slen = sizeof(saddr);
- memset(&saddr, 0, sizeof(saddr));
- if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) {
- uv__close(sockfd);
- return UV__ERR(errno);
- }
-
- if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) {
- uv__close(sockfd);
- return UV__ERR(errno);
- }
- }
+ if (flags & UV_HANDLE_BOUND)
+ return maybe_bind_socket(sockfd);
return 0;
}
-static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
- struct sockaddr_storage saddr;
- socklen_t slen;
+static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
+ int sockfd;
+ int err;
- if (domain == AF_UNSPEC) {
- handle->flags |= flags;
- return 0;
- }
+ if (domain == AF_UNSPEC)
+ goto out;
- if (uv__stream_fd(handle) != -1) {
+ sockfd = uv__stream_fd(handle);
+ if (sockfd == -1)
+ return new_socket(handle, domain, flags);
- if (flags & UV_HANDLE_BOUND) {
+ if (!(flags & UV_HANDLE_BOUND))
+ goto out;
- if (handle->flags & UV_HANDLE_BOUND) {
- /* It is already bound to a port. */
- handle->flags |= flags;
- return 0;
- }
+ if (handle->flags & UV_HANDLE_BOUND)
+ goto out; /* Already bound to a port. */
- /* Query to see if tcp socket is bound. */
- slen = sizeof(saddr);
- memset(&saddr, 0, sizeof(saddr));
- if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen))
- return UV__ERR(errno);
+ err = maybe_bind_socket(sockfd);
+ if (err)
+ return err;
- if ((saddr.ss_family == AF_INET6 &&
- ((struct sockaddr_in6*) &saddr)->sin6_port != 0) ||
- (saddr.ss_family == AF_INET &&
- ((struct sockaddr_in*) &saddr)->sin_port != 0)) {
- /* Handle is already bound to a port. */
- handle->flags |= flags;
- return 0;
- }
+out:
- /* Bind to arbitrary port */
- if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen))
- return UV__ERR(errno);
- }
-
- handle->flags |= flags;
- return 0;
- }
-
- return new_socket(handle, domain, flags);
+ handle->flags |= flags;
+ return 0;
}
int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
int domain;
+ int err;
/* Use the lower 8 bits for the domain */
domain = flags & 0xFF;
@@ -129,9 +122,12 @@
*/
if (domain != AF_UNSPEC) {
- int err = maybe_new_socket(tcp, domain, 0);
+ err = new_socket(tcp, domain, 0);
if (err) {
- QUEUE_REMOVE(&tcp->handle_queue);
+ uv__queue_remove(&tcp->handle_queue);
+ if (tcp->io_watcher.fd != -1)
+ uv__close(tcp->io_watcher.fd);
+ tcp->io_watcher.fd = -1;
return err;
}
}
@@ -256,7 +252,7 @@
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
- QUEUE_INIT(&req->queue);
+ uv__queue_init(&req->queue);
handle->connect_req = req;
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
@@ -317,7 +313,7 @@
struct linger l = { 1, 0 };
/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
- if (handle->flags & UV_HANDLE_SHUTTING)
+ if (uv__is_stream_shutting(handle))
return UV_EINVAL;
fd = uv__stream_fd(handle);
@@ -338,24 +334,12 @@
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
- static int single_accept_cached = -1;
- unsigned long flags;
- int single_accept;
+ unsigned int flags;
int err;
if (tcp->delayed_error)
return tcp->delayed_error;
- single_accept = uv__load_relaxed(&single_accept_cached);
- if (single_accept == -1) {
- const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
- single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
- uv__store_relaxed(&single_accept_cached, single_accept);
- }
-
- if (single_accept)
- tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
-
flags = 0;
#if defined(__MVS__)
/* on zOS the listen call does not bind automatically
@@ -460,10 +444,6 @@
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
- if (enable)
- handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT;
- else
- handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
return 0;
}
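
new_socket() and maybe_new_socket() now share one maybe_bind_socket() helper: query the socket with getsockname() and, if no port is assigned yet, bind() the zeroed address back so the kernel picks an ephemeral port. A standalone sketch of that system-call sequence (illustrative names, POSIX sockets assumed):

    #include <arpa/inet.h>
    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    union any_sockaddr {
      struct sockaddr addr;
      struct sockaddr_in in;
      struct sockaddr_in6 in6;
    };

    /* Bind `fd` to an ephemeral port unless it already has one. */
    static int bind_if_unbound(int fd) {
      union any_sockaddr s;
      socklen_t slen = sizeof(s);

      memset(&s, 0, sizeof(s));
      if (getsockname(fd, &s.addr, &slen))
        return -errno;

      if (s.addr.sa_family == AF_INET && s.in.sin_port != 0)
        return 0;   /* already bound to a port */
      if (s.addr.sa_family == AF_INET6 && s.in6.sin6_port != 0)
        return 0;   /* already bound to a port */

      /* The port is still zero, so the kernel picks an arbitrary free one. */
      if (bind(fd, &s.addr, slen))
        return -errno;

      return 0;
    }

    int main(void) {
      union any_sockaddr s;
      socklen_t slen = sizeof(s);
      int fd;

      fd = socket(AF_INET, SOCK_STREAM, 0);
      if (fd < 0 || bind_if_unbound(fd) != 0)
        return 1;

      memset(&s, 0, sizeof(s));
      if (getsockname(fd, &s.addr, &slen))
        return 1;
      printf("bound to port %d\n", ntohs(s.in.sin_port));

      close(fd);
      return 0;
    }
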
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/thread.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/thread.cpp
index 392a071..f860094 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/thread.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/thread.cpp
@@ -41,126 +41,20 @@
#include <gnu/libc-version.h> /* gnu_get_libc_version() */
#endif
+#if defined(__linux__)
+# include <sched.h>
+# define uv__cpu_set_t cpu_set_t
+#elif defined(__FreeBSD__)
+# include <sys/param.h>
+# include <sys/cpuset.h>
+# include <pthread_np.h>
+# define uv__cpu_set_t cpuset_t
+#endif
+
+
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
-#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
-STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
-#endif
-
-/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
-#if defined(_AIX) || \
- defined(__OpenBSD__) || \
- !defined(PTHREAD_BARRIER_SERIAL_THREAD)
-int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
- struct _uv_barrier* b;
- int rc;
-
- if (barrier == NULL || count == 0)
- return UV_EINVAL;
-
- b = (_uv_barrier*)uv__malloc(sizeof(*b));
- if (b == NULL)
- return UV_ENOMEM;
-
- b->in = 0;
- b->out = 0;
- b->threshold = count;
-
- rc = uv_mutex_init(&b->mutex);
- if (rc != 0)
- goto error2;
-
- rc = uv_cond_init(&b->cond);
- if (rc != 0)
- goto error;
-
- barrier->b = b;
- return 0;
-
-error:
- uv_mutex_destroy(&b->mutex);
-error2:
- uv__free(b);
- return rc;
-}
-
-int uv_barrier_wait(uv_barrier_t* barrier) {
- struct _uv_barrier* b;
- int last;
-
- if (barrier == NULL || barrier->b == NULL)
- return UV_EINVAL;
-
- b = barrier->b;
- /* Lock the mutex*/
- uv_mutex_lock(&b->mutex);
-
- if (++b->in == b->threshold) {
- b->in = 0;
- b->out = b->threshold;
- uv_cond_signal(&b->cond);
- } else {
- do
- uv_cond_wait(&b->cond, &b->mutex);
- while (b->in != 0);
- }
-
- last = (--b->out == 0);
- uv_cond_signal(&b->cond);
-
- uv_mutex_unlock(&b->mutex);
- return last;
-}
-
-void uv_barrier_destroy(uv_barrier_t* barrier) {
- struct _uv_barrier* b;
-
- b = barrier->b;
- uv_mutex_lock(&b->mutex);
-
- assert(b->in == 0);
- while (b->out != 0)
- uv_cond_wait(&b->cond, &b->mutex);
-
- if (b->in != 0)
- abort();
-
- uv_mutex_unlock(&b->mutex);
- uv_mutex_destroy(&b->mutex);
- uv_cond_destroy(&b->cond);
-
- uv__free(barrier->b);
- barrier->b = NULL;
-}
-
-#else
-
-int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
- return UV__ERR(pthread_barrier_init(barrier, NULL, count));
-}
-
-
-int uv_barrier_wait(uv_barrier_t* barrier) {
- int rc;
-
- rc = pthread_barrier_wait(barrier);
- if (rc != 0)
- if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
- abort();
-
- return rc == PTHREAD_BARRIER_SERIAL_THREAD;
-}
-
-
-void uv_barrier_destroy(uv_barrier_t* barrier) {
- if (pthread_barrier_destroy(barrier))
- abort();
-}
-
-#endif
-
-
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
@@ -276,6 +170,106 @@
return UV__ERR(err);
}
+#if UV__CPU_AFFINITY_SUPPORTED
+
+int uv_thread_setaffinity(uv_thread_t* tid,
+ char* cpumask,
+ char* oldmask,
+ size_t mask_size) {
+ int i;
+ int r;
+ uv__cpu_set_t cpuset;
+ int cpumasksize;
+
+ cpumasksize = uv_cpumask_size();
+ if (cpumasksize < 0)
+ return cpumasksize;
+ if (mask_size < (size_t)cpumasksize)
+ return UV_EINVAL;
+
+ if (oldmask != NULL) {
+ r = uv_thread_getaffinity(tid, oldmask, mask_size);
+ if (r < 0)
+ return r;
+ }
+
+ CPU_ZERO(&cpuset);
+ for (i = 0; i < cpumasksize; i++)
+ if (cpumask[i])
+ CPU_SET(i, &cpuset);
+
+#if defined(__ANDROID__)
+ if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
+ r = errno;
+ else
+ r = 0;
+#else
+ r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
+#endif
+
+ return UV__ERR(r);
+}
+
+
+int uv_thread_getaffinity(uv_thread_t* tid,
+ char* cpumask,
+ size_t mask_size) {
+ int r;
+ int i;
+ uv__cpu_set_t cpuset;
+ int cpumasksize;
+
+ cpumasksize = uv_cpumask_size();
+ if (cpumasksize < 0)
+ return cpumasksize;
+ if (mask_size < (size_t)cpumasksize)
+ return UV_EINVAL;
+
+ CPU_ZERO(&cpuset);
+#if defined(__ANDROID__)
+ if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
+ r = errno;
+ else
+ r = 0;
+#else
+ r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
+#endif
+ if (r)
+ return UV__ERR(r);
+ for (i = 0; i < cpumasksize; i++)
+ cpumask[i] = !!CPU_ISSET(i, &cpuset);
+
+ return 0;
+}
+#else
+int uv_thread_setaffinity(uv_thread_t* tid,
+ char* cpumask,
+ char* oldmask,
+ size_t mask_size) {
+ return UV_ENOTSUP;
+}
+
+
+int uv_thread_getaffinity(uv_thread_t* tid,
+ char* cpumask,
+ size_t mask_size) {
+ return UV_ENOTSUP;
+}
+#endif /* UV__CPU_AFFINITY_SUPPORTED */
+
+int uv_thread_getcpu(void) {
+#if UV__CPU_AFFINITY_SUPPORTED
+ int cpu;
+
+ cpu = sched_getcpu();
+ if (cpu < 0)
+ return UV__ERR(errno);
+
+ return cpu;
+#else
+ return UV_ENOTSUP;
+#endif
+}
uv_thread_t uv_thread_self(void) {
return pthread_self();
@@ -577,7 +571,7 @@
uv_mutex_lock(&sem->mutex);
sem->value++;
if (sem->value == 1)
- uv_cond_signal(&sem->cond);
+ uv_cond_signal(&sem->cond); /* Release one to replace us. */
uv_mutex_unlock(&sem->mutex);
}
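
The new uv_thread_setaffinity()/uv_thread_getaffinity() are thin wrappers over cpu_set_t plus pthread_setaffinity_np() (sched_setaffinity() on Android), and uv_thread_getcpu() wraps sched_getcpu(). A minimal Linux-only sketch of the underlying calls, pinning the calling thread to CPU 0 (not the libuv API itself):

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    int main(void) {
      cpu_set_t cpuset;
      int rc;

      CPU_ZERO(&cpuset);
      CPU_SET(0, &cpuset);                 /* allow CPU 0 only */

      rc = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
      if (rc != 0) {
        fprintf(stderr, "pthread_setaffinity_np: %d\n", rc);
        return 1;
      }

      /* Equivalent to what uv_thread_getcpu() returns on this platform. */
      printf("now running on CPU %d\n", sched_getcpu());
      return 0;
    }
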
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/tty.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/tty.cpp
index ed81e26..1304c6d 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/tty.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/tty.cpp
@@ -21,7 +21,6 @@
#include "uv.h"
#include "internal.h"
-#include "spinlock.h"
#include <stdlib.h>
#include <assert.h>
@@ -30,6 +29,8 @@
#include <errno.h>
#include <sys/ioctl.h>
+#include <atomic>
+
#if defined(__MVS__) && !defined(IMAXBEL)
#define IMAXBEL 0
#endif
@@ -64,7 +65,7 @@
static int orig_termios_fd = -1;
static struct termios orig_termios;
-static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
+static std::atomic<int> termios_spinlock;
int uv__tcsetattr(int fd, int how, const struct termios *term) {
int rc;
@@ -81,7 +82,7 @@
static int uv__tty_is_peripheral(const int fd) {
int result;
-#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#if defined(__linux__) || defined(__FreeBSD__)
int dummy;
result = ioctl(fd, TIOCGPTN, &dummy) != 0;
@@ -114,7 +115,7 @@
}
/* Lookup stat structure behind the file descriptor. */
- if (fstat(fd, &sb) != 0)
+ if (uv__fstat(fd, &sb) != 0)
abort();
/* Assert character device. */
@@ -223,7 +224,7 @@
int rc = r;
if (newfd != -1)
uv__close(newfd);
- QUEUE_REMOVE(&tty->handle_queue);
+ uv__queue_remove(&tty->handle_queue);
do
r = fcntl(fd, F_SETFL, saved_flags);
while (r == -1 && errno == EINTR);
@@ -281,6 +282,7 @@
int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
struct termios tmp;
+ int expected;
int fd;
int rc;
@@ -297,12 +299,16 @@
return UV__ERR(errno);
/* This is used for uv_tty_reset_mode() */
- uv_spinlock_lock(&termios_spinlock);
+ do
+ expected = 0;
+ while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));
+
if (orig_termios_fd == -1) {
orig_termios = tty->orig_termios;
orig_termios_fd = fd;
}
- uv_spinlock_unlock(&termios_spinlock);
+
+ atomic_store(&termios_spinlock, 0);
}
tmp = tty->orig_termios;
@@ -361,7 +367,7 @@
if (isatty(file))
return UV_TTY;
- if (fstat(file, &s)) {
+ if (uv__fstat(file, &s)) {
#if defined(__PASE__)
/* On ibmi receiving RST from TCP instead of FIN immediately puts fd into
* an error state. fstat will return EINVAL, getsockname will also return
@@ -446,14 +452,15 @@
int err;
saved_errno = errno;
- if (!uv_spinlock_trylock(&termios_spinlock))
+
+ if (atomic_exchange(&termios_spinlock, 1))
return UV_EBUSY; /* In uv_tty_set_mode(). */
err = 0;
if (orig_termios_fd != -1)
err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
- uv_spinlock_unlock(&termios_spinlock);
+ atomic_store(&termios_spinlock, 0);
errno = saved_errno;
return err;
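
With spinlock.h deleted, the termios state is now guarded by a plain std::atomic<int> used as a spinlock: lock by compare-exchanging 0 to 1 (resetting `expected` on every retry), try-lock via exchange(), unlock by storing 0. A tiny standalone sketch of that idiom, with illustrative names:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    static std::atomic<int> lock_word{0};
    static long counter = 0;

    static void spin_lock(void) {
      int expected;
      do
        expected = 0;                      /* reset: CAS updates it on failure */
      while (!lock_word.compare_exchange_strong(expected, 1));
    }

    static bool spin_trylock(void) {
      return lock_word.exchange(1) == 0;   /* as in uv_tty_reset_mode() above */
    }

    static void spin_unlock(void) {
      lock_word.store(0);
    }

    static void bump(int n) {
      for (int i = 0; i < n; i++) {
        spin_lock();
        counter++;
        spin_unlock();
      }
    }

    int main(void) {
      std::thread t(bump, 100000);
      bump(100000);
      t.join();
      std::printf("counter = %ld, trylock on idle lock = %d\n",
                  counter, (int) spin_trylock());
      return 0;
    }
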
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/unix/udp.cpp b/wpinet/src/main/native/thirdparty/libuv/src/unix/udp.cpp
index a130aea..cbee16b 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/unix/udp.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/unix/udp.cpp
@@ -40,12 +40,6 @@
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif
-union uv__sockaddr {
- struct sockaddr_in6 in6;
- struct sockaddr_in in;
- struct sockaddr addr;
-};
-
static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_udp_t* handle);
@@ -54,36 +48,6 @@
int domain,
unsigned int flags);
-#if HAVE_MMSG
-
-#define UV__MMSG_MAXWIDTH 20
-
-static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
-static void uv__udp_sendmmsg(uv_udp_t* handle);
-
-static int uv__recvmmsg_avail;
-static int uv__sendmmsg_avail;
-static uv_once_t once = UV_ONCE_INIT;
-
-static void uv__udp_mmsg_init(void) {
- int ret;
- int s;
- s = uv__socket(AF_INET, SOCK_DGRAM, 0);
- if (s < 0)
- return;
- ret = uv__sendmmsg(s, NULL, 0);
- if (ret == 0 || errno != ENOSYS) {
- uv__sendmmsg_avail = 1;
- uv__recvmmsg_avail = 1;
- } else {
- ret = uv__recvmmsg(s, NULL, 0);
- if (ret == 0 || errno != ENOSYS)
- uv__recvmmsg_avail = 1;
- }
- uv__close(s);
-}
-
-#endif
void uv__udp_close(uv_udp_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher);
@@ -98,18 +62,18 @@
void uv__udp_finish_close(uv_udp_t* handle) {
uv_udp_send_t* req;
- QUEUE* q;
+ struct uv__queue* q;
assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
assert(handle->io_watcher.fd == -1);
- while (!QUEUE_EMPTY(&handle->write_queue)) {
- q = QUEUE_HEAD(&handle->write_queue);
- QUEUE_REMOVE(q);
+ while (!uv__queue_empty(&handle->write_queue)) {
+ q = uv__queue_head(&handle->write_queue);
+ uv__queue_remove(q);
- req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req = uv__queue_data(q, uv_udp_send_t, queue);
req->status = UV_ECANCELED;
- QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
uv__udp_run_completed(handle);
@@ -126,16 +90,16 @@
static void uv__udp_run_completed(uv_udp_t* handle) {
uv_udp_send_t* req;
- QUEUE* q;
+ struct uv__queue* q;
assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
handle->flags |= UV_HANDLE_UDP_PROCESSING;
- while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
- q = QUEUE_HEAD(&handle->write_completed_queue);
- QUEUE_REMOVE(q);
+ while (!uv__queue_empty(&handle->write_completed_queue)) {
+ q = uv__queue_head(&handle->write_completed_queue);
+ uv__queue_remove(q);
- req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req = uv__queue_data(q, uv_udp_send_t, queue);
uv__req_unregister(handle->loop, req);
handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
@@ -157,7 +121,7 @@
req->send_cb(req, req->status);
}
- if (QUEUE_EMPTY(&handle->write_queue)) {
+ if (uv__queue_empty(&handle->write_queue)) {
/* Pending queue and completion queue empty, stop watcher. */
uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
if (!uv__io_active(&handle->io_watcher, POLLIN))
@@ -183,11 +147,11 @@
}
}
-#if HAVE_MMSG
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
- struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
- struct iovec iov[UV__MMSG_MAXWIDTH];
- struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
+#if defined(__linux__) || defined(__FreeBSD__)
+ struct sockaddr_in6 peers[20];
+ struct iovec iov[ARRAY_SIZE(peers)];
+ struct mmsghdr msgs[ARRAY_SIZE(peers)];
ssize_t nread;
uv_buf_t chunk_buf;
size_t chunks;
@@ -212,7 +176,7 @@
}
do
- nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
+ nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
while (nread == -1 && errno == EINTR);
if (nread < 1) {
@@ -240,8 +204,10 @@
handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
}
return nread;
+#else /* __linux__ || __FreeBSD__ */
+ return UV_ENOSYS;
+#endif /* __linux__ || __FreeBSD__ */
}
-#endif
static void uv__udp_recvmsg(uv_udp_t* handle) {
struct sockaddr_storage peer;
@@ -268,14 +234,12 @@
}
assert(buf.base != NULL);
-#if HAVE_MMSG
if (uv_udp_using_recvmmsg(handle)) {
nread = uv__udp_recvmmsg(handle, &buf);
if (nread > 0)
count -= nread;
continue;
}
-#endif
memset(&h, 0, sizeof(h));
memset(&peer, 0, sizeof(peer));
@@ -311,25 +275,25 @@
&& handle->recv_cb != NULL);
}
-#if HAVE_MMSG
-static void uv__udp_sendmmsg(uv_udp_t* handle) {
+static void uv__udp_sendmsg(uv_udp_t* handle) {
+#if defined(__linux__) || defined(__FreeBSD__)
uv_udp_send_t* req;
- struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
- struct uv__mmsghdr *p;
- QUEUE* q;
+ struct mmsghdr h[20];
+ struct mmsghdr* p;
+ struct uv__queue* q;
ssize_t npkts;
size_t pkts;
size_t i;
- if (QUEUE_EMPTY(&handle->write_queue))
+ if (uv__queue_empty(&handle->write_queue))
return;
write_queue_drain:
- for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
- pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
- ++pkts, q = QUEUE_HEAD(q)) {
+ for (pkts = 0, q = uv__queue_head(&handle->write_queue);
+ pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
+ ++pkts, q = uv__queue_head(q)) {
assert(q != NULL);
- req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
p = &h[pkts];
@@ -355,22 +319,22 @@
}
do
- npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
+ npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
while (npkts == -1 && errno == EINTR);
if (npkts < 1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
return;
- for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
+ for (i = 0, q = uv__queue_head(&handle->write_queue);
i < pkts && q != &handle->write_queue;
- ++i, q = QUEUE_HEAD(&handle->write_queue)) {
+ ++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
- req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
req->status = UV__ERR(errno);
- QUEUE_REMOVE(&req->queue);
- QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ uv__queue_remove(&req->queue);
+ uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
uv__io_feed(handle->loop, &handle->io_watcher);
return;
@@ -379,11 +343,11 @@
/* Safety: npkts known to be >0 below. Hence cast from ssize_t
* to size_t safe.
*/
- for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
+ for (i = 0, q = uv__queue_head(&handle->write_queue);
i < (size_t)npkts && q != &handle->write_queue;
- ++i, q = QUEUE_HEAD(&handle->write_queue)) {
+ ++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
- req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
req->status = req->bufs[0].len;
@@ -393,37 +357,25 @@
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
- QUEUE_REMOVE(&req->queue);
- QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ uv__queue_remove(&req->queue);
+ uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
/* couldn't batch everything, continue sending (jump to avoid stack growth) */
- if (!QUEUE_EMPTY(&handle->write_queue))
+ if (!uv__queue_empty(&handle->write_queue))
goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher);
- return;
-}
-#endif
-
-static void uv__udp_sendmsg(uv_udp_t* handle) {
+#else /* __linux__ || __FreeBSD__ */
uv_udp_send_t* req;
struct msghdr h;
- QUEUE* q;
+ struct uv__queue* q;
ssize_t size;
-#if HAVE_MMSG
- uv_once(&once, uv__udp_mmsg_init);
- if (uv__sendmmsg_avail) {
- uv__udp_sendmmsg(handle);
- return;
- }
-#endif
-
- while (!QUEUE_EMPTY(&handle->write_queue)) {
- q = QUEUE_HEAD(&handle->write_queue);
+ while (!uv__queue_empty(&handle->write_queue)) {
+ q = uv__queue_head(&handle->write_queue);
assert(q != NULL);
- req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
memset(&h, 0, sizeof h);
@@ -462,10 +414,11 @@
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
- QUEUE_REMOVE(&req->queue);
- QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ uv__queue_remove(&req->queue);
+ uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
+#endif /* __linux__ || __FreeBSD__ */
}
/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
@@ -495,7 +448,8 @@
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
}
-#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
+#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
+ !defined(__sun__)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
#else
@@ -775,7 +729,7 @@
memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
handle->send_queue_count++;
- QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
+ uv__queue_insert_tail(&handle->write_queue, &req->queue);
uv__handle_start(handle);
if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
@@ -785,7 +739,7 @@
* away. In such cases the `io_watcher` has to be queued for asynchronous
* write.
*/
- if (!QUEUE_EMPTY(&handle->write_queue))
+ if (!uv__queue_empty(&handle->write_queue))
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
} else {
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
@@ -1053,19 +1007,17 @@
handle->send_queue_size = 0;
handle->send_queue_count = 0;
uv__io_init(&handle->io_watcher, uv__udp_io, fd);
- QUEUE_INIT(&handle->write_queue);
- QUEUE_INIT(&handle->write_completed_queue);
+ uv__queue_init(&handle->write_queue);
+ uv__queue_init(&handle->write_completed_queue);
return 0;
}
int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
-#if HAVE_MMSG
- if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
- uv_once(&once, uv__udp_mmsg_init);
- return uv__recvmmsg_avail;
- }
+#if defined(__linux__) || defined(__FreeBSD__)
+ if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
+ return 1;
#endif
return 0;
}
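
On Linux and FreeBSD the UDP path now calls sendmmsg()/recvmmsg() directly instead of probing for the syscalls at runtime, batching up to 20 packets per call. A compact Linux-only sketch of batching several datagrams through one sendmmsg() call (loopback destination assumed, error handling trimmed):

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void) {
      const char* payloads[3] = { "one", "two", "three" };
      struct mmsghdr msgs[3];
      struct iovec iov[3];
      struct sockaddr_in dst;
      int npkts;
      int fd;
      int i;

      fd = socket(AF_INET, SOCK_DGRAM, 0);
      if (fd < 0)
        return 1;

      memset(&dst, 0, sizeof(dst));
      dst.sin_family = AF_INET;
      dst.sin_port = htons(9999);
      dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

      memset(msgs, 0, sizeof(msgs));
      for (i = 0; i < 3; i++) {
        iov[i].iov_base = (void*) payloads[i];
        iov[i].iov_len = strlen(payloads[i]);
        msgs[i].msg_hdr.msg_iov = &iov[i];
        msgs[i].msg_hdr.msg_iovlen = 1;
        msgs[i].msg_hdr.msg_name = &dst;
        msgs[i].msg_hdr.msg_namelen = sizeof(dst);
      }

      /* One system call submits all three datagrams (or a prefix of them). */
      npkts = sendmmsg(fd, msgs, 3, 0);
      printf("sendmmsg() sent %d packets\n", npkts);

      close(fd);
      return 0;
    }
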
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/uv-common.cpp b/wpinet/src/main/native/thirdparty/libuv/src/uv-common.cpp
index 8ab600d..5c6d841 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/uv-common.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/uv-common.cpp
@@ -128,6 +128,39 @@
return 0;
}
+
+void uv_os_free_passwd(uv_passwd_t* pwd) {
+ if (pwd == NULL)
+ return;
+
+ /* On unix, the memory for name, shell, and homedir is allocated in a single
+ * uv__malloc() call. The base of the pointer is stored in pwd->username, so
+ * that is the field that needs to be freed.
+ */
+ uv__free(pwd->username);
+#ifdef _WIN32
+ uv__free(pwd->homedir);
+#endif
+ pwd->username = NULL;
+ pwd->shell = NULL;
+ pwd->homedir = NULL;
+}
+
+
+void uv_os_free_group(uv_group_t *grp) {
+ if (grp == NULL)
+ return;
+
+ /* The memory for groupname and members is allocated in a single
+ * uv__malloc() call. The base of the pointer is stored in grp->members, so
+ * that is the only field that needs to be freed.
+ */
+ uv__free(grp->members);
+ grp->members = NULL;
+ grp->groupname = NULL;
+}
+
+
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
size_t uv_handle_size(uv_handle_type type) {
@@ -500,26 +533,17 @@
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
- QUEUE queue;
- QUEUE* q;
+ struct uv__queue queue;
+ struct uv__queue* q;
uv_handle_t* h;
-// FIXME: GCC 12.1 gives a possibly real warning, but we don't know how to fix
-// it
-#if __GNUC__ >= 12
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdangling-pointer="
-#endif // __GNUC__ >= 12
- QUEUE_MOVE(&loop->handle_queue, &queue);
-#if __GNUC__ >= 12
-#pragma GCC diagnostic pop
-#endif // __GNUC__ >= 12
- while (!QUEUE_EMPTY(&queue)) {
- q = QUEUE_HEAD(&queue);
- h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+ uv__queue_move(&loop->handle_queue, &queue);
+ while (!uv__queue_empty(&queue)) {
+ q = uv__queue_head(&queue);
+ h = uv__queue_data(q, uv_handle_t, handle_queue);
- QUEUE_REMOVE(q);
- QUEUE_INSERT_TAIL(&loop->handle_queue, q);
+ uv__queue_remove(q);
+ uv__queue_insert_tail(&loop->handle_queue, q);
if (h->flags & UV_HANDLE_INTERNAL) continue;
walk_cb(h, arg);
@@ -529,14 +553,14 @@
static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
const char* type;
- QUEUE* q;
+ struct uv__queue* q;
uv_handle_t* h;
if (loop == NULL)
loop = uv_default_loop();
- QUEUE_FOREACH(q, &loop->handle_queue) {
- h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+ uv__queue_foreach(q, &loop->handle_queue) {
+ h = uv__queue_data(q, uv_handle_t, handle_queue);
if (only_active && !uv__is_active(h))
continue;
@@ -659,14 +683,22 @@
void uv__fs_scandir_cleanup(uv_fs_t* req) {
uv__dirent_t** dents;
+ unsigned int* nbufs;
+ unsigned int i;
+ unsigned int n;
- unsigned int* nbufs = uv__get_nbufs(req);
+ if (req->result >= 0) {
+ dents = (uv__dirent_t**)(req->ptr);
+ nbufs = uv__get_nbufs(req);
- dents = (uv__dirent_t**)(req->ptr);
- if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
- (*nbufs)--;
- for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
- uv__fs_scandir_free(dents[*nbufs]);
+ i = 0;
+ if (*nbufs > 0)
+ i = *nbufs - 1;
+
+ n = (unsigned int) req->result;
+ for (; i < n; i++)
+ uv__fs_scandir_free(dents[i]);
+ }
uv__fs_scandir_free(req->ptr);
req->ptr = NULL;
@@ -823,7 +855,7 @@
int uv_loop_close(uv_loop_t* loop) {
- QUEUE* q;
+ struct uv__queue* q;
uv_handle_t* h;
#ifndef NDEBUG
void* saved_data;
@@ -832,8 +864,8 @@
if (uv__has_active_reqs(loop))
return UV_EBUSY;
- QUEUE_FOREACH(q, &loop->handle_queue) {
- h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+ uv__queue_foreach(q, &loop->handle_queue) {
+ h = uv__queue_data(q, uv_handle_t, handle_queue);
if (!(h->flags & UV_HANDLE_INTERNAL))
return UV_EBUSY;
}
@@ -897,12 +929,17 @@
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+#ifdef __linux__
+ (void) &count;
+ uv__free(cpu_infos);
+#else
int i;
for (i = 0; i < count; i++)
uv__free(cpu_infos[i].model);
uv__free(cpu_infos);
+#endif /* __linux__ */
}
@@ -914,9 +951,9 @@
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
- static int was_shutdown;
+ static std::atomic<int> was_shutdown;
- if (uv__load_relaxed(&was_shutdown))
+ if (uv__exchange_int_relaxed(&was_shutdown, 1))
return;
uv__process_title_cleanup();
@@ -927,7 +964,6 @@
#else
uv__threadpool_cleanup();
#endif
- uv__store_relaxed(&was_shutdown, 1);
}
@@ -973,6 +1009,15 @@
}
+int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) {
+ memcpy(metrics,
+ &uv__get_loop_metrics(loop)->metrics,
+ sizeof(*metrics));
+
+ return 0;
+}
+
+
uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
uv__loop_metrics_t* loop_metrics;
uint64_t entry_time;
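
uv_metrics_info() simply copies out the counters that the new uv__metrics_inc_* macros accumulate in the loop's internal fields. Reading them from application code looks roughly like this, given the uv_metrics_t declared in uv.h for this libuv version (loop_count, events, events_waiting):

    #include <uv.h>
    #include <stdio.h>

    static void on_idle(uv_idle_t* handle) {
      static int ticks;
      if (++ticks == 5)
        uv_close((uv_handle_t*) handle, NULL);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_idle_t idle;
      uv_metrics_t metrics;

      uv_idle_init(loop, &idle);
      uv_idle_start(&idle, on_idle);
      uv_run(loop, UV_RUN_DEFAULT);

      if (uv_metrics_info(loop, &metrics) == 0)
        printf("iterations=%llu events=%llu waiting=%llu\n",
               (unsigned long long) metrics.loop_count,
               (unsigned long long) metrics.events,
               (unsigned long long) metrics.events_waiting);

      uv_loop_close(loop);
      return 0;
    }
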
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/uv-common.h b/wpinet/src/main/native/thirdparty/libuv/src/uv-common.h
index 6001b0c..5dce8ea 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/uv-common.h
+++ b/wpinet/src/main/native/thirdparty/libuv/src/uv-common.h
@@ -30,12 +30,9 @@
#include <assert.h>
#include <stdarg.h>
#include <stddef.h>
+#include <stdint.h>
-#if defined(_MSC_VER) && _MSC_VER < 1600
-# include "uv/stdint-msvc2008.h"
-#else
-# include <stdint.h>
-#endif
+#include <atomic>
#include "uv.h"
#include "uv/tree.h"
@@ -53,19 +50,25 @@
#endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#define ARRAY_END(a) ((a) + ARRAY_SIZE(a))
#define container_of(ptr, type, member) \
((type *) ((char *) (ptr) - offsetof(type, member)))
+/* C11 defines static_assert to be a macro which calls _Static_assert. */
+#if defined(static_assert)
+#define STATIC_ASSERT(expr) static_assert(expr, #expr)
+#else
#define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
+#endif
-#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7)
-#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED)
-#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
+#ifdef _MSC_VER
+#define uv__exchange_int_relaxed(p, v) \
+ InterlockedExchangeNoFence((LONG volatile*)(p), v)
#else
-#define uv__load_relaxed(p) (*p)
-#define uv__store_relaxed(p, v) do *p = v; while (0)
+#define uv__exchange_int_relaxed(p, v) \
+ std::atomic_exchange_explicit((std::atomic<int>*)(p), v, std::memory_order_relaxed)
#endif
#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
@@ -83,7 +86,6 @@
/* Used by streams. */
UV_HANDLE_LISTENING = 0x00000040,
UV_HANDLE_CONNECTION = 0x00000080,
- UV_HANDLE_SHUTTING = 0x00000100,
UV_HANDLE_SHUT = 0x00000200,
UV_HANDLE_READ_PARTIAL = 0x00000400,
UV_HANDLE_READ_EOF = 0x00000800,
@@ -263,6 +265,14 @@
#define uv__is_closing(h) \
(((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0)
+#if defined(_WIN32)
+# define uv__is_stream_shutting(h) \
+ (h->stream.conn.shutdown_req != NULL)
+#else
+# define uv__is_stream_shutting(h) \
+ (h->shutdown_req != NULL)
+#endif
+
#define uv__handle_start(h) \
do { \
if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \
@@ -311,7 +321,7 @@
(h)->loop = (loop_); \
(h)->type = (type_); \
(h)->flags = UV_HANDLE_REF; /* Ref the loop when active. */ \
- QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue); \
+ uv__queue_insert_tail(&(loop_)->handle_queue, &(h)->handle_queue); \
uv__handle_platform_init(h); \
} \
while (0)
@@ -347,6 +357,21 @@
#define uv__get_loop_metrics(loop) \
(&uv__get_internal_fields(loop)->loop_metrics)
+#define uv__metrics_inc_loop_count(loop) \
+ do { \
+ uv__get_loop_metrics(loop)->metrics.loop_count++; \
+ } while (0)
+
+#define uv__metrics_inc_events(loop, e) \
+ do { \
+ uv__get_loop_metrics(loop)->metrics.events += (e); \
+ } while (0)
+
+#define uv__metrics_inc_events_waiting(loop, e) \
+ do { \
+ uv__get_loop_metrics(loop)->metrics.events_waiting += (e); \
+ } while (0)
+
/* Allocator prototypes */
void *uv__calloc(size_t count, size_t size);
char *uv__strdup(const char* s);
@@ -360,6 +385,7 @@
typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;
struct uv__loop_metrics_s {
+ uv_metrics_t metrics;
uint64_t provider_entry_time;
uint64_t provider_idle_time;
uv_mutex_t lock;
@@ -368,9 +394,38 @@
void uv__metrics_update_idle_time(uv_loop_t* loop);
void uv__metrics_set_provider_entry_time(uv_loop_t* loop);
+#ifdef __linux__
+struct uv__iou {
+ uint32_t* sqhead;
+ uint32_t* sqtail;
+ uint32_t* sqarray;
+ uint32_t sqmask;
+ uint32_t* sqflags;
+ uint32_t* cqhead;
+ uint32_t* cqtail;
+ uint32_t cqmask;
+ void* sq; /* pointer to munmap() on event loop teardown */
+ void* cqe; /* pointer to array of struct uv__io_uring_cqe */
+ void* sqe; /* pointer to array of struct uv__io_uring_sqe */
+ size_t sqlen;
+ size_t cqlen;
+ size_t maxlen;
+ size_t sqelen;
+ int ringfd;
+ uint32_t in_flight;
+ uint32_t flags;
+};
+#endif /* __linux__ */
+
struct uv__loop_internal_fields_s {
unsigned int flags;
uv__loop_metrics_t loop_metrics;
+ int current_timeout;
+#ifdef __linux__
+ struct uv__iou ctl;
+ struct uv__iou iou;
+ void* inv; /* used by uv__platform_invalidate_fd() */
+#endif /* __linux__ */
};
#endif /* UV_COMMON_H_ */
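
When the compiler does not provide static_assert, the STATIC_ASSERT fallback above declares a function whose parameter is an array of size 1 - 2 * !(expr): a false expression yields a negative array size and the build fails. The trick in isolation, with illustrative names:

    #include <stdio.h>

    /* A false `expr` makes the parameter an array of negative size, which is
     * rejected at compile time; a true `expr` makes it a harmless int[1]. */
    #define MY_STATIC_ASSERT(expr) \
      void my_static_assert(int static_assert_failed[1 - 2 * !(expr)])

    MY_STATIC_ASSERT(sizeof(int) >= 2);        /* compiles */
    /* MY_STATIC_ASSERT(sizeof(int) == 1); */  /* would fail to compile */

    int main(void) {
      printf("sizeof(int) = %zu\n", sizeof(int));
      return 0;
    }
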
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/core.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/core.cpp
index 0752edf..87ade7a 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/core.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/core.cpp
@@ -247,6 +247,9 @@
err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;
+ memset(&lfields->loop_metrics.metrics,
+ 0,
+ sizeof(lfields->loop_metrics.metrics));
/* To prevent uninitialized memory access, loop->time must be initialized
* to zero before calling uv_update_time for the first time.
@@ -254,8 +257,8 @@
loop->time = 0;
uv_update_time(loop);
- QUEUE_INIT(&loop->wq);
- QUEUE_INIT(&loop->handle_queue);
+ uv__queue_init(&loop->wq);
+ uv__queue_init(&loop->handle_queue);
loop->active_reqs.count = 0;
loop->active_handles = 0;
@@ -281,9 +284,6 @@
memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);
- loop->active_tcp_streams = 0;
- loop->active_udp_streams = 0;
-
loop->timer_counter = 0;
loop->stop_flag = 0;
@@ -360,7 +360,7 @@
}
uv_mutex_lock(&loop->wq_mutex);
- assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+ assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
assert(!uv__has_active_reqs(loop));
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
@@ -426,6 +426,7 @@
static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
+ uv__loop_internal_fields_t* lfields;
DWORD bytes;
ULONG_PTR key;
OVERLAPPED* overlapped;
@@ -435,9 +436,10 @@
uint64_t user_timeout;
int reset_timeout;
+ lfields = uv__get_internal_fields(loop);
timeout_time = loop->time + timeout;
- if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@@ -452,6 +454,12 @@
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
+ /* Store the current timeout in a location that's globally accessible so
+ * other locations like uv__work_done() can determine whether the events
+ * handled in the callback were already waiting when poll was called.
+ */
+ lfields->current_timeout = timeout;
+
GetQueuedCompletionStatus(loop->iocp,
&bytes,
&key,
@@ -459,6 +467,8 @@
timeout);
if (reset_timeout != 0) {
+ if (overlapped && timeout == 0)
+ uv__metrics_inc_events_waiting(loop, 1);
timeout = user_timeout;
reset_timeout = 0;
}
@@ -471,6 +481,8 @@
uv__metrics_update_idle_time(loop);
if (overlapped) {
+ uv__metrics_inc_events(loop, 1);
+
/* Package was dequeued */
req = uv__overlapped_to_req(overlapped);
uv__insert_pending_req(loop, req);
@@ -505,6 +517,7 @@
static void uv__poll(uv_loop_t* loop, DWORD timeout) {
+ uv__loop_internal_fields_t* lfields;
BOOL success;
uv_req_t* req;
OVERLAPPED_ENTRY overlappeds[128];
@@ -513,11 +526,13 @@
int repeat;
uint64_t timeout_time;
uint64_t user_timeout;
+ uint64_t actual_timeout;
int reset_timeout;
+ lfields = uv__get_internal_fields(loop);
timeout_time = loop->time + timeout;
- if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@@ -526,12 +541,20 @@
}
for (repeat = 0; ; repeat++) {
+ actual_timeout = timeout;
+
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
+ /* Store the current timeout in a location that's globally accessible so
+ * other locations like uv__work_done() can determine whether the queue
+ * of events in the callback were waiting when poll was called.
+ */
+ lfields->current_timeout = timeout;
+
success = pGetQueuedCompletionStatusEx(loop->iocp,
overlappeds,
ARRAY_SIZE(overlappeds),
@@ -545,9 +568,9 @@
}
/* Placed here because on success the loop will break whether there is an
- * empty package or not, or if GetQueuedCompletionStatus returned early then
- * the timeout will be updated and the loop will run again. In either case
- * the idle time will need to be updated.
+ * empty package or not, or if pGetQueuedCompletionStatusEx returned early
+ * then the timeout will be updated and the loop will run again. In either
+ * case the idle time will need to be updated.
*/
uv__metrics_update_idle_time(loop);
@@ -557,6 +580,10 @@
* meant only to wake us up.
*/
if (overlappeds[i].lpOverlapped) {
+ uv__metrics_inc_events(loop, 1);
+ if (actual_timeout == 0)
+ uv__metrics_inc_events_waiting(loop, 1);
+
req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
uv__insert_pending_req(loop, req);
}
@@ -600,10 +627,16 @@
if (!r)
uv_update_time(loop);
- while (r != 0 && loop->stop_flag == 0) {
+ /* Maintain backwards compatibility by processing timers before entering the
+ * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
+ * once, which should be done after polling in order to maintain proper
+ * execution order of the conceptual event loop. */
+ if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
uv_update_time(loop);
uv__run_timers(loop);
+ }
+ while (r != 0 && loop->stop_flag == 0) {
can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;
uv__process_reqs(loop);
@@ -614,6 +647,8 @@
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop);
+ uv__metrics_inc_loop_count(loop);
+
if (pGetQueuedCompletionStatusEx)
uv__poll(loop, timeout);
else
@@ -634,18 +669,8 @@
uv__check_invoke(loop);
uv__process_endgames(loop);
- if (mode == UV_RUN_ONCE) {
- /* UV_RUN_ONCE implies forward progress: at least one callback must have
- * been invoked when it returns. uv__io_poll() can return without doing
- * I/O (meaning: no callbacks) when its timeout expires - which means we
- * have pending timers that satisfy the forward progress constraint.
- *
- * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
- * the check.
- */
- uv_update_time(loop);
- uv__run_timers(loop);
- }
+ uv_update_time(loop);
+ uv__run_timers(loop);
r = uv__loop_alive(loop);
if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
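
Note: the win/core.cpp hunks above wire the Windows poll paths into libuv's per-loop metrics (loop_count, events, events_waiting) and stash the poll timeout in lfields->current_timeout so uv__work_done() can tell whether work was already pending when the loop blocked. A minimal consumer sketch, assuming libuv >= 1.45 where uv_metrics_info() is public API; error checking is omitted:

    #include <stdio.h>
    #include <uv.h>

    static void on_timer(uv_timer_t* t) {
      uv_metrics_t m;
      uv_metrics_info(t->loop, &m);  /* counters incremented by the hunks above */
      printf("iterations=%llu events=%llu waited=%llu\n",
             (unsigned long long) m.loop_count,
             (unsigned long long) m.events,
             (unsigned long long) m.events_waiting);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_timer_t timer;
      uv_loop_configure(loop, UV_METRICS_IDLE_TIME);  /* also enables idle-time accounting */
      uv_timer_init(loop, &timer);
      uv_timer_start(&timer, on_timer, 100, 100);
      return uv_run(loop, UV_RUN_DEFAULT);
    }
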
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/fs.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/fs.cpp
index 71c9b16..f415ddc 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/fs.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/fs.cpp
@@ -38,6 +38,8 @@
#include "handle-inl.h"
#include "fs-fd-hash-inl.h"
+#include <winioctl.h>
+
#pragma comment(lib, "Advapi32.lib")
#define UV_FS_FREE_PATHS 0x0002
@@ -145,26 +147,97 @@
}
+static int32_t fs__decode_wtf8_char(const char** input) {
+ uint32_t code_point;
+ uint8_t b1;
+ uint8_t b2;
+ uint8_t b3;
+ uint8_t b4;
+
+ b1 = **input;
+ if (b1 <= 0x7F)
+ return b1; /* ASCII code point */
+ if (b1 < 0xC2)
+ return -1; /* invalid: continuation byte */
+ code_point = b1;
+
+ b2 = *++*input;
+ if ((b2 & 0xC0) != 0x80)
+ return -1; /* invalid: not a continuation byte */
+ code_point = (code_point << 6) | (b2 & 0x3F);
+ if (b1 <= 0xDF)
+ return 0x7FF & code_point; /* two-byte character */
+
+ b3 = *++*input;
+ if ((b3 & 0xC0) != 0x80)
+ return -1; /* invalid: not a continuation byte */
+ code_point = (code_point << 6) | (b3 & 0x3F);
+ if (b1 <= 0xEF)
+ return 0xFFFF & code_point; /* three-byte character */
+
+ b4 = *++*input;
+ if ((b4 & 0xC0) != 0x80)
+ return -1; /* invalid: not a continuation byte */
+ code_point = (code_point << 6) | (b4 & 0x3F);
+ if (b1 <= 0xF4)
+ if (code_point <= 0x10FFFF)
+ return code_point; /* four-byte character */
+
+ /* code point too large */
+ return -1;
+}
+
+
+static ssize_t fs__get_length_wtf8(const char* source_ptr) {
+ size_t w_target_len = 0;
+ int32_t code_point;
+
+ do {
+ code_point = fs__decode_wtf8_char(&source_ptr);
+ if (code_point < 0)
+ return -1;
+ if (code_point > 0xFFFF)
+ w_target_len++;
+ w_target_len++;
+ } while (*source_ptr++);
+ return w_target_len;
+}
+
+
+static void fs__wtf8_to_wide(const char* source_ptr, WCHAR* w_target) {
+ int32_t code_point;
+
+ do {
+ code_point = fs__decode_wtf8_char(&source_ptr);
+ /* fs__get_length_wtf8 should have been called and checked first. */
+ assert(code_point >= 0);
+ if (code_point > 0xFFFF) {
+ assert(code_point < 0x10FFFF);
+ *w_target++ = (((code_point - 0x10000) >> 10) + 0xD800);
+ *w_target++ = ((code_point - 0x10000) & 0x3FF) + 0xDC00;
+ } else {
+ *w_target++ = code_point;
+ }
+ } while (*source_ptr++);
+}
+
+
INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
const char* new_path, const int copy_path) {
- char* buf;
- char* pos;
- ssize_t buf_sz = 0, path_len = 0, pathw_len = 0, new_pathw_len = 0;
+ WCHAR* buf;
+ WCHAR* pos;
+ size_t buf_sz = 0;
+ size_t path_len = 0;
+ ssize_t pathw_len = 0;
+ ssize_t new_pathw_len = 0;
/* new_path can only be set if path is also set. */
assert(new_path == NULL || path != NULL);
if (path != NULL) {
- pathw_len = MultiByteToWideChar(CP_UTF8,
- 0,
- path,
- -1,
- NULL,
- 0);
- if (pathw_len == 0) {
- return GetLastError();
- }
-
+ pathw_len = fs__get_length_wtf8(path);
+ if (pathw_len < 0)
+ return ERROR_INVALID_NAME;
buf_sz += pathw_len * sizeof(WCHAR);
}
@@ -174,16 +247,9 @@
}
if (new_path != NULL) {
- new_pathw_len = MultiByteToWideChar(CP_UTF8,
- 0,
- new_path,
- -1,
- NULL,
- 0);
- if (new_pathw_len == 0) {
- return GetLastError();
- }
-
+ new_pathw_len = fs__get_length_wtf8(new_path);
+ if (new_pathw_len < 0)
+ return ERROR_INVALID_NAME;
buf_sz += new_pathw_len * sizeof(WCHAR);
}
@@ -195,7 +261,7 @@
return 0;
}
- buf = (char*) uv__malloc(buf_sz);
+ buf = (WCHAR *)uv__malloc(buf_sz);
if (buf == NULL) {
return ERROR_OUTOFMEMORY;
}
@@ -203,29 +269,17 @@
pos = buf;
if (path != NULL) {
- DWORD r = MultiByteToWideChar(CP_UTF8,
- 0,
- path,
- -1,
- (WCHAR*) pos,
- pathw_len);
- assert(r == (DWORD) pathw_len);
- req->file.pathw = (WCHAR*) pos;
- pos += r * sizeof(WCHAR);
+ fs__wtf8_to_wide(path, pos);
+ req->file.pathw = pos;
+ pos += pathw_len;
} else {
req->file.pathw = NULL;
}
if (new_path != NULL) {
- DWORD r = MultiByteToWideChar(CP_UTF8,
- 0,
- new_path,
- -1,
- (WCHAR*) pos,
- new_pathw_len);
- assert(r == (DWORD) new_pathw_len);
- req->fs.info.new_pathw = (WCHAR*) pos;
- pos += r * sizeof(WCHAR);
+ fs__wtf8_to_wide(new_path, pos);
+ req->fs.info.new_pathw = pos;
+ pos += new_pathw_len;
} else {
req->fs.info.new_pathw = NULL;
}
@@ -233,8 +287,8 @@
req->path = path;
if (path != NULL && copy_path) {
memcpy(pos, path, path_len);
- assert(path_len == buf_sz - (pos - buf));
- req->path = pos;
+ assert(path_len == buf_sz - (pos - buf) * sizeof(WCHAR));
+ req->path = (char*) pos;
}
req->flags |= UV_FS_FREE_PATHS;
@@ -260,57 +314,115 @@
}
-static int fs__wide_to_utf8(WCHAR* w_source_ptr,
- DWORD w_source_len,
- char** target_ptr,
- uint64_t* target_len_ptr) {
- int r;
- int target_len;
+static int32_t fs__get_surrogate_value(const WCHAR* w_source_ptr,
+ size_t w_source_len) {
+ WCHAR u;
+ WCHAR next;
+
+ u = w_source_ptr[0];
+ if (u >= 0xD800 && u <= 0xDBFF && w_source_len > 1) {
+ next = w_source_ptr[1];
+ if (next >= 0xDC00 && next <= 0xDFFF)
+ return 0x10000 + ((u - 0xD800) << 10) + (next - 0xDC00);
+ }
+ return u;
+}
+
+
+static size_t fs__get_length_wide(const WCHAR* w_source_ptr,
+ size_t w_source_len) {
+ size_t target_len;
+ int32_t code_point;
+
+ target_len = 0;
+ for (; w_source_len; w_source_len--, w_source_ptr++) {
+ code_point = fs__get_surrogate_value(w_source_ptr, w_source_len);
+ /* Can be invalid UTF-8 but must be valid WTF-8. */
+ assert(code_point >= 0);
+ if (code_point < 0x80)
+ target_len += 1;
+ else if (code_point < 0x800)
+ target_len += 2;
+ else if (code_point < 0x10000)
+ target_len += 3;
+ else {
+ target_len += 4;
+ w_source_ptr++;
+ w_source_len--;
+ }
+ }
+ return target_len;
+}
+
+
+static int fs__wide_to_wtf8(WCHAR* w_source_ptr,
+ size_t w_source_len,
+ char** target_ptr,
+ size_t* target_len_ptr) {
+ size_t target_len;
char* target;
- target_len = WideCharToMultiByte(CP_UTF8,
- 0,
- w_source_ptr,
- w_source_len,
- NULL,
- 0,
- NULL,
- NULL);
+ int32_t code_point;
- if (target_len == 0) {
- return -1;
+ /* If *target_ptr is provided, then *target_len_ptr must be its length
+ * (excluding space for null), otherwise we will compute the target_len_ptr
+ * length and may return a new allocation in *target_ptr if target_ptr is
+ * provided. */
+ if (target_ptr == NULL || *target_ptr == NULL) {
+ target_len = fs__get_length_wide(w_source_ptr, w_source_len);
+ if (target_len_ptr != NULL)
+ *target_len_ptr = target_len;
+ } else {
+ target_len = *target_len_ptr;
}
- if (target_len_ptr != NULL) {
- *target_len_ptr = target_len;
- }
-
- if (target_ptr == NULL) {
+ if (target_ptr == NULL)
return 0;
+
+ if (*target_ptr == NULL) {
+ target = (char *)uv__malloc(target_len + 1);
+ if (target == NULL) {
+ SetLastError(ERROR_OUTOFMEMORY);
+ return -1;
+ }
+ *target_ptr = target;
+ } else {
+ target = *target_ptr;
}
- target = (char*)uv__malloc(target_len + 1);
- if (target == NULL) {
- SetLastError(ERROR_OUTOFMEMORY);
- return -1;
- }
+ for (; w_source_len; w_source_len--, w_source_ptr++) {
+ code_point = fs__get_surrogate_value(w_source_ptr, w_source_len);
+ /* Can be invalid UTF-8 but must be valid WTF-8. */
+ assert(code_point >= 0);
- r = WideCharToMultiByte(CP_UTF8,
- 0,
- w_source_ptr,
- w_source_len,
- target,
- target_len,
- NULL,
- NULL);
- assert(r == target_len);
- target[target_len] = '\0';
- *target_ptr = target;
+ if (code_point < 0x80) {
+ *target++ = code_point;
+ } else if (code_point < 0x800) {
+ *target++ = 0xC0 | (code_point >> 6);
+ *target++ = 0x80 | (code_point & 0x3F);
+ } else if (code_point < 0x10000) {
+ *target++ = 0xE0 | (code_point >> 12);
+ *target++ = 0x80 | ((code_point >> 6) & 0x3F);
+ *target++ = 0x80 | (code_point & 0x3F);
+ } else {
+ *target++ = 0xF0 | (code_point >> 18);
+ *target++ = 0x80 | ((code_point >> 12) & 0x3F);
+ *target++ = 0x80 | ((code_point >> 6) & 0x3F);
+ *target++ = 0x80 | (code_point & 0x3F);
+ w_source_ptr++;
+ w_source_len--;
+ }
+ }
+ assert((size_t) (target - *target_ptr) == target_len);
+
+ *target++ = '\0';
+
return 0;
}
-INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr,
- uint64_t* target_len_ptr) {
+INLINE static int fs__readlink_handle(HANDLE handle,
+ char** target_ptr,
+ size_t* target_len_ptr) {
char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer;
WCHAR* w_target;
@@ -440,7 +552,8 @@
return -1;
}
- return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr);
+ assert(target_ptr == NULL || *target_ptr == NULL);
+ return fs__wide_to_wtf8(w_target, w_target_len, target_ptr, target_len_ptr);
}
@@ -1430,7 +1543,8 @@
uv__dirent_t* dirent;
size_t wchar_len;
- size_t utf8_len;
+ size_t wtf8_len;
+ char* wtf8;
/* Obtain a pointer to the current directory entry. */
position += next_entry_offset;
@@ -1457,11 +1571,8 @@
info->FileName[1] == L'.')
continue;
- /* Compute the space required to store the filename as UTF-8. */
- utf8_len = WideCharToMultiByte(
- CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL);
- if (utf8_len == 0)
- goto win32_error;
+ /* Compute the space required to store the filename as WTF-8. */
+ wtf8_len = fs__get_length_wide(&info->FileName[0], wchar_len);
/* Resize the dirent array if needed. */
if (dirents_used >= dirents_size) {
@@ -1481,26 +1592,17 @@
* includes room for the first character of the filename, but `utf8_len`
* doesn't count the NULL terminator at this point.
*/
- dirent = (uv__dirent_t*)uv__malloc(sizeof *dirent + utf8_len);
+ dirent = (uv__dirent_t*)uv__malloc(sizeof *dirent + wtf8_len);
if (dirent == NULL)
goto out_of_memory_error;
dirents[dirents_used++] = dirent;
/* Convert file name to UTF-8. */
- if (WideCharToMultiByte(CP_UTF8,
- 0,
- &info->FileName[0],
- wchar_len,
- &dirent->d_name[0],
- utf8_len,
- NULL,
- NULL) == 0)
+ wtf8 = &dirent->d_name[0];
+ if (fs__wide_to_wtf8(&info->FileName[0], wchar_len, &wtf8, &wtf8_len) == -1)
goto win32_error;
- /* Add a null terminator to the filename. */
- dirent->d_name[utf8_len] = '\0';
-
/* Fill out the type field. */
if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE)
dirent->d_type = UV__DT_CHAR;
@@ -1709,11 +1811,37 @@
INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
int do_lstat) {
+ size_t target_length = 0;
+ FILE_FS_DEVICE_INFORMATION device_info;
FILE_ALL_INFORMATION file_info;
FILE_FS_VOLUME_INFORMATION volume_info;
NTSTATUS nt_status;
IO_STATUS_BLOCK io_status;
+ nt_status = pNtQueryVolumeInformationFile(handle,
+ &io_status,
+ &device_info,
+ sizeof device_info,
+ FileFsDeviceInformation);
+
+ /* Buffer overflow (a warning status code) is expected here. */
+ if (NT_ERROR(nt_status)) {
+ SetLastError(pRtlNtStatusToDosError(nt_status));
+ return -1;
+ }
+
+ /* If it's NUL device set fields as reasonable as possible and return. */
+ if (device_info.DeviceType == FILE_DEVICE_NULL) {
+ memset(statbuf, 0, sizeof(uv_stat_t));
+ statbuf->st_mode = _S_IFCHR;
+ statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
+ ((_S_IREAD | _S_IWRITE) >> 6);
+ statbuf->st_nlink = 1;
+ statbuf->st_blksize = 4096;
+ statbuf->st_rdev = FILE_DEVICE_NULL << 16;
+ return 0;
+ }
+
nt_status = pNtQueryInformationFile(handle,
&io_status,
&file_info,
@@ -1779,9 +1907,10 @@
* to be treated as a regular file. The higher level lstat function will
* detect this failure and retry without do_lstat if appropriate.
*/
- if (fs__readlink_handle(handle, NULL, &statbuf->st_size) != 0)
+ if (fs__readlink_handle(handle, NULL, &target_length) != 0)
return -1;
statbuf->st_mode |= S_IFLNK;
+ statbuf->st_size = target_length;
}
if (statbuf->st_mode == 0) {
@@ -1918,6 +2047,37 @@
}
+INLINE static int fs__fstat_handle(int fd, HANDLE handle, uv_stat_t* statbuf) {
+ DWORD file_type;
+
+ /* Each file type is processed differently. */
+ file_type = uv_guess_handle(fd);
+ switch (file_type) {
+ /* Disk files use the existing logic from fs__stat_handle. */
+ case UV_FILE:
+ return fs__stat_handle(handle, statbuf, 0);
+
+ /* Devices and pipes are processed identically. There is no more information
+ * for them from any API. Fields are set as reasonably as possible and the
+ * function returns. */
+ case UV_TTY:
+ case UV_NAMED_PIPE:
+ memset(statbuf, 0, sizeof(uv_stat_t));
+ statbuf->st_mode = file_type == UV_TTY ? _S_IFCHR : _S_IFIFO;
+ statbuf->st_nlink = 1;
+ statbuf->st_rdev = (file_type == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16;
+ statbuf->st_ino = (uintptr_t) handle;
+ return 0;
+
+ /* If file type is unknown it is an error. */
+ case UV_UNKNOWN_HANDLE:
+ default:
+ SetLastError(ERROR_INVALID_HANDLE);
+ return -1;
+ }
+}
+
+
static void fs__stat(uv_fs_t* req) {
fs__stat_prepare_path(req->file.pathw);
fs__stat_impl(req, 0);
@@ -1943,7 +2103,7 @@
return;
}
- if (fs__stat_handle(handle, &req->statbuf, 0) != 0) {
+ if (fs__fstat_handle(fd, handle, &req->statbuf) != 0) {
SET_REQ_WIN32_ERROR(req, GetLastError());
return;
}
@@ -2224,7 +2384,7 @@
SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
goto fchmod_cleanup;
}
- /* Remeber to clear the flag later on */
+ /* Remember to clear the flag later on */
clear_archive_flag = 1;
} else {
clear_archive_flag = 0;
@@ -2606,8 +2766,12 @@
return;
}
+ assert(req->ptr == NULL);
if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) {
- SET_REQ_WIN32_ERROR(req, GetLastError());
+ DWORD error = GetLastError();
+ SET_REQ_WIN32_ERROR(req, error);
+ if (error == ERROR_NOT_A_REPARSE_POINT)
+ req->result = UV_EINVAL;
CloseHandle(handle);
return;
}
@@ -2662,7 +2826,8 @@
return -1;
}
- r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL);
+ assert(*realpath_ptr == NULL);
+ r = fs__wide_to_wtf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL);
uv__free(w_realpath_buf);
return r;
}
@@ -2682,6 +2847,7 @@
return;
}
+ assert(req->ptr == NULL);
if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) {
CloseHandle(handle);
SET_REQ_WIN32_ERROR(req, GetLastError());
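
Note: for reference, a standalone sketch of the surrogate-pair arithmetic that the new fs__get_surrogate_value()/fs__wtf8_to_wide() helpers above are built on; U+1F600 is just an illustrative value and this snippet is not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint32_t cp = 0x1F600;                           /* supplementary-plane code point */
      uint16_t hi = ((cp - 0x10000) >> 10) + 0xD800;   /* lead surrogate, as in fs__wtf8_to_wide() */
      uint16_t lo = ((cp - 0x10000) & 0x3FF) + 0xDC00; /* trail surrogate */
      uint32_t back = 0x10000 + ((hi - 0xD800) << 10) + (lo - 0xDC00);  /* fs__get_surrogate_value() */
      assert(back == cp);
      return 0;
    }

The motivation for replacing MultiByteToWideChar(CP_UTF8, ...) with these helpers is WTF-8: unpaired UTF-16 surrogates in NTFS filenames are encoded as three-byte sequences and round-trip through the char* path APIs instead of being mangled or rejected.
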
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/handle-inl.h b/wpinet/src/main/native/thirdparty/libuv/src/win/handle-inl.h
index 5c843c2..4722e85 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/handle-inl.h
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/handle-inl.h
@@ -75,7 +75,7 @@
#define uv__handle_close(handle) \
do { \
- QUEUE_REMOVE(&(handle)->handle_queue); \
+ uv__queue_remove(&(handle)->handle_queue); \
uv__active_handle_rm((uv_handle_t*) (handle)); \
\
(handle)->flags |= UV_HANDLE_CLOSED; \
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/internal.h b/wpinet/src/main/native/thirdparty/libuv/src/win/internal.h
index 89c72b8..9672fbc 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/internal.h
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/internal.h
@@ -168,18 +168,8 @@
uv_req_t* req);
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req);
-/*
- * uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
- * TODO: find a way to remove it
- */
-void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
- uv_req_t* raw_req);
-/*
- * uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
- * TODO: find a way to remove it
- */
-void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
- uv_connect_t* req);
+#define uv__process_tty_accept_req(loop, handle, req) abort()
+#define uv__process_tty_connect_req(loop, handle, req) abort()
void uv__process_tty_shutdown_req(uv_loop_t* loop,
uv_tty_t* stream,
uv_shutdown_t* req);
@@ -267,7 +257,6 @@
uint64_t uv__hrtime(unsigned int scale);
__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
-int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8);
int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16);
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/pipe.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/pipe.cpp
index f413a72..258d6a6 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/pipe.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/pipe.cpp
@@ -57,7 +57,7 @@
typedef struct {
uv__ipc_socket_xfer_type_t xfer_type;
uv__ipc_socket_xfer_info_t xfer_info;
- QUEUE member;
+ struct uv__queue member;
} uv__ipc_xfer_queue_item_t;
/* IPC frame header flags. */
@@ -113,7 +113,7 @@
handle->name = NULL;
handle->pipe.conn.ipc_remote_pid = 0;
handle->pipe.conn.ipc_data_frame.payload_remaining = 0;
- QUEUE_INIT(&handle->pipe.conn.ipc_xfer_queue);
+ uv__queue_init(&handle->pipe.conn.ipc_xfer_queue);
handle->pipe.conn.ipc_xfer_queue_length = 0;
handle->ipc = ipc;
handle->pipe.conn.non_overlapped_writes_tail = NULL;
@@ -639,13 +639,13 @@
if (handle->flags & UV_HANDLE_CONNECTION) {
/* Free pending sockets */
- while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) {
- QUEUE* q;
+ while (!uv__queue_empty(&handle->pipe.conn.ipc_xfer_queue)) {
+ struct uv__queue* q;
SOCKET socket;
- q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue);
- QUEUE_REMOVE(q);
- xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
+ q = uv__queue_head(&handle->pipe.conn.ipc_xfer_queue);
+ uv__queue_remove(q);
+ xfer_queue_item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member);
/* Materialize socket and close it */
socket = WSASocketW(FROM_PROTOCOL_INFO,
@@ -696,20 +696,48 @@
/* Creates a pipe server. */
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+ return uv_pipe_bind2(handle, name, strlen(name), 0);
+}
+
+
+int uv_pipe_bind2(uv_pipe_t* handle,
+ const char* name,
+ size_t namelen,
+ unsigned int flags) {
uv_loop_t* loop = handle->loop;
int i, err, nameSize;
uv_pipe_accept_t* req;
+ if (flags & ~UV_PIPE_NO_TRUNCATE) {
+ return UV_EINVAL;
+ }
+
+ if (name == NULL) {
+ return UV_EINVAL;
+ }
+
+ if (namelen == 0) {
+ return UV_EINVAL;
+ }
+
+ if (*name == '\0') {
+ return UV_EINVAL;
+ }
+
+ if (flags & UV_PIPE_NO_TRUNCATE) {
+ if (namelen > 256) {
+ return UV_EINVAL;
+ }
+ }
+
if (handle->flags & UV_HANDLE_BOUND) {
return UV_EINVAL;
}
- if (!name) {
- return UV_EINVAL;
- }
if (uv__is_closing(handle)) {
return UV_EINVAL;
}
+
if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
handle->pipe.serv.pending_instances = default_pending_pipe_instances;
}
@@ -794,15 +822,17 @@
/* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
* up to 30 seconds for the pipe to become available with WaitNamedPipe. */
- while (WaitNamedPipeW(handle->name, 30000)) {
+ while (WaitNamedPipeW(req->u.connect.name, 30000)) {
/* The pipe is now available, try to connect. */
- pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+ pipeHandle = open_named_pipe(req->u.connect.name, &duplex_flags);
if (pipeHandle != INVALID_HANDLE_VALUE)
break;
SwitchToThread();
}
+ uv__free(req->u.connect.name);
+ req->u.connect.name = NULL;
if (pipeHandle != INVALID_HANDLE_VALUE) {
SET_REQ_SUCCESS(req);
req->u.connect.pipeHandle = pipeHandle;
@@ -818,18 +848,53 @@
}
-void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
- const char* name, uv_connect_cb cb) {
+void uv_pipe_connect(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ uv_connect_cb cb) {
+ uv_pipe_connect2(req, handle, name, strlen(name), 0, cb);
+}
+
+
+int uv_pipe_connect2(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ size_t namelen,
+ unsigned int flags,
+ uv_connect_cb cb) {
uv_loop_t* loop = handle->loop;
int err, nameSize;
HANDLE pipeHandle = INVALID_HANDLE_VALUE;
DWORD duplex_flags;
+ if (flags & ~UV_PIPE_NO_TRUNCATE) {
+ return UV_EINVAL;
+ }
+
+ if (name == NULL) {
+ return UV_EINVAL;
+ }
+
+ if (namelen == 0) {
+ return UV_EINVAL;
+ }
+
+ if (*name == '\0') {
+ return UV_EINVAL;
+ }
+
+ if (flags & UV_PIPE_NO_TRUNCATE) {
+ if (namelen > 256) {
+ return UV_EINVAL;
+ }
+ }
+
UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
req->u.connect.pipeHandle = INVALID_HANDLE_VALUE;
req->u.connect.duplex_flags = 0;
+ req->u.connect.name = NULL;
if (handle->flags & UV_HANDLE_PIPESERVER) {
err = ERROR_INVALID_PARAMETER;
@@ -861,10 +926,19 @@
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
if (pipeHandle == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_PIPE_BUSY) {
+ req->u.connect.name = (WCHAR *)uv__malloc(nameSize);
+ if (!req->u.connect.name) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ memcpy(req->u.connect.name, handle->name, nameSize);
+
/* Wait for the server to make a pipe instance available. */
if (!QueueUserWorkItem(&pipe_connect_thread_proc,
req,
WT_EXECUTELONGFUNCTION)) {
+ uv__free(req->u.connect.name);
+ req->u.connect.name = NULL;
err = GetLastError();
goto error;
}
@@ -872,7 +946,7 @@
REGISTER_HANDLE_REQ(loop, handle, req);
handle->reqs_pending++;
- return;
+ return 0;
}
err = GetLastError();
@@ -885,7 +959,7 @@
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
- return;
+ return 0;
error:
if (handle->name) {
@@ -901,7 +975,7 @@
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
- return;
+ return 0;
}
@@ -1052,28 +1126,29 @@
uv_loop_t* loop = server->loop;
uv_pipe_t* pipe_client;
uv_pipe_accept_t* req;
- QUEUE* q;
+ struct uv__queue* q;
uv__ipc_xfer_queue_item_t* item;
int err;
if (server->ipc) {
- if (QUEUE_EMPTY(&server->pipe.conn.ipc_xfer_queue)) {
+ if (uv__queue_empty(&server->pipe.conn.ipc_xfer_queue)) {
/* No valid pending sockets. */
return WSAEWOULDBLOCK;
}
- q = QUEUE_HEAD(&server->pipe.conn.ipc_xfer_queue);
- QUEUE_REMOVE(q);
+ q = uv__queue_head(&server->pipe.conn.ipc_xfer_queue);
+ uv__queue_remove(q);
server->pipe.conn.ipc_xfer_queue_length--;
- item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
+ item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member);
err = uv__tcp_xfer_import(
(uv_tcp_t*) client, item->xfer_type, &item->xfer_info);
+
+ uv__free(item);
+
if (err != 0)
return err;
- uv__free(item);
-
} else {
pipe_client = (uv_pipe_t*) client;
uv__pipe_connection_init(pipe_client);
@@ -1640,9 +1715,13 @@
/* If the both ends of the IPC pipe are owned by the same process,
* the remote end pid may not yet be set. If so, do it here.
* TODO: this is weird; it'd probably better to use a handshake. */
- if (*pid == 0)
- *pid = GetCurrentProcessId();
-
+ if (*pid == 0) {
+ GetNamedPipeClientProcessId(handle->handle, pid);
+ if (*pid == GetCurrentProcessId()) {
+ GetNamedPipeServerProcessId(handle->handle, pid);
+ }
+ }
+
return *pid;
}
@@ -1814,7 +1893,7 @@
item->xfer_type = xfer_type;
item->xfer_info = *xfer_info;
- QUEUE_INSERT_TAIL(&handle->pipe.conn.ipc_xfer_queue, &item->member);
+ uv__queue_insert_tail(&handle->pipe.conn.ipc_xfer_queue, &item->member);
handle->pipe.conn.ipc_xfer_queue_length++;
}
@@ -2071,9 +2150,9 @@
uv__queue_non_overlapped_write(handle);
}
- if (handle->stream.conn.write_reqs_pending == 0)
- if (handle->flags & UV_HANDLE_SHUTTING)
- uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
+ if (handle->stream.conn.write_reqs_pending == 0 &&
+ uv__is_stream_shutting(handle))
+ uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle);
}
@@ -2128,7 +2207,10 @@
if (REQ_SUCCESS(req)) {
pipeHandle = req->u.connect.pipeHandle;
duplex_flags = req->u.connect.duplex_flags;
- err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
+ if (handle->flags & UV_HANDLE_CLOSING)
+ err = UV_ECANCELED;
+ else
+ err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
if (err)
CloseHandle(pipeHandle);
} else {
@@ -2151,7 +2233,6 @@
/* Clear the shutdown_req field so we don't go here again. */
handle->stream.conn.shutdown_req = NULL;
- handle->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, handle, req);
if (handle->flags & UV_HANDLE_CLOSING) {
@@ -2344,7 +2425,10 @@
if (pipe->ipc) {
assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
- pipe->pipe.conn.ipc_remote_pid = uv_os_getppid();
+ GetNamedPipeClientProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid);
+ if (pipe->pipe.conn.ipc_remote_pid == GetCurrentProcessId()) {
+ GetNamedPipeServerProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid);
+ }
assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1);
}
return 0;
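
Note: pipe.cpp above supplies the Windows halves of the new public uv_pipe_bind2()/uv_pipe_connect2() entry points, which take an explicit name length plus a UV_PIPE_NO_TRUNCATE flag and reject bad names with UV_EINVAL instead of truncating them. A hedged usage sketch; the pipe name is made up:

    #include <string.h>
    #include <uv.h>

    static void on_connect(uv_connect_t* req, int status) {
      (void) req;
      (void) status;   /* 0 on success, UV_EINVAL for a rejected name, etc. */
    }

    int connect_example(uv_loop_t* loop, uv_pipe_t* pipe, uv_connect_t* req) {
      const char* name = "\\\\.\\pipe\\example";   /* hypothetical pipe name */
      uv_pipe_init(loop, pipe, 0);
      /* With UV_PIPE_NO_TRUNCATE an over-long name fails up front rather than
       * being silently shortened; uv_pipe_connect() keeps the old behavior. */
      return uv_pipe_connect2(req, pipe, name, strlen(name),
                              UV_PIPE_NO_TRUNCATE, on_connect);
    }
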
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/poll.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/poll.cpp
index bd531b0..7fec2b9 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/poll.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/poll.cpp
@@ -425,9 +425,8 @@
return uv_translate_sys_error(WSAGetLastError());
/* Try to obtain a base handle for the socket. This increases this chances that
- * we find an AFD handle and are able to use the fast poll mechanism. This will
- * always fail on windows XP/2k3, since they don't support the. SIO_BASE_HANDLE
- * ioctl. */
+ * we find an AFD handle and are able to use the fast poll mechanism.
+ */
#ifndef NDEBUG
base_socket = INVALID_SOCKET;
#endif
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/process.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/process.cpp
index 8e7835a..18816d3 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/process.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/process.cpp
@@ -145,7 +145,6 @@
handle->exit_signal = 0;
handle->wait_handle = INVALID_HANDLE_VALUE;
handle->process_handle = INVALID_HANDLE_VALUE;
- handle->child_stdio_buffer = NULL;
handle->exit_cb_pending = 0;
UV_REQ_INIT(&handle->exit_req, UV_PROCESS_EXIT);
@@ -948,9 +947,11 @@
STARTUPINFOW startup;
PROCESS_INFORMATION info;
DWORD process_flags;
+ BYTE* child_stdio_buffer;
uv__process_init(loop, process);
process->exit_cb = options->exit_cb;
+ child_stdio_buffer = NULL;
if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
return UV_ENOTSUP;
@@ -1041,7 +1042,7 @@
}
}
- err = uv__stdio_create(loop, options, &process->child_stdio_buffer);
+ err = uv__stdio_create(loop, options, &child_stdio_buffer);
if (err)
goto done;
@@ -1060,12 +1061,12 @@
startup.lpTitle = NULL;
startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
- startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer);
- startup.lpReserved2 = (BYTE*) process->child_stdio_buffer;
+ startup.cbReserved2 = uv__stdio_size(child_stdio_buffer);
+ startup.lpReserved2 = (BYTE*) child_stdio_buffer;
- startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0);
- startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1);
- startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2);
+ startup.hStdInput = uv__stdio_handle(child_stdio_buffer, 0);
+ startup.hStdOutput = uv__stdio_handle(child_stdio_buffer, 1);
+ startup.hStdError = uv__stdio_handle(child_stdio_buffer, 2);
process_flags = CREATE_UNICODE_ENVIRONMENT;
@@ -1179,10 +1180,10 @@
uv__free(env);
uv__free(alloc_path);
- if (process->child_stdio_buffer != NULL) {
+ if (child_stdio_buffer != NULL) {
/* Clean up child stdio handles. */
- uv__stdio_destroy(process->child_stdio_buffer);
- process->child_stdio_buffer = NULL;
+ uv__stdio_destroy(child_stdio_buffer);
+ child_stdio_buffer = NULL;
}
return uv_translate_sys_error(err);
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/stream.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/stream.cpp
index 292bf58..7bf9ca3 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/stream.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/stream.cpp
@@ -204,7 +204,7 @@
uv_loop_t* loop = handle->loop;
if (!(handle->flags & UV_HANDLE_WRITABLE) ||
- handle->flags & UV_HANDLE_SHUTTING ||
+ uv__is_stream_shutting(handle) ||
uv__is_closing(handle)) {
return UV_ENOTCONN;
}
@@ -214,7 +214,6 @@
req->cb = cb;
handle->flags &= ~UV_HANDLE_WRITABLE;
- handle->flags |= UV_HANDLE_SHUTTING;
handle->stream.conn.shutdown_req = req;
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/tcp.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/tcp.cpp
index 4cccee4..d8da4d9 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/tcp.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/tcp.cpp
@@ -30,14 +30,6 @@
/*
- * Threshold of active tcp streams for which to preallocate tcp read buffers.
- * (Due to node slab allocator performing poorly under this pattern,
- * the optimization is temporarily disabled (threshold=0). This will be
- * revisited once node allocator is improved.)
- */
-const unsigned int uv_active_tcp_streams_threshold = 0;
-
-/*
* Number of simultaneous pending AcceptEx calls.
*/
const unsigned int uv_simultaneous_server_accepts = 32;
@@ -183,14 +175,14 @@
sock = socket(domain, SOCK_STREAM, 0);
if (sock == INVALID_SOCKET) {
err = WSAGetLastError();
- QUEUE_REMOVE(&handle->handle_queue);
+ uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0);
if (err) {
closesocket(sock);
- QUEUE_REMOVE(&handle->handle_queue);
+ uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
@@ -214,7 +206,6 @@
assert(stream->flags & UV_HANDLE_CONNECTION);
stream->stream.conn.shutdown_req = NULL;
- stream->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, stream, req);
err = 0;
@@ -274,7 +265,6 @@
}
uv__handle_close(handle);
- loop->active_tcp_streams--;
}
@@ -484,26 +474,9 @@
req = &handle->read_req;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
- /*
- * Preallocate a read buffer if the number of active streams is below
- * the threshold.
- */
- if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
- handle->flags &= ~UV_HANDLE_ZERO_READ;
- handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0);
- handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer);
- if (handle->tcp.conn.read_buffer.base == NULL ||
- handle->tcp.conn.read_buffer.len == 0) {
- handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer);
- return;
- }
- assert(handle->tcp.conn.read_buffer.base != NULL);
- buf = handle->tcp.conn.read_buffer;
- } else {
- handle->flags |= UV_HANDLE_ZERO_READ;
- buf.base = (char*) &uv_zero_;
- buf.len = 0;
- }
+ handle->flags |= UV_HANDLE_ZERO_READ;
+ buf.base = (char*) &uv_zero_;
+ buf.len = 0;
/* Prepare the overlapped structure. */
memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
@@ -550,7 +523,7 @@
struct linger l = { 1, 0 };
/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
- if (handle->flags & UV_HANDLE_SHUTTING)
+ if (uv__is_stream_shutting(handle))
return UV_EINVAL;
if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l)))
@@ -654,7 +627,6 @@
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
- uv_loop_t* loop = server->loop;
int err = 0;
int family;
@@ -716,8 +688,6 @@
}
}
- loop->active_tcp_streams++;
-
return err;
}
@@ -1163,7 +1133,7 @@
closesocket(handle->socket);
handle->socket = INVALID_SOCKET;
}
- if (handle->flags & UV_HANDLE_SHUTTING)
+ if (uv__is_stream_shutting(handle))
uv__process_tcp_shutdown_req(loop,
handle,
handle->stream.conn.shutdown_req);
@@ -1248,7 +1218,6 @@
0) == 0) {
uv__connection_init((uv_stream_t*)handle);
handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
- loop->active_tcp_streams++;
} else {
err = WSAGetLastError();
}
@@ -1331,7 +1300,6 @@
tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
- tcp->loop->active_tcp_streams++;
return 0;
}
@@ -1432,7 +1400,7 @@
uv_tcp_non_ifs_lsp_ipv4;
/* If there are non-ifs LSPs then try to obtain a base handle for the socket.
- * This will always fail on Windows XP/3k. */
+ */
if (non_ifs_lsp) {
DWORD bytes;
if (WSAIoctl(socket,
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/thread.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/thread.cpp
index 9ad60c9..03b33e9 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/thread.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/thread.cpp
@@ -180,6 +180,81 @@
return UV_EIO;
}
+int uv_thread_setaffinity(uv_thread_t* tid,
+ char* cpumask,
+ char* oldmask,
+ size_t mask_size) {
+ int i;
+ HANDLE hproc;
+ DWORD_PTR procmask;
+ DWORD_PTR sysmask;
+ DWORD_PTR threadmask;
+ DWORD_PTR oldthreadmask;
+ int cpumasksize;
+
+ cpumasksize = uv_cpumask_size();
+ assert(cpumasksize > 0);
+ if (mask_size < (size_t)cpumasksize)
+ return UV_EINVAL;
+
+ hproc = GetCurrentProcess();
+ if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
+ return uv_translate_sys_error(GetLastError());
+
+ threadmask = 0;
+ for (i = 0; i < cpumasksize; i++) {
+ if (cpumask[i]) {
+ if (procmask & (1LL << i))
+ threadmask |= 1LL << i;
+ else
+ return UV_EINVAL;
+ }
+ }
+
+ oldthreadmask = SetThreadAffinityMask(*tid, threadmask);
+ if (oldthreadmask == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ if (oldmask != NULL) {
+ for (i = 0; i < cpumasksize; i++)
+ oldmask[i] = (oldthreadmask >> i) & 1;
+ }
+
+ return 0;
+}
+
+int uv_thread_getaffinity(uv_thread_t* tid,
+ char* cpumask,
+ size_t mask_size) {
+ int i;
+ HANDLE hproc;
+ DWORD_PTR procmask;
+ DWORD_PTR sysmask;
+ DWORD_PTR threadmask;
+ int cpumasksize;
+
+ cpumasksize = uv_cpumask_size();
+ assert(cpumasksize > 0);
+ if (mask_size < (size_t)cpumasksize)
+ return UV_EINVAL;
+
+ hproc = GetCurrentProcess();
+ if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
+ return uv_translate_sys_error(GetLastError());
+
+ threadmask = SetThreadAffinityMask(*tid, procmask);
+ if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ for (i = 0; i < cpumasksize; i++)
+ cpumask[i] = (threadmask >> i) & 1;
+
+ return 0;
+}
+
+int uv_thread_getcpu(void) {
+ return GetCurrentProcessorNumber();
+}
uv_thread_t uv_thread_self(void) {
uv_thread_t key;
@@ -374,6 +449,7 @@
abort();
}
+
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
return 0;
@@ -383,69 +459,6 @@
}
-int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
- int err;
-
- barrier->n = count;
- barrier->count = 0;
-
- err = uv_mutex_init(&barrier->mutex);
- if (err)
- return err;
-
- err = uv_sem_init(&barrier->turnstile1, 0);
- if (err)
- goto error2;
-
- err = uv_sem_init(&barrier->turnstile2, 1);
- if (err)
- goto error;
-
- return 0;
-
-error:
- uv_sem_destroy(&barrier->turnstile1);
-error2:
- uv_mutex_destroy(&barrier->mutex);
- return err;
-
-}
-
-
-void uv_barrier_destroy(uv_barrier_t* barrier) {
- uv_sem_destroy(&barrier->turnstile2);
- uv_sem_destroy(&barrier->turnstile1);
- uv_mutex_destroy(&barrier->mutex);
-}
-
-
-int uv_barrier_wait(uv_barrier_t* barrier) {
- int serial_thread;
-
- uv_mutex_lock(&barrier->mutex);
- if (++barrier->count == barrier->n) {
- uv_sem_wait(&barrier->turnstile2);
- uv_sem_post(&barrier->turnstile1);
- }
- uv_mutex_unlock(&barrier->mutex);
-
- uv_sem_wait(&barrier->turnstile1);
- uv_sem_post(&barrier->turnstile1);
-
- uv_mutex_lock(&barrier->mutex);
- serial_thread = (--barrier->count == 0);
- if (serial_thread) {
- uv_sem_wait(&barrier->turnstile1);
- uv_sem_post(&barrier->turnstile2);
- }
- uv_mutex_unlock(&barrier->mutex);
-
- uv_sem_wait(&barrier->turnstile2);
- uv_sem_post(&barrier->turnstile2);
- return serial_thread;
-}
-
-
int uv_key_create(uv_key_t* key) {
key->tls_index = TlsAlloc();
if (key->tls_index == TLS_OUT_OF_INDEXES)
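
Note: thread.cpp above adds Windows implementations of uv_thread_setaffinity(), uv_thread_getaffinity() and uv_thread_getcpu() and drops the Windows-specific uv_barrier_* implementation. A minimal sketch that pins a thread (its uv_thread_t obtained from uv_thread_create() or uv_thread_self()) to CPU 0; the CPU index is an arbitrary choice and uv_cpumask_size() is assumed to succeed:

    #include <stdlib.h>
    #include <uv.h>

    static int pin_to_cpu0(uv_thread_t* tid) {
      int size = uv_cpumask_size();
      char* mask;
      int r;

      if (size <= 0)
        return size;                  /* e.g. UV_ENOTSUP */
      mask = (char*) calloc((size_t) size, 1);
      if (mask == NULL)
        return UV_ENOMEM;
      mask[0] = 1;                    /* CPU 0 only; must lie inside the process mask */
      r = uv_thread_setaffinity(tid, mask, NULL, (size_t) size);
      free(mask);
      return r;                       /* UV_EINVAL if CPU 0 is not available to the process */
    }
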
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/tty.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/tty.cpp
index 9753784..9bb3d9e 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/tty.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/tty.cpp
@@ -25,12 +25,7 @@
#include <io.h>
#include <string.h>
#include <stdlib.h>
-
-#if defined(_MSC_VER) && _MSC_VER < 1600
-# include "uv/stdint-msvc2008.h"
-#else
-# include <stdint.h>
-#endif
+#include <stdint.h>
#ifndef COMMON_LVB_REVERSE_VIDEO
# define COMMON_LVB_REVERSE_VIDEO 0x4000
@@ -179,14 +174,14 @@
0);
if (uv__tty_console_handle != INVALID_HANDLE_VALUE) {
CONSOLE_SCREEN_BUFFER_INFO sb_info;
- QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
- NULL,
- WT_EXECUTELONGFUNCTION);
uv_mutex_init(&uv__tty_console_resize_mutex);
if (GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info)) {
uv__tty_console_width = sb_info.dwSize.X;
uv__tty_console_height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
}
+ QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
+ NULL,
+ WT_EXECUTELONGFUNCTION);
}
}
@@ -2243,11 +2238,11 @@
handle->stream.conn.write_reqs_pending--;
- if (handle->stream.conn.write_reqs_pending == 0)
- if (handle->flags & UV_HANDLE_SHUTTING)
- uv__process_tty_shutdown_req(loop,
- handle,
- handle->stream.conn.shutdown_req);
+ if (handle->stream.conn.write_reqs_pending == 0 &&
+ uv__is_stream_shutting(handle))
+ uv__process_tty_shutdown_req(loop,
+ handle,
+ handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle);
}
@@ -2278,7 +2273,6 @@
assert(req);
stream->stream.conn.shutdown_req = NULL;
- stream->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, stream, req);
/* TTY shutdown is really just a no-op */
@@ -2308,26 +2302,6 @@
}
-/*
- * uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
- * TODO: find a way to remove it
- */
-void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
- uv_req_t* raw_req) {
- abort();
-}
-
-
-/*
- * uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
- * TODO: find a way to remove it
- */
-void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
- uv_connect_t* req) {
- abort();
-}
-
-
int uv_tty_reset_mode(void) {
/* Not necessary to do anything. */
return 0;
@@ -2433,7 +2407,6 @@
height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
uv_mutex_lock(&uv__tty_console_resize_mutex);
- assert(uv__tty_console_width != -1 && uv__tty_console_height != -1);
if (width != uv__tty_console_width || height != uv__tty_console_height) {
uv__tty_console_width = width;
uv__tty_console_height = height;
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/udp.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/udp.cpp
index eaebc1e..eab5384 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/udp.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/udp.cpp
@@ -29,11 +29,6 @@
#include "req-inl.h"
-/*
- * Threshold of active udp streams for which to preallocate udp read buffers.
- */
-const unsigned int uv_active_udp_streams_threshold = 0;
-
/* A zero-size buffer for use by uv_udp_read */
static char uv_zero_[] = "";
int uv_udp_getpeername(const uv_udp_t* handle,
@@ -151,14 +146,14 @@
sock = socket(domain, SOCK_DGRAM, 0);
if (sock == INVALID_SOCKET) {
err = WSAGetLastError();
- QUEUE_REMOVE(&handle->handle_queue);
+ uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
err = uv__udp_set_socket(handle->loop, handle, sock, domain);
if (err) {
closesocket(sock);
- QUEUE_REMOVE(&handle->handle_queue);
+ uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
}
@@ -276,84 +271,35 @@
req = &handle->recv_req;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
- /*
- * Preallocate a read buffer if the number of active streams is below
- * the threshold.
- */
- if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
- handle->flags &= ~UV_HANDLE_ZERO_READ;
+ handle->flags |= UV_HANDLE_ZERO_READ;
- handle->recv_buffer = uv_buf_init(NULL, 0);
- handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer);
- if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
- handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
- return;
- }
- assert(handle->recv_buffer.base != NULL);
+ buf.base = (char*) uv_zero_;
+ buf.len = 0;
+ flags = MSG_PEEK;
- buf = handle->recv_buffer;
- memset(&handle->recv_from, 0, sizeof handle->recv_from);
- handle->recv_from_len = sizeof handle->recv_from;
- flags = 0;
+ result = handle->func_wsarecv(handle->socket,
+ (WSABUF*) &buf,
+ 1,
+ &bytes,
+ &flags,
+ &req->u.io.overlapped,
+ NULL);
- result = handle->func_wsarecvfrom(handle->socket,
- (WSABUF*) &buf,
- 1,
- &bytes,
- &flags,
- (struct sockaddr*) &handle->recv_from,
- &handle->recv_from_len,
- &req->u.io.overlapped,
- NULL);
-
- if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
- /* Process the req without IOCP. */
- handle->flags |= UV_HANDLE_READ_PENDING;
- req->u.io.overlapped.InternalHigh = bytes;
- handle->reqs_pending++;
- uv__insert_pending_req(loop, req);
- } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
- /* The req will be processed with IOCP. */
- handle->flags |= UV_HANDLE_READ_PENDING;
- handle->reqs_pending++;
- } else {
- /* Make this req pending reporting an error. */
- SET_REQ_ERROR(req, WSAGetLastError());
- uv__insert_pending_req(loop, req);
- handle->reqs_pending++;
- }
-
+ if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+ /* Process the req without IOCP. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ req->u.io.overlapped.InternalHigh = bytes;
+ handle->reqs_pending++;
+ uv__insert_pending_req(loop, req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+ /* The req will be processed with IOCP. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
} else {
- handle->flags |= UV_HANDLE_ZERO_READ;
-
- buf.base = (char*) uv_zero_;
- buf.len = 0;
- flags = MSG_PEEK;
-
- result = handle->func_wsarecv(handle->socket,
- (WSABUF*) &buf,
- 1,
- &bytes,
- &flags,
- &req->u.io.overlapped,
- NULL);
-
- if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
- /* Process the req without IOCP. */
- handle->flags |= UV_HANDLE_READ_PENDING;
- req->u.io.overlapped.InternalHigh = bytes;
- handle->reqs_pending++;
- uv__insert_pending_req(loop, req);
- } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
- /* The req will be processed with IOCP. */
- handle->flags |= UV_HANDLE_READ_PENDING;
- handle->reqs_pending++;
- } else {
- /* Make this req pending reporting an error. */
- SET_REQ_ERROR(req, WSAGetLastError());
- uv__insert_pending_req(loop, req);
- handle->reqs_pending++;
- }
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv__insert_pending_req(loop, req);
+ handle->reqs_pending++;
}
}
@@ -376,7 +322,6 @@
handle->flags |= UV_HANDLE_READING;
INCREASE_ACTIVE_COUNT(loop, handle);
- loop->active_udp_streams++;
handle->recv_cb = recv_cb;
handle->alloc_cb = alloc_cb;
@@ -393,7 +338,6 @@
int uv__udp_recv_stop(uv_udp_t* handle) {
if (handle->flags & UV_HANDLE_READING) {
handle->flags &= ~UV_HANDLE_READING;
- handle->loop->active_udp_streams--;
DECREASE_ACTIVE_COUNT(loop, handle);
}
@@ -497,57 +441,68 @@
DWORD bytes, err, flags;
struct sockaddr_storage from;
int from_len;
+ int count;
- /* Do a nonblocking receive.
- * TODO: try to read multiple datagrams at once. FIONREAD maybe? */
- buf = uv_buf_init(NULL, 0);
- handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
- if (buf.base == NULL || buf.len == 0) {
- handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
- goto done;
- }
- assert(buf.base != NULL);
+ /* Prevent loop starvation when the data comes in as fast as
+ * (or faster than) we can read it. */
+ count = 32;
- memset(&from, 0, sizeof from);
- from_len = sizeof from;
+ do {
+ /* Do at most `count` nonblocking receive. */
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
+ goto done;
+ }
- flags = 0;
+ memset(&from, 0, sizeof from);
+ from_len = sizeof from;
- if (WSARecvFrom(handle->socket,
- (WSABUF*)&buf,
- 1,
- &bytes,
- &flags,
- (struct sockaddr*) &from,
- &from_len,
- NULL,
- NULL) != SOCKET_ERROR) {
+ flags = 0;
- /* Message received */
- handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
- } else {
- err = WSAGetLastError();
- if (err == WSAEMSGSIZE) {
- /* Message truncated */
- handle->recv_cb(handle,
- bytes,
- &buf,
- (const struct sockaddr*) &from,
- UV_UDP_PARTIAL);
- } else if (err == WSAEWOULDBLOCK) {
- /* Kernel buffer empty */
- handle->recv_cb(handle, 0, &buf, NULL, 0);
- } else if (err == WSAECONNRESET || err == WSAENETRESET) {
- /* WSAECONNRESET/WSANETRESET is ignored because this just indicates
- * that a previous sendto operation failed.
- */
- handle->recv_cb(handle, 0, &buf, NULL, 0);
+ if (WSARecvFrom(handle->socket,
+ (WSABUF*)&buf,
+ 1,
+ &bytes,
+ &flags,
+ (struct sockaddr*) &from,
+ &from_len,
+ NULL,
+ NULL) != SOCKET_ERROR) {
+
+ /* Message received */
+ err = ERROR_SUCCESS;
+ handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
} else {
- /* Any other error that we want to report back to the user. */
- uv_udp_recv_stop(handle);
- handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
+ err = WSAGetLastError();
+ if (err == WSAEMSGSIZE) {
+ /* Message truncated */
+ handle->recv_cb(handle,
+ bytes,
+ &buf,
+ (const struct sockaddr*) &from,
+ UV_UDP_PARTIAL);
+ } else if (err == WSAEWOULDBLOCK) {
+ /* Kernel buffer empty */
+ handle->recv_cb(handle, 0, &buf, NULL, 0);
+ } else if (err == WSAECONNRESET || err == WSAENETRESET) {
+ /* WSAECONNRESET/WSANETRESET is ignored because this just indicates
+ * that a previous sendto operation failed.
+ */
+ handle->recv_cb(handle, 0, &buf, NULL, 0);
+ } else {
+ /* Any other error that we want to report back to the user. */
+ uv_udp_recv_stop(handle);
+ handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
+ }
}
}
+ while (err == ERROR_SUCCESS &&
+ count-- > 0 &&
+ /* The recv_cb callback may decide to pause or close the handle. */
+ (handle->flags & UV_HANDLE_READING) &&
+ !(handle->flags & UV_HANDLE_READ_PENDING));
}
done:
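
Note: the udp.cpp change above replaces the single per-wakeup WSARecvFrom() with a bounded drain loop (count = 32) so a flood of datagrams cannot starve timers and other handles. The shape of the pattern, with drain_one() standing in for any nonblocking read (a hypothetical helper, not libuv API):

    #include <stdbool.h>

    enum { MAX_DRAIN_PER_WAKEUP = 32 };

    static void drain_ready(bool (*drain_one)(void)) {
      int budget = MAX_DRAIN_PER_WAKEUP;
      /* Stop when drain_one() reports would-block or the budget is spent;
       * anything left over is handled on the next loop iteration. */
      while (budget-- > 0 && drain_one())
        ;
    }
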
diff --git a/wpinet/src/main/native/thirdparty/libuv/src/win/util.cpp b/wpinet/src/main/native/thirdparty/libuv/src/win/util.cpp
index d9888ae..4b76417 100644
--- a/wpinet/src/main/native/thirdparty/libuv/src/win/util.cpp
+++ b/wpinet/src/main/native/thirdparty/libuv/src/win/util.cpp
@@ -31,6 +31,7 @@
#include "internal.h"
/* clang-format off */
+#include <sysinfoapi.h>
#include <winsock2.h>
#include <winperf.h>
#include <iphlpapi.h>
@@ -72,7 +73,9 @@
static CRITICAL_SECTION process_title_lock;
#pragma comment(lib, "Advapi32.lib")
+#pragma comment(lib, "Dbghelp.lib")
#pragma comment(lib, "IPHLPAPI.lib")
+#pragma comment(lib, "Ole32.lib")
#pragma comment(lib, "Psapi.lib")
#pragma comment(lib, "Userenv.lib")
#pragma comment(lib, "kernel32.lib")
@@ -129,9 +132,6 @@
goto error;
}
- /* utf16_len contains the length, *not* including the terminating null. */
- utf16_buffer[utf16_len] = L'\0';
-
/* Convert to UTF-8 */
utf8_len = WideCharToMultiByte(CP_UTF8,
0,
@@ -159,6 +159,51 @@
}
+static int uv__cwd(WCHAR** buf, DWORD *len) {
+ WCHAR* p;
+ DWORD n;
+ DWORD t;
+
+ t = GetCurrentDirectoryW(0, NULL);
+ for (;;) {
+ if (t == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ /* |t| is the size of the buffer _including_ nul. */
+ p = (WCHAR *)uv__malloc(t * sizeof(*p));
+ if (p == NULL)
+ return UV_ENOMEM;
+
+ /* |n| is the size of the buffer _excluding_ nul but _only on success_.
+ * If |t| was too small because another thread changed the working
+ * directory, |n| is the size the buffer should be _including_ nul.
+ * It therefore follows we must resize when n >= t and fail when n == 0.
+ */
+ n = GetCurrentDirectoryW(t, p);
+ if (n > 0)
+ if (n < t)
+ break;
+
+ uv__free(p);
+ t = n;
+ }
+
+ /* The returned directory should not have a trailing slash, unless it points
+ * at a drive root, like c:\. Remove it if needed.
+ */
+ t = n - 1;
+ if (p[t] == L'\\' && !(n == 3 && p[1] == L':')) {
+ p[t] = L'\0';
+ n = t;
+ }
+
+ *buf = p;
+ *len = n;
+
+ return 0;
+}
+
+
int uv_cwd(char* buffer, size_t* size) {
DWORD utf16_len;
WCHAR *utf16_buffer;
@@ -168,30 +213,9 @@
return UV_EINVAL;
}
- utf16_len = GetCurrentDirectoryW(0, NULL);
- if (utf16_len == 0) {
- return uv_translate_sys_error(GetLastError());
- }
- utf16_buffer = (WCHAR*)uv__malloc(utf16_len * sizeof(WCHAR));
- if (utf16_buffer == NULL) {
- return UV_ENOMEM;
- }
-
- utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer);
- if (utf16_len == 0) {
- uv__free(utf16_buffer);
- return uv_translate_sys_error(GetLastError());
- }
-
- /* utf16_len contains the length, *not* including the terminating null. */
- utf16_buffer[utf16_len] = L'\0';
-
- /* The returned directory should not have a trailing slash, unless it points
- * at a drive root, like c:\. Remove it if needed. */
- if (utf16_buffer[utf16_len - 1] == L'\\' &&
- !(utf16_len == 3 && utf16_buffer[1] == L':')) {
- utf16_len--;
- utf16_buffer[utf16_len] = L'\0';
+ r = uv__cwd(&utf16_buffer, &utf16_len);
+ if (r < 0) {
+ return r;
}
/* Check how much space we need */
@@ -234,8 +258,9 @@
int uv_chdir(const char* dir) {
WCHAR *utf16_buffer;
- size_t utf16_len, new_utf16_len;
+ DWORD utf16_len;
WCHAR drive_letter, env_var[4];
+ int r;
if (dir == NULL) {
return UV_EINVAL;
@@ -270,32 +295,22 @@
return uv_translate_sys_error(GetLastError());
}
+ /* uv__cwd() will return a new buffer. */
+ uv__free(utf16_buffer);
+ utf16_buffer = NULL;
+
/* Windows stores the drive-local path in an "hidden" environment variable,
* which has the form "=C:=C:\Windows". SetCurrentDirectory does not update
* this, so we'll have to do it. */
- new_utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer);
- if (new_utf16_len > utf16_len ) {
- uv__free(utf16_buffer);
- utf16_buffer = (WCHAR*)uv__malloc(new_utf16_len * sizeof(WCHAR));
- if (utf16_buffer == NULL) {
- /* When updating the environment variable fails, return UV_OK anyway.
- * We did successfully change current working directory, only updating
- * hidden env variable failed. */
- return 0;
- }
- new_utf16_len = GetCurrentDirectoryW(new_utf16_len, utf16_buffer);
- }
- if (utf16_len == 0) {
- uv__free(utf16_buffer);
+ r = uv__cwd(&utf16_buffer, &utf16_len);
+ if (r == UV_ENOMEM) {
+ /* When updating the environment variable fails, return UV_OK anyway.
+ * We did successfully change current working directory, only updating
+ * hidden env variable failed. */
return 0;
}
-
- /* The returned directory should not have a trailing slash, unless it points
- * at a drive root, like c:\. Remove it if needed. */
- if (utf16_buffer[utf16_len - 1] == L'\\' &&
- !(utf16_len == 3 && utf16_buffer[1] == L':')) {
- utf16_len--;
- utf16_buffer[utf16_len] = L'\0';
+ if (r < 0) {
+ return r;
}
if (utf16_len < 2 || utf16_buffer[1] != L':') {
@@ -338,7 +353,7 @@
memory_status.dwLength = sizeof(memory_status);
if (!GlobalMemoryStatusEx(&memory_status)) {
- return -1;
+ return 0;
}
return (uint64_t)memory_status.ullAvailPhys;
@@ -350,7 +365,7 @@
memory_status.dwLength = sizeof(memory_status);
if (!GlobalMemoryStatusEx(&memory_status)) {
- return -1;
+ return 0;
}
return (uint64_t)memory_status.ullTotalPhys;
@@ -362,6 +377,11 @@
}
+uint64_t uv_get_available_memory(void) {
+ return uv_get_free_memory();
+}
+
+
uv_pid_t uv_os_getpid(void) {
return GetCurrentProcessId();
}
@@ -495,11 +515,43 @@
}
+/* https://github.com/libuv/libuv/issues/1674 */
+int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
+ FILETIME ft;
+ int64_t t;
+
+ if (ts == NULL)
+ return UV_EFAULT;
+
+ switch (clock_id) {
+ case UV_CLOCK_MONOTONIC:
+ uv__once_init();
+ t = uv__hrtime(UV__NANOSEC);
+ ts->tv_sec = t / 1000000000;
+ ts->tv_nsec = t % 1000000000;
+ return 0;
+ case UV_CLOCK_REALTIME:
+ GetSystemTimePreciseAsFileTime(&ft);
+ /* In 100-nanosecond increments from 1601-01-01 UTC because why not? */
+ t = (int64_t) ft.dwHighDateTime << 32 | ft.dwLowDateTime;
+ /* Convert to UNIX epoch, 1970-01-01. Still in 100 ns increments. */
+ t -= 116444736000000000ll;
+ /* Now convert to seconds and nanoseconds. */
+ ts->tv_sec = t / 10000000;
+ ts->tv_nsec = t % 10000000 * 100;
+ return 0;
+ }
+
+ return UV_EINVAL;
+}
+
+
uint64_t uv_hrtime(void) {
uv__once_init();
return uv__hrtime(UV__NANOSEC);
}
+
uint64_t uv__hrtime(unsigned int scale) {
LARGE_INTEGER counter;
double scaled_freq;
@@ -686,71 +738,6 @@
}
-static int is_windows_version_or_greater(DWORD os_major,
- DWORD os_minor,
- WORD service_pack_major,
- WORD service_pack_minor) {
- OSVERSIONINFOEX osvi;
- DWORDLONG condition_mask = 0;
- int op = VER_GREATER_EQUAL;
-
- /* Initialize the OSVERSIONINFOEX structure. */
- ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
- osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
- osvi.dwMajorVersion = os_major;
- osvi.dwMinorVersion = os_minor;
- osvi.wServicePackMajor = service_pack_major;
- osvi.wServicePackMinor = service_pack_minor;
-
- /* Initialize the condition mask. */
- VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, op);
- VER_SET_CONDITION(condition_mask, VER_MINORVERSION, op);
- VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, op);
- VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, op);
-
- /* Perform the test. */
- return (int) VerifyVersionInfo(
- &osvi,
- VER_MAJORVERSION | VER_MINORVERSION |
- VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR,
- condition_mask);
-}
-
-
-static int address_prefix_match(int family,
- struct sockaddr* address,
- struct sockaddr* prefix_address,
- int prefix_len) {
- uint8_t* address_data;
- uint8_t* prefix_address_data;
- int i;
-
- assert(address->sa_family == family);
- assert(prefix_address->sa_family == family);
-
- if (family == AF_INET6) {
- address_data = (uint8_t*) &(((struct sockaddr_in6 *) address)->sin6_addr);
- prefix_address_data =
- (uint8_t*) &(((struct sockaddr_in6 *) prefix_address)->sin6_addr);
- } else {
- address_data = (uint8_t*) &(((struct sockaddr_in *) address)->sin_addr);
- prefix_address_data =
- (uint8_t*) &(((struct sockaddr_in *) prefix_address)->sin_addr);
- }
-
- for (i = 0; i < prefix_len >> 3; i++) {
- if (address_data[i] != prefix_address_data[i])
- return 0;
- }
-
- if (prefix_len % 8)
- return prefix_address_data[i] ==
- (address_data[i] & (0xff << (8 - prefix_len % 8)));
-
- return 1;
-}
-
-
int uv_interface_addresses(uv_interface_address_t** addresses_ptr,
int* count_ptr) {
IP_ADAPTER_ADDRESSES* win_address_buf;
@@ -763,26 +750,13 @@
uv_interface_address_t* uv_address;
int count;
-
- int is_vista_or_greater;
ULONG flags;
*addresses_ptr = NULL;
*count_ptr = 0;
- is_vista_or_greater = is_windows_version_or_greater(6, 0, 0, 0);
- if (is_vista_or_greater) {
- flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
- GAA_FLAG_SKIP_DNS_SERVER;
- } else {
- /* We need at least XP SP1. */
- if (!is_windows_version_or_greater(5, 1, 1, 0))
- return UV_ENOTSUP;
-
- flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
- GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_PREFIX;
- }
-
+ flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
+ GAA_FLAG_SKIP_DNS_SERVER;
/* Fetch the size of the adapters reported by windows, and then get the list
* itself. */
@@ -947,37 +921,8 @@
sa = unicast_address->Address.lpSockaddr;
- /* XP has no OnLinkPrefixLength field. */
- if (is_vista_or_greater) {
- prefix_len =
- ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength;
- } else {
- /* Prior to Windows Vista the FirstPrefix pointed to the list with
- * single prefix for each IP address assigned to the adapter.
- * Order of FirstPrefix does not match order of FirstUnicastAddress,
- * so we need to find corresponding prefix.
- */
- IP_ADAPTER_PREFIX* prefix;
- prefix_len = 0;
-
- for (prefix = adapter->FirstPrefix; prefix; prefix = prefix->Next) {
- /* We want the longest matching prefix. */
- if (prefix->Address.lpSockaddr->sa_family != sa->sa_family ||
- prefix->PrefixLength <= prefix_len)
- continue;
-
- if (address_prefix_match(sa->sa_family, sa,
- prefix->Address.lpSockaddr, prefix->PrefixLength)) {
- prefix_len = prefix->PrefixLength;
- }
- }
-
- /* If there is no matching prefix information, return a single-host
- * subnet mask (e.g. 255.255.255.255 for IPv4).
- */
- if (!prefix_len)
- prefix_len = (sa->sa_family == AF_INET6) ? 128 : 32;
- }
+ prefix_len =
+ ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength;
memset(uv_address, 0, sizeof *uv_address);
@@ -1102,8 +1047,8 @@
if (r != UV_ENOENT)
return r;
- /* USERPROFILE is not set, so call uv__getpwuid_r() */
- r = uv__getpwuid_r(&pwd);
+ /* USERPROFILE is not set, so call uv_os_get_passwd() */
+ r = uv_os_get_passwd(&pwd);
if (r != 0) {
return r;
@@ -1190,17 +1135,6 @@
}
-void uv_os_free_passwd(uv_passwd_t* pwd) {
- if (pwd == NULL)
- return;
-
- uv__free(pwd->username);
- uv__free(pwd->homedir);
- pwd->username = NULL;
- pwd->homedir = NULL;
-}
-
-
/*
* Converts a UTF-16 string into a UTF-8 one. The resulting string is
* null-terminated.
@@ -1297,7 +1231,7 @@
}
-int uv__getpwuid_r(uv_passwd_t* pwd) {
+static int uv__getpwuid_r(uv_passwd_t* pwd) {
HANDLE token;
wchar_t username[UNLEN + 1];
wchar_t *path;
@@ -1375,6 +1309,16 @@
}
+int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
+ return UV_ENOTSUP;
+}
+
+
+int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
+ return UV_ENOTSUP;
+}
+
+
int uv_os_environ(uv_env_item_t** envitems, int* count) {
wchar_t* env;
wchar_t* penv;
@@ -1778,6 +1722,22 @@
RegCloseKey(registry_key);
if (r == ERROR_SUCCESS) {
+      /* Windows 11 shares dwMajorVersion with Windows 10; this workaround
+       * disambiguates the two by checking whether dwBuildNumber comes from a
+       * Windows 11 release (>= 22000).
+       *
+       * When it does, the ProductName key value is rewritten from
+       * "Windows 10 *" to "Windows 11 *". */
+ if (os_info.dwMajorVersion == 10 &&
+ os_info.dwBuildNumber >= 22000 &&
+ product_name_w_size >= ARRAY_SIZE(L"Windows 10")) {
+ /* If ProductName starts with "Windows 10" */
+ if (wcsncmp(product_name_w, L"Windows 10", ARRAY_SIZE(L"Windows 10") - 1) == 0) {
+ /* Bump 10 to 11 */
+ product_name_w[9] = '1';
+ }
+ }
+
version_size = WideCharToMultiByte(CP_UTF8,
0,
product_name_w,
diff --git a/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPConnector_parallel.cpp b/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPConnector_parallel.cpp
index 1d979cb..32654e9 100644
--- a/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPConnector_parallel.cpp
+++ b/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPConnector_parallel.cpp
@@ -15,14 +15,6 @@
using namespace wpi;
-// MSVC < 1900 doesn't have support for thread_local
-#if !defined(_MSC_VER) || _MSC_VER >= 1900
-// clang check for availability of thread_local
-#if !defined(__has_feature) || __has_feature(cxx_thread_local)
-#define HAVE_THREAD_LOCAL
-#endif
-#endif
-
std::unique_ptr<NetworkStream> TCPConnector::connect_parallel(
std::span<const std::pair<const char*, int>> servers, Logger& logger,
int timeout) {
@@ -33,18 +25,10 @@
// structure to make sure we don't start duplicate workers
struct GlobalState {
wpi::mutex mtx;
-#ifdef HAVE_THREAD_LOCAL
- SmallSet<std::pair<std::string, int>, 16> active;
-#else
SmallSet<std::tuple<std::thread::id, std::string, int>, 16> active;
-#endif
};
-#ifdef HAVE_THREAD_LOCAL
- thread_local auto global = std::make_shared<GlobalState>();
-#else
static auto global = std::make_shared<GlobalState>();
auto this_id = std::this_thread::get_id();
-#endif
auto local = global; // copy to an automatic variable for lambda capture
// structure shared between threads and this function
@@ -63,12 +47,8 @@
for (const auto& server : servers) {
std::pair<std::string, int> server_copy{std::string{server.first},
server.second};
-#ifdef HAVE_THREAD_LOCAL
- const auto& active_tracker = server_copy;
-#else
std::tuple<std::thread::id, std::string, int> active_tracker{
this_id, server_copy.first, server_copy.second};
-#endif
// don't start a new worker if we had a previously still-active connection
// attempt to the same server
diff --git a/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPStream.cpp b/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPStream.cpp
index 920f7b1..ccd7591 100644
--- a/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPStream.cpp
+++ b/wpinet/src/main/native/thirdparty/tcpsockets/cpp/TCPStream.cpp
@@ -36,6 +36,8 @@
#include <cerrno>
+#include <wpi/StringExtras.h>
+
using namespace wpi;
TCPStream::TCPStream(int sd, sockaddr_in* address)
@@ -85,12 +87,9 @@
}
if (!result) {
char Buffer[128];
-#ifdef _MSC_VER
- sprintf_s(Buffer, "Send() failed: WSA error=%d\n", WSAGetLastError());
-#else
- std::snprintf(Buffer, sizeof(Buffer), "Send() failed: WSA error=%d\n",
- WSAGetLastError());
-#endif
+ wpi::format_to_n_c_str(Buffer, sizeof(Buffer),
+ "Send() failed: WSA error={}\n", WSAGetLastError());
+
OutputDebugStringA(Buffer);
*err = kConnectionReset;
return 0;
diff --git a/wpinet/src/netconsoleServer/native/cpp/main.cpp b/wpinet/src/netconsoleServer/native/cpp/main.cpp
index 0271e36..29c1491 100644
--- a/wpinet/src/netconsoleServer/native/cpp/main.cpp
+++ b/wpinet/src/netconsoleServer/native/cpp/main.cpp
@@ -14,6 +14,7 @@
#include <wpi/MathExtras.h>
#include <wpi/SmallVector.h>
#include <wpi/StringExtras.h>
+#include <wpi/bit.h>
#include <wpi/timestamp.h>
#include "wpinet/raw_uv_ostream.h"
@@ -47,7 +48,8 @@
std::string_view toCopy = wpi::slice(str, 0, idx + 1);
if (tcp) {
// Header is 2 byte len, 1 byte type, 4 byte timestamp, 2 byte sequence num
- uint32_t ts = wpi::FloatToBits((wpi::Now() - startTime) * 1.0e-6);
+ uint32_t ts =
+ wpi::bit_cast<uint32_t, float>((wpi::Now() - startTime) * 1.0e-6);
uint16_t len = rem.size() + toCopy.size() + 1 + 4 + 2;
const uint8_t header[] = {static_cast<uint8_t>((len >> 8) & 0xff),
static_cast<uint8_t>(len & 0xff),
@@ -67,6 +69,10 @@
return true;
}
+// FIXME: clang-tidy reports a false positive for leaking a captured shared_ptr
+// (clang-analyzer-cplusplus.NewDeleteLeaks)
+
+// NOLINTBEGIN
static void CopyUdp(uv::Stream& in, std::shared_ptr<uv::Udp> out,
bool broadcast) {
sockaddr_in addr;
@@ -131,6 +137,7 @@
});
});
}
+// NOLINTEND
int main(int argc, char* argv[]) {
// parse arguments
diff --git a/wpinet/src/netconsoleTee/native/cpp/main.cpp b/wpinet/src/netconsoleTee/native/cpp/main.cpp
index a6bdff4..1028992 100644
--- a/wpinet/src/netconsoleTee/native/cpp/main.cpp
+++ b/wpinet/src/netconsoleTee/native/cpp/main.cpp
@@ -8,6 +8,7 @@
#include <wpi/MathExtras.h>
#include <wpi/SmallVector.h>
#include <wpi/StringExtras.h>
+#include <wpi/bit.h>
#include <wpi/timestamp.h>
#include "wpinet/raw_uv_ostream.h"
@@ -38,7 +39,8 @@
std::string_view toCopy = wpi::slice(str, 0, idx + 1);
if (tcp) {
// Header is 2 byte len, 1 byte type, 4 byte timestamp, 2 byte sequence num
- uint32_t ts = wpi::FloatToBits((wpi::Now() - startTime) * 1.0e-6);
+ uint32_t ts =
+ wpi::bit_cast<uint32_t, float>((wpi::Now() - startTime) * 1.0e-6);
uint16_t len = rem.size() + toCopy.size() + 1 + 4 + 2;
const uint8_t header[] = {static_cast<uint8_t>((len >> 8) & 0xff),
static_cast<uint8_t>(len & 0xff),
@@ -58,6 +60,10 @@
return true;
}
+// FIXME: clang-tidy reports a false positive for leaking a captured shared_ptr
+// (clang-analyzer-cplusplus.NewDeleteLeaks)
+
+// NOLINTBEGIN
static void CopyUdp(uv::Stream& in, std::shared_ptr<uv::Udp> out, int port,
bool broadcast) {
sockaddr_in addr;
@@ -110,6 +116,7 @@
},
out);
}
+// NOLINTEND
static void CopyStream(uv::Stream& in, std::shared_ptr<uv::Stream> out) {
in.data.connect([out](uv::Buffer& buf, size_t len) {
diff --git a/wpinet/src/test/native/cpp/HttpParserTest.cpp b/wpinet/src/test/native/cpp/HttpParserTest.cpp
index a9d927a..02a17d9 100644
--- a/wpinet/src/test/native/cpp/HttpParserTest.cpp
+++ b/wpinet/src/test/native/cpp/HttpParserTest.cpp
@@ -4,7 +4,7 @@
#include "wpinet/HttpParser.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
namespace wpi {
diff --git a/wpinet/src/test/native/cpp/HttpUtilTest.cpp b/wpinet/src/test/native/cpp/HttpUtilTest.cpp
index 4417b7c..f36235b 100644
--- a/wpinet/src/test/native/cpp/HttpUtilTest.cpp
+++ b/wpinet/src/test/native/cpp/HttpUtilTest.cpp
@@ -4,7 +4,7 @@
#include "wpinet/HttpUtil.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
namespace wpi {
diff --git a/wpinet/src/test/native/cpp/MulticastTest.cpp b/wpinet/src/test/native/cpp/MulticastTest.cpp
index bbec538..412dd48 100644
--- a/wpinet/src/test/native/cpp/MulticastTest.cpp
+++ b/wpinet/src/test/native/cpp/MulticastTest.cpp
@@ -12,10 +12,9 @@
#include <thread>
#include <utility>
+#include <gtest/gtest.h>
#include <wpi/timestamp.h>
-#include "gtest/gtest.h"
-
TEST(MulticastServiceAnnouncerTest, EmptyText) {
const std::string_view serviceName = "TestServiceNoText";
const std::string_view serviceType = "_wpinotxt";
diff --git a/wpinet/src/test/native/cpp/WebSocketIntegrationTest.cpp b/wpinet/src/test/native/cpp/WebSocketIntegrationTest.cpp
index 5f6c8a5..6b74d82 100644
--- a/wpinet/src/test/native/cpp/WebSocketIntegrationTest.cpp
+++ b/wpinet/src/test/native/cpp/WebSocketIntegrationTest.cpp
@@ -147,4 +147,56 @@
ASSERT_EQ(gotData, 1);
}
+TEST_F(WebSocketIntegrationTest, ServerSendPing) {
+ int gotPing = 0;
+ int gotPong = 0;
+ int gotData = 0;
+
+ serverPipe->Listen([&]() {
+ auto conn = serverPipe->Accept();
+ auto server = WebSocketServer::Create(*conn);
+ server->connected.connect([&](std::string_view, WebSocket& ws) {
+ ws.SendText({{"hello"}}, [&](auto, uv::Error) {});
+ ws.SendPing({uv::Buffer{"\x03\x04", 2}}, [&](auto, uv::Error) {});
+ ws.SendPing({uv::Buffer{"\x03\x04", 2}}, [&](auto, uv::Error) {});
+ ws.SendText({{"hello"}}, [&](auto, uv::Error) {});
+ ws.pong.connect([&](auto data) {
+ ++gotPong;
+ std::vector<uint8_t> recvData{data.begin(), data.end()};
+ std::vector<uint8_t> expectData{0x03, 0x04};
+ ASSERT_EQ(recvData, expectData);
+ if (gotPong == 2) {
+ ws.Close();
+ }
+ });
+ });
+ });
+
+ clientPipe->Connect(pipeName, [&] {
+ auto ws = WebSocket::CreateClient(*clientPipe, "/test", pipeName);
+ ws->closed.connect([&](uint16_t code, std::string_view reason) {
+ Finish();
+ if (code != 1005 && code != 1006) {
+ FAIL() << "Code: " << code << " Reason: " << reason;
+ }
+ });
+ ws->ping.connect([&](auto data) {
+ ++gotPing;
+ std::vector<uint8_t> recvData{data.begin(), data.end()};
+ std::vector<uint8_t> expectData{0x03, 0x04};
+ ASSERT_EQ(recvData, expectData);
+ });
+ ws->text.connect([&](std::string_view data, bool) {
+ ++gotData;
+ ASSERT_EQ(data, "hello");
+ });
+ });
+
+ loop->Run();
+
+ ASSERT_EQ(gotPing, 2);
+ ASSERT_EQ(gotPong, 2);
+ ASSERT_EQ(gotData, 2);
+}
+
} // namespace wpi
diff --git a/wpinet/src/test/native/cpp/WebSocketSerializerTest.cpp b/wpinet/src/test/native/cpp/WebSocketSerializerTest.cpp
new file mode 100644
index 0000000..6767a23
--- /dev/null
+++ b/wpinet/src/test/native/cpp/WebSocketSerializerTest.cpp
@@ -0,0 +1,379 @@
+// Copyright (c) FIRST and other WPILib contributors.
+// Open Source Software; you can modify and/or share it under the terms of
+// the WPILib BSD license file in the root directory of this project.
+
+#include "WebSocketSerializer.h" // NOLINT(build/include_order)
+
+#include <algorithm>
+#include <array>
+#include <ostream>
+#include <span>
+
+#include <gmock/gmock.h>
+#include <wpi/SpanMatcher.h>
+
+#include "WebSocketTest.h"
+#include "wpinet/uv/Buffer.h"
+
+using ::testing::_;
+using ::testing::AnyOf;
+using ::testing::ElementsAre;
+using ::testing::Field;
+using ::testing::Pointee;
+using ::testing::Return;
+
+namespace wpi::uv {
+inline bool operator==(const Buffer& lhs, const Buffer& rhs) {
+ return lhs.len == rhs.len &&
+ std::equal(lhs.base, lhs.base + lhs.len, rhs.base);
+}
+inline void PrintTo(const Buffer& buf, ::std::ostream* os) {
+ ::wpi::PrintTo(buf.bytes(), os);
+}
+} // namespace wpi::uv
+
+namespace wpi {
+inline bool operator==(const WebSocket::Frame& lhs,
+ const WebSocket::Frame& rhs) {
+ return lhs.opcode == rhs.opcode &&
+ std::equal(lhs.data.begin(), lhs.data.end(), rhs.data.begin());
+}
+inline void PrintTo(const WebSocket::Frame& frame, ::std::ostream* os) {
+ *os << frame.opcode << ": ";
+ ::wpi::PrintTo(frame.data, os);
+}
+} // namespace wpi
+
+namespace wpi::detail {
+
+class MockWebSocketWriteReq
+ : public std::enable_shared_from_this<MockWebSocketWriteReq>,
+ public detail::WebSocketWriteReqBase {
+ public:
+ explicit MockWebSocketWriteReq(
+ std::function<void(std::span<uv::Buffer>, uv::Error)> callback) {}
+};
+
+class MockStream {
+ public:
+ MOCK_METHOD(int, TryWrite, (std::span<const uv::Buffer>));
+ void Write(std::span<const uv::Buffer> bufs,
+ const std::shared_ptr<MockWebSocketWriteReq>& req) {
+ // std::cout << "Write(";
+ // PrintTo(bufs, &std::cout);
+ // std::cout << ")\n";
+ DoWrite(bufs, req);
+ }
+ MOCK_METHOD(void, DoWrite,
+ (std::span<const uv::Buffer> bufs,
+ const std::shared_ptr<MockWebSocketWriteReq>& req));
+};
+
+class WebSocketWriteReqTest : public ::testing::Test {
+ public:
+ WebSocketWriteReqTest() {
+ req->m_frames.m_bufs.emplace_back(m_buf0);
+ req->m_frames.m_bufs.emplace_back(m_buf1);
+ req->m_frames.m_bufs.emplace_back(m_buf2);
+ req->m_continueFrameOffs.emplace_back(5); // frame 0: first 2 buffers
+ req->m_continueFrameOffs.emplace_back(9); // frame 1: last buffer
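+    // Offsets are cumulative byte positions: m_buf0 (3) + m_buf1 (2) = 5
+    // bytes ends frame 0, and adding m_buf2 (4) = 9 bytes ends frame 1.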
+ }
+
+ std::shared_ptr<MockWebSocketWriteReq> req =
+ std::make_shared<MockWebSocketWriteReq>([](auto, auto) {});
+ ::testing::StrictMock<MockStream> stream;
+ static const uint8_t m_buf0[3];
+ static const uint8_t m_buf1[2];
+ static const uint8_t m_buf2[4];
+};
+
+const uint8_t WebSocketWriteReqTest::m_buf0[3] = {1, 2, 3};
+const uint8_t WebSocketWriteReqTest::m_buf1[2] = {4, 5};
+const uint8_t WebSocketWriteReqTest::m_buf2[4] = {6, 7, 8, 9};
+
+TEST_F(WebSocketWriteReqTest, ContinueDone) {
+ req->m_continueBufPos = 3;
+ ASSERT_EQ(req->Continue(stream, req), 0);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWriteComplete) {
+ EXPECT_CALL(stream, TryWrite(wpi::SpanEq(req->m_frames.m_bufs)))
+ .WillOnce(Return(9));
+ ASSERT_EQ(req->Continue(stream, req), 0);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWriteNoProgress) {
+ // if TryWrite returns 0
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(0));
+ // Write should get called for all of next frame - make forward progress
+ uv::Buffer remBufs[2] = {uv::Buffer{m_buf0}, uv::Buffer{m_buf1}};
+ EXPECT_CALL(stream,
+ DoWrite(wpi::SpanEq(std::span<const uv::Buffer>(remBufs)), _));
+ ASSERT_EQ(req->Continue(stream, req), 1);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWriteError) {
+ // if TryWrite returns -1, the error is passed along
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(-1));
+ ASSERT_EQ(req->Continue(stream, req), -1);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWritePartialMidFrameMidBuf1) {
+ // stop partway through buf 0
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(2));
+ // Write should get called for remainder of buf 0 and all of buf 1
+ uv::Buffer remBufs[2] = {uv::Buffer{&m_buf0[2], 1}, uv::Buffer{m_buf1}};
+ EXPECT_CALL(stream,
+ DoWrite(wpi::SpanEq(std::span<const uv::Buffer>(remBufs)), _));
+ ASSERT_EQ(req->Continue(stream, req), 1);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWritePartialMidFrameBufBoundary) {
+ // stop at end of buf 0
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(3));
+ // Write should get called for all of buf 1
+ uv::Buffer remBufs[1] = {uv::Buffer{m_buf1}};
+ EXPECT_CALL(stream,
+ DoWrite(wpi::SpanEq(std::span<const uv::Buffer>(remBufs)), _));
+ ASSERT_EQ(req->Continue(stream, req), 1);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWritePartialMidFrameMidBuf2) {
+ // stop partway through buf 1
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(4));
+ // Write should get called for remainder of buf 1
+ uv::Buffer remBufs[1] = {uv::Buffer{&m_buf1[1], 1}};
+ EXPECT_CALL(stream,
+ DoWrite(wpi::SpanEq(std::span<const uv::Buffer>(remBufs)), _));
+ ASSERT_EQ(req->Continue(stream, req), 1);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWritePartialFrameBoundary) {
+ // stop at end of buf 1
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(5));
+ // Write should get called for all of next frame
+ uv::Buffer remBufs[1] = {uv::Buffer{m_buf2}};
+ EXPECT_CALL(stream,
+ DoWrite(wpi::SpanEq(std::span<const uv::Buffer>(remBufs)), _));
+ ASSERT_EQ(req->Continue(stream, req), 1);
+}
+
+TEST_F(WebSocketWriteReqTest, ContinueTryWritePartialMidFrameMidBuf3) {
+ // stop partway through buf 2
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(6));
+ // Write should get called for remainder of buf 2
+ uv::Buffer remBufs[1] = {uv::Buffer{&m_buf2[1], 3}};
+ EXPECT_CALL(stream,
+ DoWrite(wpi::SpanEq(std::span<const uv::Buffer>(remBufs)), _));
+ ASSERT_EQ(req->Continue(stream, req), 1);
+}
+
+class WebSocketTrySendTest : public ::testing::Test {
+ public:
+ ::testing::StrictMock<MockStream> stream;
+ std::shared_ptr<MockWebSocketWriteReq> req;
+ static const std::array<uint8_t, 3> m_buf0data;
+ static const std::array<uint8_t, 2> m_buf1data;
+ static const std::array<uint8_t, 4> m_buf2data;
+ static const std::array<uint8_t, 4> m_buf3data;
+ static const std::array<uv::Buffer, 4> m_bufs;
+ static const std::array<uint8_t, 5> m_frame0data;
+ static const std::array<uv::Buffer, 2> m_frame0bufs;
+ static const std::array<uv::Buffer, 1> m_frame1bufs;
+ static const std::array<uv::Buffer, 1> m_frame2bufs;
+ static const std::array<WebSocket::Frame, 3> m_frames;
+ static const std::array<std::vector<uint8_t>, 3> m_serialized;
+ static const std::array<uv::Buffer, 3> m_frameHeaders;
+
+ int makeReqCalled = 0;
+ int callbackCalled = 0;
+ void CheckTrySendFrames(std::span<const uv::Buffer> expectCbBufs,
+ std::span<const WebSocket::Frame> expectRet,
+ int expectErr = 0);
+};
+
+const std::array<uint8_t, 3> WebSocketTrySendTest::m_buf0data{1, 2, 3};
+const std::array<uint8_t, 2> WebSocketTrySendTest::m_buf1data{4, 5};
+const std::array<uint8_t, 4> WebSocketTrySendTest::m_buf2data{6, 7, 8, 9};
+const std::array<uint8_t, 4> WebSocketTrySendTest::m_buf3data{10, 11, 12, 13};
+const std::array<uv::Buffer, 4> WebSocketTrySendTest::m_bufs{
+ uv::Buffer{m_buf0data}, uv::Buffer{m_buf1data}, uv::Buffer{m_buf2data},
+ uv::Buffer{m_buf3data}};
+const std::array<uint8_t, 5> WebSocketTrySendTest::m_frame0data{1, 2, 3, 4, 5};
+const std::array<uv::Buffer, 2> WebSocketTrySendTest::m_frame0bufs{m_bufs[0],
+ m_bufs[1]};
+const std::array<uv::Buffer, 1> WebSocketTrySendTest::m_frame1bufs{m_bufs[2]};
+const std::array<uv::Buffer, 1> WebSocketTrySendTest::m_frame2bufs{m_bufs[3]};
+const std::array<WebSocket::Frame, 3> WebSocketTrySendTest::m_frames{
+ WebSocket::Frame{WebSocket::Frame::kBinaryFragment, m_frame0bufs},
+ WebSocket::Frame{WebSocket::Frame::kBinary, m_frame1bufs},
+ WebSocket::Frame{WebSocket::Frame::kText, m_frame2bufs},
+};
+const std::array<std::vector<uint8_t>, 3> WebSocketTrySendTest::m_serialized{
+ WebSocketTest::BuildMessage(m_frames[0].opcode, false, false, m_frame0data),
+ WebSocketTest::BuildMessage(m_frames[1].opcode, true, false, m_buf2data),
+ WebSocketTest::BuildMessage(m_frames[2].opcode, true, false, m_buf3data),
+};
+const std::array<uv::Buffer, 3> WebSocketTrySendTest::m_frameHeaders{
+ uv::Buffer{m_serialized[0].data(),
+ m_serialized[0].size() - m_frame0data.size()},
+ uv::Buffer{m_serialized[1].data(),
+ m_serialized[1].size() - m_buf2data.size()},
+ uv::Buffer{m_serialized[2].data(),
+ m_serialized[2].size() - m_buf3data.size()},
+};
+
+void WebSocketTrySendTest::CheckTrySendFrames(
+ std::span<const uv::Buffer> expectCbBufs,
+ std::span<const WebSocket::Frame> expectRet, int expectErr) {
+ ASSERT_THAT(
+ TrySendFrames(
+ true, stream, m_frames,
+ [&](std::function<void(std::span<uv::Buffer>, uv::Error)>&& cb) {
+ ++makeReqCalled;
+ req = std::make_shared<MockWebSocketWriteReq>(std::move(cb));
+ return req;
+ },
+ [&](auto bufs, auto err) {
+ ++callbackCalled;
+ ASSERT_THAT(bufs,
+ SpanEq(std::span<const uv::Buffer>(expectCbBufs)));
+ ASSERT_EQ(err.code(), expectErr);
+ }),
+ SpanEq(expectRet));
+}
+
+TEST_F(WebSocketTrySendTest, ServerComplete) {
+ // if trywrite sends everything
+ EXPECT_CALL(stream, TryWrite(_))
+ .WillOnce(Return(m_serialized[0].size() + m_serialized[1].size() +
+ m_serialized[2].size()));
+ // return nothing, and call callback immediately
+ CheckTrySendFrames(m_bufs, {});
+ ASSERT_EQ(makeReqCalled, 0);
+ ASSERT_EQ(callbackCalled, 1);
+}
+
+TEST_F(WebSocketTrySendTest, ServerNoProgress) {
+ // if trywrite sends nothing
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(0));
+ // we should get all the frames back (the callback may be called with an empty
+ // set of buffers)
+ CheckTrySendFrames({}, m_frames);
+ ASSERT_EQ(makeReqCalled, 0);
+ ASSERT_THAT(callbackCalled, AnyOf(0, 1));
+}
+
+TEST_F(WebSocketTrySendTest, ServerError) {
+ // if TryWrite returns -1, the error is passed along
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(-1));
+ CheckTrySendFrames(m_bufs, m_frames, -1);
+ ASSERT_EQ(makeReqCalled, 0);
+ ASSERT_EQ(callbackCalled, 1);
+}
+
+TEST_F(WebSocketTrySendTest, ServerPartialMidFrameMidBuf0) {
+ // stop partway through buf 0 (first buf of frame 0)
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(m_frameHeaders[0].len + 2));
+ // Write should get called for remainder of buf 0 and all of buf 1
+ // buf 2 should get put into continuation because frame 0 is a fragment
+ // return will be frame 2 only
+ std::array<uv::Buffer, 2> remBufs{std::span{m_buf0data}.subspan(2),
+ m_bufs[1]};
+ std::array<uv::Buffer, 2> contBufs{m_frameHeaders[1], m_bufs[2]};
+ std::array<int, 1> contFrameOffs{static_cast<int>(m_serialized[1].size())};
+ EXPECT_CALL(stream, DoWrite(wpi::SpanEq(remBufs), _));
+ CheckTrySendFrames({}, std::span{m_frames}.subspan(2));
+ ASSERT_EQ(makeReqCalled, 1);
+ ASSERT_THAT(req->m_frames.m_bufs, SpanEq(contBufs));
+ ASSERT_EQ(req->m_continueBufPos, 0u);
+ ASSERT_EQ(req->m_continueFramePos, 0u);
+ ASSERT_THAT(req->m_continueFrameOffs, SpanEq(contFrameOffs));
+ ASSERT_EQ(callbackCalled, 0);
+}
+
+TEST_F(WebSocketTrySendTest, ServerPartialMidFrameBufBoundary) {
+ // stop at end of buf 0 (first buf of frame 0)
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(m_frameHeaders[0].len + 3));
+ // Write should get called for all of buf 1
+ // buf 2 should get put into continuation because frame 0 is a fragment
+ // return will be frame 2 only
+ std::array<uv::Buffer, 1> remBufs{m_bufs[1]};
+ std::array<uv::Buffer, 2> contBufs{m_frameHeaders[1], m_bufs[2]};
+ EXPECT_CALL(stream, DoWrite(wpi::SpanEq(remBufs), _));
+ CheckTrySendFrames({}, std::span{m_frames}.subspan(2));
+ ASSERT_EQ(makeReqCalled, 1);
+ ASSERT_THAT(req->m_frames.m_bufs, SpanEq(contBufs));
+ ASSERT_EQ(callbackCalled, 0);
+}
+
+TEST_F(WebSocketTrySendTest, ServerPartialMidFrameMidBuf1) {
+ // stop partway through buf 1 (second buf of frame 0)
+ EXPECT_CALL(stream, TryWrite(_)).WillOnce(Return(m_frameHeaders[0].len + 4));
+ // Write should get called for remainder of buf 1
+ // buf 2 should get put into continuation because frame 0 is a fragment
+ // return will be frame 2 only
+ std::array<uv::Buffer, 1> remBufs{std::span{m_buf1data}.subspan(1)};
+ std::array<uv::Buffer, 2> contBufs{m_frameHeaders[1], m_bufs[2]};
+ EXPECT_CALL(stream, DoWrite(wpi::SpanEq(remBufs), _));
+ CheckTrySendFrames({}, std::span{m_frames}.subspan(2));
+ ASSERT_EQ(makeReqCalled, 1);
+ ASSERT_THAT(req->m_frames.m_bufs, SpanEq(contBufs));
+ ASSERT_EQ(callbackCalled, 0);
+}
+
+TEST_F(WebSocketTrySendTest, ServerPartialFrameBoundary) {
+ // stop at end of buf 1 (end of frame 0)
+ EXPECT_CALL(stream, TryWrite(_))
+ .WillOnce(Return(m_frameHeaders[0].len + m_frameHeaders[1].len + 5));
+ // Write should get called for all of buf 2 because frame 0 is a fragment
+ // no continuation
+ // return will be frame 2 only
+ std::array<uv::Buffer, 1> remBufs{m_bufs[2]};
+ EXPECT_CALL(stream, DoWrite(wpi::SpanEq(remBufs), _));
+ CheckTrySendFrames({}, std::span{m_frames}.subspan(2));
+ ASSERT_EQ(makeReqCalled, 1);
+ ASSERT_TRUE(req->m_frames.m_bufs.empty());
+ ASSERT_EQ(callbackCalled, 0);
+}
+
+TEST_F(WebSocketTrySendTest, ServerPartialMidFrameMidBuf2) {
+ // stop partway through buf 2 (frame 1 buf)
+ EXPECT_CALL(stream, TryWrite(_))
+ .WillOnce(Return(m_frameHeaders[0].len + m_frameHeaders[1].len + 6));
+ // Write should get called for remainder of buf 2; no continuation
+ // return will be frame 2 only
+ std::array<uv::Buffer, 1> remBufs{std::span{m_buf2data}.subspan(1)};
+ EXPECT_CALL(stream, DoWrite(wpi::SpanEq(remBufs), _));
+ CheckTrySendFrames({}, std::span{m_frames}.subspan(2));
+ ASSERT_EQ(makeReqCalled, 1);
+ ASSERT_TRUE(req->m_frames.m_bufs.empty());
+ ASSERT_EQ(callbackCalled, 0);
+}
+
+TEST_F(WebSocketTrySendTest, ServerFrameBoundary) {
+ // stop at end of buf 2 (end of frame 1)
+ EXPECT_CALL(stream, TryWrite(_))
+ .WillOnce(Return(m_frameHeaders[0].len + m_frameHeaders[1].len + 9));
+ // call callback immediately for bufs 0-2 and return frame 2
+ CheckTrySendFrames(std::span{m_bufs}.subspan(0, 3),
+ std::span{m_frames}.subspan(2));
+ ASSERT_EQ(makeReqCalled, 0);
+ ASSERT_EQ(callbackCalled, 1);
+}
+
+TEST_F(WebSocketTrySendTest, ServerPartialLastFrame) {
+ // stop partway through buf 3
+ EXPECT_CALL(stream, TryWrite(_))
+ .WillOnce(Return(m_frameHeaders[0].len + m_frameHeaders[1].len +
+ m_frameHeaders[2].len + 10));
+ // Write called for remainder of buf 3; no continuation
+ std::array<uv::Buffer, 1> remBufs{std::span{m_buf3data}.subspan(1)};
+ EXPECT_CALL(stream, DoWrite(wpi::SpanEq(remBufs), _));
+ CheckTrySendFrames({}, {});
+ ASSERT_EQ(makeReqCalled, 1);
+ ASSERT_TRUE(req->m_frames.m_bufs.empty());
+ ASSERT_EQ(callbackCalled, 0);
+}
+
+} // namespace wpi::detail
diff --git a/wpinet/src/test/native/cpp/WebSocketServerTest.cpp b/wpinet/src/test/native/cpp/WebSocketServerTest.cpp
index 18dd50b..7d33fa2 100644
--- a/wpinet/src/test/native/cpp/WebSocketServerTest.cpp
+++ b/wpinet/src/test/native/cpp/WebSocketServerTest.cpp
@@ -396,7 +396,14 @@
std::vector<uint8_t> data2(4, 0x04);
std::vector<uint8_t> data3(4, 0x05);
std::vector<uint8_t> combData{data};
+#if __GNUC__ == 11
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif // __GNUC__ == 11
combData.insert(combData.end(), data2.begin(), data2.end());
+#if __GNUC__ == 11
+#pragma GCC diagnostic pop
+#endif // __GNUC__ == 11
combData.insert(combData.end(), data3.begin(), data3.end());
setupWebSocket = [&] {
@@ -471,6 +478,56 @@
ASSERT_EQ(gotCallback, 3);
}
+// Control frames can happen in the middle of a fragmented message
+TEST_F(WebSocketServerTest, ReceiveFragmentWithControl) {
+ int gotCallback = 0;
+ int gotPongCallback = 0;
+
+ std::vector<uint8_t> data(4, 0x03);
+ std::vector<uint8_t> data2(4, 0x04);
+ std::vector<uint8_t> data3(4, 0x05);
+ std::vector<uint8_t> data4(4, 0x06);
+ std::vector<uint8_t> combData{data};
+#if __GNUC__ == 11
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif // __GNUC__ == 11
+ combData.insert(combData.end(), data2.begin(), data2.end());
+#if __GNUC__ == 11
+#pragma GCC diagnostic pop
+#endif // __GNUC__ == 11
+ combData.insert(combData.end(), data4.begin(), data4.end());
+
+ setupWebSocket = [&] {
+ ws->binary.connect([&](auto inData, bool fin) {
+ ASSERT_TRUE(gotPongCallback);
+ ++gotCallback;
+ ws->Terminate();
+ ASSERT_TRUE(fin);
+ std::vector<uint8_t> recvData{inData.begin(), inData.end()};
+ ASSERT_EQ(combData, recvData);
+ });
+ ws->pong.connect([&](auto inData) {
+ ASSERT_FALSE(gotCallback);
+ ++gotPongCallback;
+ });
+ };
+
+ auto message = BuildMessage(0x02, false, true, data);
+ auto message2 = BuildMessage(0x00, false, true, data2);
+ auto message3 = BuildMessage(0x0a, true, true, data3);
+ auto message4 = BuildMessage(0x00, true, true, data4);
+ resp.headersComplete.connect([&](bool) {
+ clientPipe->Write({{message}, {message2}, {message3}, {message4}},
+ [&](auto bufs, uv::Error) {});
+ });
+
+ loop->Run();
+
+ ASSERT_EQ(gotCallback, 1);
+ ASSERT_EQ(gotPongCallback, 1);
+}
+
//
// Maximum message size is limited.
//
diff --git a/wpinet/src/test/native/cpp/WebSocketTest.h b/wpinet/src/test/native/cpp/WebSocketTest.h
index 903ce00..1c904a8 100644
--- a/wpinet/src/test/native/cpp/WebSocketTest.h
+++ b/wpinet/src/test/native/cpp/WebSocketTest.h
@@ -9,7 +9,8 @@
#include <span>
#include <vector>
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
+
#include "wpinet/uv/Loop.h"
#include "wpinet/uv/Pipe.h"
#include "wpinet/uv/Timer.h"
@@ -48,9 +49,7 @@
failTimer->Unreference();
}
- ~WebSocketTest() override {
- Finish();
- }
+ ~WebSocketTest() override { Finish(); }
void Finish() {
loop->Walk([](uv::Handle& it) { it.Close(); });
diff --git a/wpinet/src/test/native/cpp/WorkerThreadTest.cpp b/wpinet/src/test/native/cpp/WorkerThreadTest.cpp
index 8279cb1..d4f462d 100644
--- a/wpinet/src/test/native/cpp/WorkerThreadTest.cpp
+++ b/wpinet/src/test/native/cpp/WorkerThreadTest.cpp
@@ -4,10 +4,9 @@
#include "wpinet/WorkerThread.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
-
#include <thread>
+#include <gtest/gtest.h>
#include <wpi/condition_variable.h>
#include <wpi/mutex.h>
diff --git a/wpinet/src/test/native/cpp/hostname.cpp b/wpinet/src/test/native/cpp/hostname.cpp
index 51a29a2..df7e2f0 100644
--- a/wpinet/src/test/native/cpp/hostname.cpp
+++ b/wpinet/src/test/native/cpp/hostname.cpp
@@ -4,11 +4,10 @@
#include "wpinet/hostname.h"
+#include <gtest/gtest.h>
#include <wpi/SmallString.h>
#include <wpi/SmallVector.h>
-#include "gtest/gtest.h"
-
namespace wpi {
TEST(HostNameTest, HostNameNotEmpty) {
ASSERT_NE(GetHostname(), "");
diff --git a/wpinet/src/test/native/cpp/main.cpp b/wpinet/src/test/native/cpp/main.cpp
index 09072ee..e993c1f 100644
--- a/wpinet/src/test/native/cpp/main.cpp
+++ b/wpinet/src/test/native/cpp/main.cpp
@@ -2,7 +2,7 @@
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
diff --git a/wpinet/src/test/native/cpp/raw_uv_stream_test.cpp b/wpinet/src/test/native/cpp/raw_uv_stream_test.cpp
index 541da5a..4b33b17 100644
--- a/wpinet/src/test/native/cpp/raw_uv_stream_test.cpp
+++ b/wpinet/src/test/native/cpp/raw_uv_stream_test.cpp
@@ -4,7 +4,7 @@
#include "wpinet/raw_uv_ostream.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
namespace wpi {
diff --git a/wpinet/src/test/native/cpp/uv/UvAsyncFunctionTest.cpp b/wpinet/src/test/native/cpp/uv/UvAsyncFunctionTest.cpp
index 19c1faf..4369dc0 100644
--- a/wpinet/src/test/native/cpp/uv/UvAsyncFunctionTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvAsyncFunctionTest.cpp
@@ -4,10 +4,10 @@
#include "wpinet/uv/AsyncFunction.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
-
#include <thread>
+#include <gtest/gtest.h>
+
#include "wpinet/uv/Loop.h"
#include "wpinet/uv/Prepare.h"
diff --git a/wpinet/src/test/native/cpp/uv/UvAsyncTest.cpp b/wpinet/src/test/native/cpp/uv/UvAsyncTest.cpp
index 5dd0f76..ed4324c 100644
--- a/wpinet/src/test/native/cpp/uv/UvAsyncTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvAsyncTest.cpp
@@ -25,11 +25,10 @@
#include "wpinet/uv/Async.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
-
#include <atomic>
#include <thread>
+#include <gtest/gtest.h>
#include <wpi/mutex.h>
#include "wpinet/uv/Loop.h"
diff --git a/wpinet/src/test/native/cpp/uv/UvBufferTest.cpp b/wpinet/src/test/native/cpp/uv/UvBufferTest.cpp
index da6a63c..f349d51 100644
--- a/wpinet/src/test/native/cpp/uv/UvBufferTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvBufferTest.cpp
@@ -4,7 +4,7 @@
#include "wpinet/uv/Buffer.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
+#include <gtest/gtest.h>
namespace wpi::uv {
diff --git a/wpinet/src/test/native/cpp/uv/UvGetAddrInfoTest.cpp b/wpinet/src/test/native/cpp/uv/UvGetAddrInfoTest.cpp
index 5ad33b2..78bb4c2 100644
--- a/wpinet/src/test/native/cpp/uv/UvGetAddrInfoTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvGetAddrInfoTest.cpp
@@ -25,7 +25,7 @@
#include "wpinet/uv/GetAddrInfo.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
+#include <gtest/gtest.h>
#include "wpinet/uv/Loop.h"
diff --git a/wpinet/src/test/native/cpp/uv/UvGetNameInfoTest.cpp b/wpinet/src/test/native/cpp/uv/UvGetNameInfoTest.cpp
index 707f037..dfb813b 100644
--- a/wpinet/src/test/native/cpp/uv/UvGetNameInfoTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvGetNameInfoTest.cpp
@@ -25,7 +25,7 @@
#include "wpinet/uv/GetNameInfo.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
+#include <gtest/gtest.h>
#include "wpinet/uv/Loop.h"
diff --git a/wpinet/src/test/native/cpp/uv/UvLoopWalkTest.cpp b/wpinet/src/test/native/cpp/uv/UvLoopWalkTest.cpp
index eee0f99..3cb1c9f 100644
--- a/wpinet/src/test/native/cpp/uv/UvLoopWalkTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvLoopWalkTest.cpp
@@ -25,7 +25,7 @@
#include "wpinet/uv/Loop.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h" // NOLINT(build/include_order)
+#include <gtest/gtest.h>
#include "wpinet/uv/Timer.h"
diff --git a/wpinet/src/test/native/cpp/uv/UvTimerTest.cpp b/wpinet/src/test/native/cpp/uv/UvTimerTest.cpp
index 7377ab1..ea6188b 100644
--- a/wpinet/src/test/native/cpp/uv/UvTimerTest.cpp
+++ b/wpinet/src/test/native/cpp/uv/UvTimerTest.cpp
@@ -4,7 +4,7 @@
#include "wpinet/uv/Timer.h" // NOLINT(build/include_order)
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
namespace wpi::uv {